diff --git a/cts/scheduler/dot/bug-5025-1.dot b/cts/scheduler/dot/bug-5025-1.dot
index 8cae3a30e0..bcc05e127a 100644
--- a/cts/scheduler/dot/bug-5025-1.dot
+++ b/cts/scheduler/dot/bug-5025-1.dot
@@ -1,6 +1,6 @@
digraph "g" {
"A_clear_failcount_0 fc16-builder" [ style=bold color="green" fontcolor="black"]
"A_monitor_30000 fc16-builder" [ style=bold color="green" fontcolor="black"]
-"A_reload_0 fc16-builder" -> "A_monitor_30000 fc16-builder" [ style = bold]
-"A_reload_0 fc16-builder" [ style=bold color="green" fontcolor="black"]
+"A_reload-agent_0 fc16-builder" -> "A_monitor_30000 fc16-builder" [ style = bold]
+"A_reload-agent_0 fc16-builder" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/dot/load-stopped-loop.dot b/cts/scheduler/dot/load-stopped-loop.dot
index 76e9051418..df2b6b35dd 100644
--- a/cts/scheduler/dot/load-stopped-loop.dot
+++ b/cts/scheduler/dot/load-stopped-loop.dot
@@ -1,62 +1,62 @@
digraph "g" {
"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" -> "license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style = bold]
"license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style=bold color="green" fontcolor="black"]
"license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" -> "license.anbriz.vds-ok.com-vm_migrate_from_0 v03-a" [ style = bold]
"license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" [ style=bold color="green" fontcolor="black"]
"license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"]
"license.anbriz.vds-ok.com-vm_start_0 v03-a" -> "license.anbriz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold]
"license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="orange"]
"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
"license.anbriz.vds-ok.com-vm_stop_0 v03-b" -> "load_stopped_v03-b v03-b" [ style = bold]
"license.anbriz.vds-ok.com-vm_stop_0 v03-b" [ style=bold color="green" fontcolor="black"]
"load_stopped_mgmt01 mgmt01" [ style=bold color="green" fontcolor="orange"]
"load_stopped_v03-a v03-a" -> "license.anbriz.vds-ok.com-vm_migrate_to_0 v03-b" [ style = bold]
"load_stopped_v03-a v03-a" -> "license.anbriz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
"load_stopped_v03-a v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style = bold]
"load_stopped_v03-a v03-a" [ style=bold color="green" fontcolor="orange"]
"load_stopped_v03-b v03-b" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold]
"load_stopped_v03-b v03-b" [ style=bold color="green" fontcolor="orange"]
"stonith-v03-a_monitor_60000 v03-b" [ style=bold color="green" fontcolor="black"]
"stonith-v03-a_start_0 v03-b" -> "stonith-v03-a_monitor_60000 v03-b" [ style = bold]
"stonith-v03-a_start_0 v03-b" [ style=bold color="green" fontcolor="black"]
"stonith-v03-a_stop_0 v03-b" -> "stonith-v03-a_start_0 v03-b" [ style = bold]
"stonith-v03-a_stop_0 v03-b" [ style=bold color="green" fontcolor="black"]
"stonith-v03-b_monitor_60000 v03-a" [ style=bold color="green" fontcolor="black"]
"stonith-v03-b_start_0 v03-a" -> "stonith-v03-b_monitor_60000 v03-a" [ style = bold]
"stonith-v03-b_start_0 v03-a" [ style=bold color="green" fontcolor="black"]
"stonith-v03-b_stop_0 v03-a" -> "stonith-v03-b_start_0 v03-a" [ style = bold]
"stonith-v03-b_stop_0 v03-a" [ style=bold color="green" fontcolor="black"]
"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold]
"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style = bold]
"terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style=bold color="green" fontcolor="black"]
"terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_migrate_from_0 v03-b" [ style = bold]
"terminal0.anbriz.vds-ok.com-vm_migrate_to_0 v03-a" [ style=bold color="green" fontcolor="black"]
"terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style=bold color="green" fontcolor="black"]
"terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" -> "terminal0.anbriz.vds-ok.com-vm_monitor_10000 v03-b" [ style = bold]
"terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style=bold color="green" fontcolor="orange"]
"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "load_stopped_v03-a v03-a" [ style = bold]
"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" -> "terminal0.anbriz.vds-ok.com-vm_start_0 v03-b" [ style = bold]
"terminal0.anbriz.vds-ok.com-vm_stop_0 v03-a" [ style=bold color="green" fontcolor="black"]
"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"]
"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" -> "vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold]
"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_start_0 v03-a" [ style=bold color="green" fontcolor="black"]
"vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"]
-"vds-ok-pool-0-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style = bold]
-"vds-ok-pool-0-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"]
+"vds-ok-pool-0-iscsi:0_reload-agent_0 v03-b" -> "vds-ok-pool-0-iscsi:0_monitor_30000 v03-b" [ style = bold]
+"vds-ok-pool-0-iscsi:0_reload-agent_0 v03-b" [ style=bold color="green" fontcolor="black"]
"vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"]
-"vds-ok-pool-0-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style = bold]
-"vds-ok-pool-0-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"]
+"vds-ok-pool-0-iscsi:1_reload-agent_0 mgmt01" -> "vds-ok-pool-0-iscsi:1_monitor_30000 mgmt01" [ style = bold]
+"vds-ok-pool-0-iscsi:1_reload-agent_0 mgmt01" [ style=bold color="green" fontcolor="black"]
"vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"]
-"vds-ok-pool-0-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style = bold]
-"vds-ok-pool-0-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"]
+"vds-ok-pool-0-iscsi:2_reload-agent_0 v03-a" -> "vds-ok-pool-0-iscsi:2_monitor_30000 v03-a" [ style = bold]
+"vds-ok-pool-0-iscsi:2_reload-agent_0 v03-a" [ style=bold color="green" fontcolor="black"]
"vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style=bold color="green" fontcolor="black"]
-"vds-ok-pool-1-iscsi:0_reload_0 v03-b" -> "vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style = bold]
-"vds-ok-pool-1-iscsi:0_reload_0 v03-b" [ style=bold color="green" fontcolor="black"]
+"vds-ok-pool-1-iscsi:0_reload-agent_0 v03-b" -> "vds-ok-pool-1-iscsi:0_monitor_30000 v03-b" [ style = bold]
+"vds-ok-pool-1-iscsi:0_reload-agent_0 v03-b" [ style=bold color="green" fontcolor="black"]
"vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style=bold color="green" fontcolor="black"]
-"vds-ok-pool-1-iscsi:1_reload_0 mgmt01" -> "vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style = bold]
-"vds-ok-pool-1-iscsi:1_reload_0 mgmt01" [ style=bold color="green" fontcolor="black"]
+"vds-ok-pool-1-iscsi:1_reload-agent_0 mgmt01" -> "vds-ok-pool-1-iscsi:1_monitor_30000 mgmt01" [ style = bold]
+"vds-ok-pool-1-iscsi:1_reload-agent_0 mgmt01" [ style=bold color="green" fontcolor="black"]
"vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style=bold color="green" fontcolor="black"]
-"vds-ok-pool-1-iscsi:2_reload_0 v03-a" -> "vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style = bold]
-"vds-ok-pool-1-iscsi:2_reload_0 v03-a" [ style=bold color="green" fontcolor="black"]
+"vds-ok-pool-1-iscsi:2_reload-agent_0 v03-a" -> "vds-ok-pool-1-iscsi:2_monitor_30000 v03-a" [ style = bold]
+"vds-ok-pool-1-iscsi:2_reload-agent_0 v03-a" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/dot/params-4.dot b/cts/scheduler/dot/params-4.dot
index 0168625e2c..8faadd8f70 100644
--- a/cts/scheduler/dot/params-4.dot
+++ b/cts/scheduler/dot/params-4.dot
@@ -1,26 +1,26 @@
digraph "g" {
"Cancel rsc_c001n02_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"]
-"DcIPaddr_monitor_0 c001n01" -> "DcIPaddr_reload_0 c001n02" [ style = bold]
+"DcIPaddr_monitor_0 c001n01" -> "DcIPaddr_reload-agent_0 c001n02" [ style = bold]
"DcIPaddr_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"]
-"DcIPaddr_monitor_0 c001n03" -> "DcIPaddr_reload_0 c001n02" [ style = bold]
+"DcIPaddr_monitor_0 c001n03" -> "DcIPaddr_reload-agent_0 c001n02" [ style = bold]
"DcIPaddr_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"]
-"DcIPaddr_monitor_0 c001n08" -> "DcIPaddr_reload_0 c001n02" [ style = bold]
+"DcIPaddr_monitor_0 c001n08" -> "DcIPaddr_reload-agent_0 c001n02" [ style = bold]
"DcIPaddr_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"]
"DcIPaddr_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"]
-"DcIPaddr_reload_0 c001n02" -> "DcIPaddr_monitor_5000 c001n02" [ style = bold]
-"DcIPaddr_reload_0 c001n02" [ style=bold color="green" fontcolor="black"]
+"DcIPaddr_reload-agent_0 c001n02" -> "DcIPaddr_monitor_5000 c001n02" [ style = bold]
+"DcIPaddr_reload-agent_0 c001n02" [ style=bold color="green" fontcolor="black"]
"rsc_c001n01_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"]
"rsc_c001n01_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"]
"rsc_c001n01_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"]
"rsc_c001n02_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"]
"rsc_c001n02_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"]
"rsc_c001n02_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"]
"rsc_c001n02_monitor_6000 c001n02" [ style=bold color="green" fontcolor="black"]
"rsc_c001n03_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"]
"rsc_c001n03_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"]
"rsc_c001n03_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"]
"rsc_c001n08_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"]
"rsc_c001n08_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"]
"rsc_c001n08_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"]
"rsc_c001n08_monitor_5000 c001n08" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/dot/params-6.dot b/cts/scheduler/dot/params-6.dot
index 0cc058cb20..db28556809 100644
--- a/cts/scheduler/dot/params-6.dot
+++ b/cts/scheduler/dot/params-6.dot
@@ -1,10 +1,10 @@
digraph "g" {
"c5-x64-devel.vds-ok.com-vm_monitor_10000 v03-a" [ style=bold color="green" fontcolor="black"]
-"c5-x64-devel.vds-ok.com-vm_reload_0 v03-a" -> "c5-x64-devel.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold]
-"c5-x64-devel.vds-ok.com-vm_reload_0 v03-a" [ style=bold color="green" fontcolor="black"]
+"c5-x64-devel.vds-ok.com-vm_reload-agent_0 v03-a" -> "c5-x64-devel.vds-ok.com-vm_monitor_10000 v03-a" [ style = bold]
+"c5-x64-devel.vds-ok.com-vm_reload-agent_0 v03-a" [ style=bold color="green" fontcolor="black"]
"load_stopped_mgmt01 mgmt01" [ style=bold color="green" fontcolor="orange"]
"load_stopped_v03-a v03-a" [ style=bold color="green" fontcolor="orange"]
"load_stopped_v03-b v03-b" [ style=bold color="green" fontcolor="orange"]
"vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-b" [ style=bold color="green" fontcolor="black"]
"vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm_monitor_10000 v03-b" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/exp/bug-5025-1.exp b/cts/scheduler/exp/bug-5025-1.exp
index e213e34f80..c81d8bf00c 100644
--- a/cts/scheduler/exp/bug-5025-1.exp
+++ b/cts/scheduler/exp/bug-5025-1.exp
@@ -1,33 +1,33 @@
-
+
-
+
diff --git a/cts/scheduler/exp/load-stopped-loop.exp b/cts/scheduler/exp/load-stopped-loop.exp
index 4d69aabcc1..a029e1fb49 100644
--- a/cts/scheduler/exp/load-stopped-loop.exp
+++ b/cts/scheduler/exp/load-stopped-loop.exp
@@ -1,398 +1,398 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
diff --git a/cts/scheduler/exp/params-4.exp b/cts/scheduler/exp/params-4.exp
index c03425864b..2880e65266 100644
--- a/cts/scheduler/exp/params-4.exp
+++ b/cts/scheduler/exp/params-4.exp
@@ -1,196 +1,196 @@
-
+
-
+
diff --git a/cts/scheduler/exp/params-6.exp b/cts/scheduler/exp/params-6.exp
index 993663d60d..88447d10a7 100644
--- a/cts/scheduler/exp/params-6.exp
+++ b/cts/scheduler/exp/params-6.exp
@@ -1,66 +1,66 @@
-
+
-
+
diff --git a/cts/scheduler/summary/bug-5025-1.summary b/cts/scheduler/summary/bug-5025-1.summary
index e2db7a9351..f83116e44b 100644
--- a/cts/scheduler/summary/bug-5025-1.summary
+++ b/cts/scheduler/summary/bug-5025-1.summary
@@ -1,25 +1,25 @@
Current cluster status:
* Node List:
* Online: [ fc16-builder ]
* OFFLINE: [ fc16-builder2 fc16-builder3 ]
* Full List of Resources:
* virt-fencing (stonith:fence_xvm): Started fc16-builder
* A (ocf:pacemaker:Dummy): Started fc16-builder
Transition Summary:
* Reload A ( fc16-builder )
Executing Cluster Transition:
* Cluster action: clear_failcount for A on fc16-builder
- * Resource action: A reload on fc16-builder
+ * Resource action: A reload-agent on fc16-builder
* Resource action: A monitor=30000 on fc16-builder
Revised Cluster Status:
* Node List:
* Online: [ fc16-builder ]
* OFFLINE: [ fc16-builder2 fc16-builder3 ]
* Full List of Resources:
* virt-fencing (stonith:fence_xvm): Started fc16-builder
* A (ocf:pacemaker:Dummy): Started fc16-builder
diff --git a/cts/scheduler/summary/load-stopped-loop.summary b/cts/scheduler/summary/load-stopped-loop.summary
index ff80da4fa0..f3f2473a4f 100644
--- a/cts/scheduler/summary/load-stopped-loop.summary
+++ b/cts/scheduler/summary/load-stopped-loop.summary
@@ -1,337 +1,337 @@
32 of 308 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ mgmt01 v03-a v03-b ]
* Full List of Resources:
* stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
* stonith-mgmt01 (stonith:fence_xvm): Started v03-b
* stonith-mgmt02 (stonith:meatware): Started mgmt01
* stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v03-a (stonith:fence_ipmilan): Started v03-b
* stonith-v03-b (stonith:fence_ipmilan): Started v03-a
* stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
* Clone Set: cl-clvmd [clvmd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-dlm [dlm]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-iscsid [iscsid]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirtd [libvirtd]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-multipathd [multipathd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-node-params [node-params]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan1-if [vlan1-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan101-if [vlan101-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan102-if [vlan102-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan103-if [vlan103-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan104-if [vlan104-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan3-if [vlan3-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan4-if [vlan4-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan5-if [vlan5-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan900-if [vlan900-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan909-if [vlan909-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-libvirt-images-fs [libvirt-images-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-install-fs [libvirt-install-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-images-pool [libvirt-images-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped
* vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* Clone Set: cl-vlan200-if [vlan200-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* Clone Set: cl-mcast-test-net [mcast-test-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* ktstudio.net-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* cloudsrv.credo-dialogue.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* c6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre01-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* lustre02-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre03-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* lustre03-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre04-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* lustre04-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* gw.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* license.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* terminal.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* lustre01-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre02-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* test-01.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-libvirt-qpid [libvirt-qpid]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* gw.gleb.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* gw.gotin.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* terminal0.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-mcast-gleb-net [mcast-gleb-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
Transition Summary:
* Reload vds-ok-pool-0-iscsi:0 ( mgmt01 )
* Reload vds-ok-pool-0-iscsi:1 ( v03-b )
* Reload vds-ok-pool-0-iscsi:2 ( v03-a )
* Reload vds-ok-pool-1-iscsi:0 ( mgmt01 )
* Reload vds-ok-pool-1-iscsi:1 ( v03-b )
* Reload vds-ok-pool-1-iscsi:2 ( v03-a )
* Restart stonith-v03-b ( v03-a ) due to resource definition change
* Restart stonith-v03-a ( v03-b ) due to resource definition change
* Migrate license.anbriz.vds-ok.com-vm ( v03-b -> v03-a )
* Migrate terminal0.anbriz.vds-ok.com-vm ( v03-a -> v03-b )
* Start vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm ( v03-a )
Executing Cluster Transition:
- * Resource action: vds-ok-pool-0-iscsi:1 reload on mgmt01
+ * Resource action: vds-ok-pool-0-iscsi:1 reload-agent on mgmt01
* Resource action: vds-ok-pool-0-iscsi:1 monitor=30000 on mgmt01
- * Resource action: vds-ok-pool-0-iscsi:0 reload on v03-b
+ * Resource action: vds-ok-pool-0-iscsi:0 reload-agent on v03-b
* Resource action: vds-ok-pool-0-iscsi:0 monitor=30000 on v03-b
- * Resource action: vds-ok-pool-0-iscsi:2 reload on v03-a
+ * Resource action: vds-ok-pool-0-iscsi:2 reload-agent on v03-a
* Resource action: vds-ok-pool-0-iscsi:2 monitor=30000 on v03-a
- * Resource action: vds-ok-pool-1-iscsi:1 reload on mgmt01
+ * Resource action: vds-ok-pool-1-iscsi:1 reload-agent on mgmt01
* Resource action: vds-ok-pool-1-iscsi:1 monitor=30000 on mgmt01
- * Resource action: vds-ok-pool-1-iscsi:0 reload on v03-b
+ * Resource action: vds-ok-pool-1-iscsi:0 reload-agent on v03-b
* Resource action: vds-ok-pool-1-iscsi:0 monitor=30000 on v03-b
- * Resource action: vds-ok-pool-1-iscsi:2 reload on v03-a
+ * Resource action: vds-ok-pool-1-iscsi:2 reload-agent on v03-a
* Resource action: vds-ok-pool-1-iscsi:2 monitor=30000 on v03-a
* Resource action: stonith-v03-b stop on v03-a
* Resource action: stonith-v03-b start on v03-a
* Resource action: stonith-v03-b monitor=60000 on v03-a
* Resource action: stonith-v03-a stop on v03-b
* Resource action: stonith-v03-a start on v03-b
* Resource action: stonith-v03-a monitor=60000 on v03-b
* Resource action: terminal0.anbriz.vds-ok.com-vm migrate_to on v03-a
* Pseudo action: load_stopped_mgmt01
* Resource action: terminal0.anbriz.vds-ok.com-vm migrate_from on v03-b
* Resource action: terminal0.anbriz.vds-ok.com-vm stop on v03-a
* Pseudo action: load_stopped_v03-a
* Resource action: license.anbriz.vds-ok.com-vm migrate_to on v03-b
* Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm start on v03-a
* Resource action: license.anbriz.vds-ok.com-vm migrate_from on v03-a
* Resource action: license.anbriz.vds-ok.com-vm stop on v03-b
* Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-a
* Pseudo action: load_stopped_v03-b
* Pseudo action: license.anbriz.vds-ok.com-vm_start_0
* Pseudo action: terminal0.anbriz.vds-ok.com-vm_start_0
* Resource action: license.anbriz.vds-ok.com-vm monitor=10000 on v03-a
* Resource action: terminal0.anbriz.vds-ok.com-vm monitor=10000 on v03-b
Revised Cluster Status:
* Node List:
* Online: [ mgmt01 v03-a v03-b ]
* Full List of Resources:
* stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
* stonith-mgmt01 (stonith:fence_xvm): Started v03-b
* stonith-mgmt02 (stonith:meatware): Started mgmt01
* stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v03-a (stonith:fence_ipmilan): Started v03-b
* stonith-v03-b (stonith:fence_ipmilan): Started v03-a
* stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
* Clone Set: cl-clvmd [clvmd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-dlm [dlm]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-iscsid [iscsid]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirtd [libvirtd]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-multipathd [multipathd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-node-params [node-params]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan1-if [vlan1-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan101-if [vlan101-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan102-if [vlan102-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan103-if [vlan103-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan104-if [vlan104-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan3-if [vlan3-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan4-if [vlan4-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan5-if [vlan5-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan900-if [vlan900-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan909-if [vlan909-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-libvirt-images-fs [libvirt-images-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-install-fs [libvirt-install-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-images-pool [libvirt-images-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* Clone Set: cl-vlan200-if [vlan200-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* Clone Set: cl-mcast-test-net [mcast-test-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* ktstudio.net-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* cloudsrv.credo-dialogue.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* c6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre01-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* lustre02-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre03-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* lustre03-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre04-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* lustre04-right.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-mcast-anbriz-net [mcast-anbriz-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* gw.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* license.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* terminal.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* lustre01-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* lustre02-left.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* test-01.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-libvirt-qpid [libvirt-qpid]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* gw.gleb.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* gw.gotin.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* terminal0.anbriz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* Clone Set: cl-mcast-gleb-net [mcast-gleb-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
diff --git a/cts/scheduler/summary/params-4.summary b/cts/scheduler/summary/params-4.summary
index 5257d91509..d6a71474af 100644
--- a/cts/scheduler/summary/params-4.summary
+++ b/cts/scheduler/summary/params-4.summary
@@ -1,46 +1,46 @@
Current cluster status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
Transition Summary:
* Reload DcIPaddr ( c001n02 )
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n08
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n01
- * Resource action: DcIPaddr reload on c001n02
+ * Resource action: DcIPaddr reload-agent on c001n02
* Resource action: DcIPaddr monitor=5000 on c001n02
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n02
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n08 monitor=5000 on c001n08
* Resource action: rsc_c001n02 monitor=6000 on c001n02
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n02 cancel=5000 on c001n02
* Resource action: rsc_c001n03 monitor on c001n08
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n02
Revised Cluster Status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
diff --git a/cts/scheduler/summary/params-6.summary b/cts/scheduler/summary/params-6.summary
index 56a9a2ddb7..4b5c4807ae 100644
--- a/cts/scheduler/summary/params-6.summary
+++ b/cts/scheduler/summary/params-6.summary
@@ -1,379 +1,379 @@
90 of 337 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ mgmt01 v03-a v03-b ]
* Full List of Resources:
* stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
* stonith-mgmt01 (stonith:fence_xvm): Started v03-a
* stonith-mgmt02 (stonith:meatware): Started v03-a
* stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v03-a (stonith:fence_ipmilan): Started v03-b
* stonith-v03-b (stonith:fence_ipmilan): Started mgmt01
* stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
* Clone Set: cl-clvmd [clvmd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-dlm [dlm]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-iscsid [iscsid]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirtd [libvirtd]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-multipathd [multipathd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-node-params [node-params]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan1-if [vlan1-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan101-if [vlan101-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan102-if [vlan102-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan103-if [vlan103-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan104-if [vlan104-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan3-if [vlan3-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan4-if [vlan4-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan5-if [vlan5-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan900-if [vlan900-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan909-if [vlan909-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-libvirt-images-fs [libvirt-images-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-install-fs [libvirt-install-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-images-pool [libvirt-images-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* Clone Set: cl-libvirt-qpid [libvirt-qpid]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* Clone Set: cl-vlan200-if [vlan200-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* anbriz-gw-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* anbriz-work-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vptest1.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest2.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest3.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest4.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest5.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest6.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest7.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest8.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest9.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest10.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest11.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest12.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest13.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest14.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest15.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest16.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest17.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest18.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest19.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest20.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest21.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest22.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest23.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest24.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest25.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest26.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest27.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest28.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest29.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest30.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest31.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest32.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest33.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest34.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest35.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest36.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest37.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest38.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest39.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest40.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest41.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest42.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest43.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest44.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest45.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest46.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest47.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest48.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest49.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest50.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest51.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest52.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest53.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest54.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest55.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest56.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest57.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest58.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest59.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest60.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* sl6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-mcast-test-net [mcast-test-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
Transition Summary:
* Reload c5-x64-devel.vds-ok.com-vm ( v03-a )
Executing Cluster Transition:
* Resource action: vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-b
* Resource action: vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm monitor=10000 on v03-b
- * Resource action: c5-x64-devel.vds-ok.com-vm reload on v03-a
+ * Resource action: c5-x64-devel.vds-ok.com-vm reload-agent on v03-a
* Resource action: c5-x64-devel.vds-ok.com-vm monitor=10000 on v03-a
* Pseudo action: load_stopped_v03-b
* Pseudo action: load_stopped_v03-a
* Pseudo action: load_stopped_mgmt01
Revised Cluster Status:
* Node List:
* Online: [ mgmt01 v03-a v03-b ]
* Full List of Resources:
* stonith-v02-a (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-b (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v02-d (stonith:fence_ipmilan): Stopped (disabled)
* stonith-mgmt01 (stonith:fence_xvm): Started v03-a
* stonith-mgmt02 (stonith:meatware): Started v03-a
* stonith-v03-c (stonith:fence_ipmilan): Stopped (disabled)
* stonith-v03-a (stonith:fence_ipmilan): Started v03-b
* stonith-v03-b (stonith:fence_ipmilan): Started mgmt01
* stonith-v03-d (stonith:fence_ipmilan): Stopped (disabled)
* Clone Set: cl-clvmd [clvmd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-dlm [dlm]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-iscsid [iscsid]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirtd [libvirtd]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-multipathd [multipathd]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-node-params [node-params]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan1-if [vlan1-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan101-if [vlan101-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan102-if [vlan102-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan103-if [vlan103-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan104-if [vlan104-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan3-if [vlan3-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan4-if [vlan4-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan5-if [vlan5-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan900-if [vlan900-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vlan909-if [vlan909-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-libvirt-images-fs [libvirt-images-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-install-fs [libvirt-install-fs]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-iscsi [vds-ok-pool-0-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-0-vg [vds-ok-pool-0-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-iscsi [vds-ok-pool-1-iscsi]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-vds-ok-pool-1-vg [vds-ok-pool-1-vg]:
* Started: [ mgmt01 v03-a v03-b ]
* Clone Set: cl-libvirt-images-pool [libvirt-images-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-0-pool [vds-ok-pool-0-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* Clone Set: cl-vds-ok-pool-1-pool [vds-ok-pool-1-pool]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* git.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* Clone Set: cl-libvirt-qpid [libvirt-qpid]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* vd01-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd01-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd01-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vd01-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* vd02-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd02-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd03-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-a.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-b.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-c.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vd04-d.cdev.ttc.prague.cz.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* f13-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* eu2.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* zakaz.transferrus.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* Clone Set: cl-vlan200-if [vlan200-if]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* anbriz-gw-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* anbriz-work-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* lenny-x32-devel-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* vptest1.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest2.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest3.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest4.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest5.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest6.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest7.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest8.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest9.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest10.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest11.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest12.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest13.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest14.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest15.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest16.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest17.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest18.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest19.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest20.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest21.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest22.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest23.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest24.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest25.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest26.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest27.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest28.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest29.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest30.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest31.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest32.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest33.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest34.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest35.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest36.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest37.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest38.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest39.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest40.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest41.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest42.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest43.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest44.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest45.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest46.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest47.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest48.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest49.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest50.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest51.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest52.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest53.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest54.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest55.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest56.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest57.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest58.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest59.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* vptest60.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* sl6-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-b
* dist.express-consult.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* eu1.ca-pages.com-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* gotin-bbb-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* maxb-c55-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* metae.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* rodovoepomestie.ru-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* ubuntu9.10-gotin-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
* c5-x64-devel.vds-ok.com-vm (ocf:vds-ok:VirtualDomain): Started v03-a
* Clone Set: cl-mcast-test-net [mcast-test-net]:
* Started: [ v03-a v03-b ]
* Stopped: [ mgmt01 ]
* dist.fly-uni.org-vm (ocf:vds-ok:VirtualDomain): Stopped (disabled)
diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c
index c529fc0801..90261a36dd 100644
--- a/daemons/controld/controld_execd.c
+++ b/daemons/controld/controld_execd.c
@@ -1,2842 +1,2886 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include <crm/lrmd.h>               // lrmd_event_data_t, lrmd_rsc_info_t, etc.
#include
#include
#include
#include
#include
#include
#define START_DELAY_THRESHOLD 5 * 60 * 1000
#define MAX_LRM_REG_FAILS 30
struct delete_event_s {
int rc;
const char *rsc;
lrm_state_t *lrm_state;
};
static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id);
static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list);
static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data);
static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op,
const char *rsc_id, const char *operation);
static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc,
const char *operation, xmlNode *msg);
static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state,
int log_level);
static int do_update_resource(const char *node_name, lrmd_rsc_info_t *rsc,
lrmd_event_data_t *op, time_t lock_time);
static void
lrm_connection_destroy(void)
{
if (pcmk_is_set(fsa_input_register, R_LRM_CONNECTED)) {
crm_crit("Connection to executor failed");
register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
controld_clear_fsa_input_flags(R_LRM_CONNECTED);
} else {
crm_info("Disconnected from executor");
}
}
static char *
make_stop_id(const char *rsc, int call_id)
{
return crm_strdup_printf("%s:%d", rsc, call_id);
}
static void
copy_instance_keys(gpointer key, gpointer value, gpointer user_data)
{
if (strstr(key, CRM_META "_") == NULL) {
g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value));
}
}
static void
copy_meta_keys(gpointer key, gpointer value, gpointer user_data)
{
if (strstr(key, CRM_META "_") != NULL) {
g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value));
}
}
/*!
* \internal
* \brief Remove a recurring operation from a resource's history
*
* \param[in,out] history Resource history to modify
* \param[in] op Operation to remove
*
* \return TRUE if the operation was found and removed, FALSE otherwise
*/
static gboolean
history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op)
{
GList *iter;
for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) {
lrmd_event_data_t *existing = iter->data;
if ((op->interval_ms == existing->interval_ms)
&& pcmk__str_eq(op->rsc_id, existing->rsc_id, pcmk__str_none)
&& pcmk__str_eq(op->op_type, existing->op_type, pcmk__str_casei)) {
history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter);
lrmd_free_event(existing);
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Free all recurring operations in resource history
*
* \param[in,out] history Resource history to modify
*/
static void
history_free_recurring_ops(rsc_history_t *history)
{
GList *iter;
for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) {
lrmd_free_event(iter->data);
}
g_list_free(history->recurring_op_list);
history->recurring_op_list = NULL;
}
/*!
* \internal
* \brief Free resource history
*
* \param[in,out] history Resource history to free
*/
void
history_free(gpointer data)
{
rsc_history_t *history = (rsc_history_t*)data;
if (history->stop_params) {
g_hash_table_destroy(history->stop_params);
}
/* Don't need to free history->rsc.id because it's set to history->id */
free(history->rsc.type);
free(history->rsc.standard);
free(history->rsc.provider);
lrmd_free_event(history->failed);
lrmd_free_event(history->last);
free(history->id);
history_free_recurring_ops(history);
free(history);
}
static void
update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
{
int target_rc = 0;
rsc_history_t *entry = NULL;
if (op->rsc_deleted) {
crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type);
controld_delete_resource_history(op->rsc_id, lrm_state->node_name,
NULL, crmd_cib_smart_opt());
return;
}
if (pcmk__str_eq(op->op_type, RSC_NOTIFY, pcmk__str_casei)) {
return;
}
crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type);
entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id);
if (entry == NULL && rsc) {
entry = calloc(1, sizeof(rsc_history_t));
entry->id = strdup(op->rsc_id);
g_hash_table_insert(lrm_state->resource_history, entry->id, entry);
entry->rsc.id = entry->id;
entry->rsc.type = strdup(rsc->type);
entry->rsc.standard = strdup(rsc->standard);
if (rsc->provider) {
entry->rsc.provider = strdup(rsc->provider);
} else {
entry->rsc.provider = NULL;
}
} else if (entry == NULL) {
crm_info("Resource %s no longer exists, not updating cache", op->rsc_id);
return;
}
entry->last_callid = op->call_id;
target_rc = rsc_op_expected_rc(op);
if (op->op_status == PCMK_LRM_OP_CANCELLED) {
if (op->interval_ms > 0) {
crm_trace("Removing cancelled recurring op: " PCMK__OP_FMT,
op->rsc_id, op->op_type, op->interval_ms);
history_remove_recurring_op(entry, op);
return;
} else {
crm_trace("Skipping " PCMK__OP_FMT " rc=%d, status=%d",
op->rsc_id, op->op_type, op->interval_ms, op->rc,
op->op_status);
}
} else if (did_rsc_op_fail(op, target_rc)) {
/* Store failed monitors here, otherwise the block below will cause them
* to be forgotten when a stop happens.
*/
if (entry->failed) {
lrmd_free_event(entry->failed);
}
entry->failed = lrmd_copy_event(op);
} else if (op->interval_ms == 0) {
if (entry->last) {
lrmd_free_event(entry->last);
}
entry->last = lrmd_copy_event(op);
if (op->params && pcmk__strcase_any_of(op->op_type, CRMD_ACTION_START,
- "reload", CRMD_ACTION_STATUS, NULL)) {
+ CRMD_ACTION_RELOAD,
+ CRMD_ACTION_RELOAD_AGENT,
+ CRMD_ACTION_STATUS, NULL)) {
if (entry->stop_params) {
g_hash_table_destroy(entry->stop_params);
}
entry->stop_params = pcmk__strkey_table(free, free);
g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params);
}
}
if (op->interval_ms > 0) {
/* Ensure there are no duplicates */
history_remove_recurring_op(entry, op);
crm_trace("Adding recurring op: " PCMK__OP_FMT,
op->rsc_id, op->op_type, op->interval_ms);
entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op));
} else if (entry->recurring_op_list && !pcmk__str_eq(op->op_type, RSC_STATUS, pcmk__str_casei)) {
crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT,
g_list_length(entry->recurring_op_list), op->rsc_id,
op->op_type, op->interval_ms);
history_free_recurring_ops(entry);
}
}
/*!
* \internal
* \brief Send a direct OK ack for a resource task
*
* \param[in] lrm_state LRM connection
* \param[in] input Input message being ack'ed
* \param[in] rsc_id ID of affected resource
* \param[in] rsc Affected resource (if available)
* \param[in] task Operation task being ack'ed
* \param[in] ack_host Name of host to send ack to
* \param[in] ack_sys IPC system name to ack
*/
static void
send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input,
const char *rsc_id, lrmd_rsc_info_t *rsc, const char *task,
const char *ack_host, const char *ack_sys)
{
lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task);
op->rc = PCMK_OCF_OK;
op->op_status = PCMK_LRM_OP_DONE;
controld_ack_event_directly(ack_host, ack_sys, rsc, op, rsc_id);
lrmd_free_event(op);
}
static inline const char *
op_node_name(lrmd_event_data_t *op)
{
return op->remote_nodename? op->remote_nodename : fsa_our_uname;
}
void
lrm_op_callback(lrmd_event_data_t * op)
{
CRM_CHECK(op != NULL, return);
switch (op->type) {
case lrmd_event_disconnect:
if (op->remote_nodename == NULL) {
/* If this is the local executor IPC connection, set the right
* bits in the controller when the connection goes down.
*/
lrm_connection_destroy();
}
break;
case lrmd_event_exec_complete:
{
lrm_state_t *lrm_state = lrm_state_find(op_node_name(op));
CRM_ASSERT(lrm_state != NULL);
process_lrm_event(lrm_state, op, NULL, NULL);
}
break;
default:
break;
}
}
/* A_LRM_CONNECT */
void
do_lrm_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* This only pertains to local executor connections. Remote connections are
* handled as resources within the scheduler. Connecting and disconnecting
* from remote executor instances is handled differently.
*/
lrm_state_t *lrm_state = NULL;
if(fsa_our_uname == NULL) {
return; /* Nothing to do */
}
lrm_state = lrm_state_find_or_create(fsa_our_uname);
if (lrm_state == NULL) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
if (action & A_LRM_DISCONNECT) {
if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) {
if (action == A_LRM_DISCONNECT) {
crmd_fsa_stall(FALSE);
return;
}
}
controld_clear_fsa_input_flags(R_LRM_CONNECTED);
crm_info("Disconnecting from the executor");
lrm_state_disconnect(lrm_state);
lrm_state_reset_tables(lrm_state, FALSE);
crm_notice("Disconnected from the executor");
}
if (action & A_LRM_CONNECT) {
int ret = pcmk_ok;
crm_debug("Connecting to the executor");
ret = lrm_state_ipc_connect(lrm_state);
if (ret != pcmk_ok) {
if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) {
crm_warn("Failed to connect to the executor %d time%s (%d max)",
lrm_state->num_lrm_register_fails,
pcmk__plural_s(lrm_state->num_lrm_register_fails),
MAX_LRM_REG_FAILS);
controld_start_timer(wait_timer);
crmd_fsa_stall(FALSE);
return;
}
}
if (ret != pcmk_ok) {
crm_err("Failed to connect to the executor the max allowed %d time%s",
lrm_state->num_lrm_register_fails,
pcmk__plural_s(lrm_state->num_lrm_register_fails));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
controld_set_fsa_input_flags(R_LRM_CONNECTED);
crm_info("Connection to the executor established");
}
if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) {
crm_err("Unexpected action %s in %s", fsa_action2string(action),
__func__);
}
}
static gboolean
lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level)
{
int counter = 0;
gboolean rc = TRUE;
const char *when = "lrm disconnect";
GHashTableIter gIter;
const char *key = NULL;
rsc_history_t *entry = NULL;
active_op_t *pending = NULL;
crm_debug("Checking for active resources before exit");
if (cur_state == S_TERMINATE) {
log_level = LOG_ERR;
when = "shutdown";
} else if (pcmk_is_set(fsa_input_register, R_SHUTDOWN)) {
when = "shutdown... waiting";
}
if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) {
guint removed = g_hash_table_foreach_remove(
lrm_state->pending_ops, stop_recurring_actions, lrm_state);
guint nremaining = g_hash_table_size(lrm_state->pending_ops);
if (removed || nremaining) {
crm_notice("Stopped %u recurring operation%s at %s (%u remaining)",
removed, pcmk__plural_s(removed), when, nremaining);
}
}
if (lrm_state->pending_ops) {
g_hash_table_iter_init(&gIter, lrm_state->pending_ops);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) {
/* Ignore recurring actions in the shutdown calculations */
if (pending->interval_ms == 0) {
counter++;
}
}
}
if (counter > 0) {
do_crm_log(log_level, "%d pending executor operation%s at %s",
counter, pcmk__plural_s(counter), when);
if ((cur_state == S_TERMINATE)
|| !pcmk_is_set(fsa_input_register, R_SENT_RSC_STOP)) {
g_hash_table_iter_init(&gIter, lrm_state->pending_ops);
while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) {
do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key);
}
} else {
rc = FALSE;
}
return rc;
}
if (lrm_state->resource_history == NULL) {
return rc;
}
if (pcmk_is_set(fsa_input_register, R_SHUTDOWN)) {
/* At this point we're not waiting, we're just shutting down */
when = "shutdown";
}
counter = 0;
g_hash_table_iter_init(&gIter, lrm_state->resource_history);
while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) {
if (is_rsc_active(lrm_state, entry->id) == FALSE) {
continue;
}
counter++;
if (log_level == LOG_ERR) {
crm_info("Found %s active at %s", entry->id, when);
} else {
crm_trace("Found %s active at %s", entry->id, when);
}
if (lrm_state->pending_ops) {
GHashTableIter hIter;
g_hash_table_iter_init(&hIter, lrm_state->pending_ops);
while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) {
if (pcmk__str_eq(entry->id, pending->rsc_id, pcmk__str_none)) {
crm_notice("%sction %s (%s) incomplete at %s",
pending->interval_ms == 0 ? "A" : "Recurring a",
key, pending->op_key, when);
}
}
}
}
if (counter) {
crm_err("%d resource%s active at %s",
counter, (counter == 1)? " was" : "s were", when);
}
return rc;
}
+/*!
+ * \internal
+ * \brief Build XML and string of parameters meeting some criteria, for digest
+ *
+ * \param[in] op Executor event with parameter table to use
+ * \param[in] metadata Parsed meta-data for executed resource agent
+ * \param[in] param_type Flag used for selection criteria
+ * \param[out] result Will be set to newly created XML with selected
+ * parameters as attributes
+ *
+ * \return Newly allocated space-separated string of parameter names
+ * \note Selection criteria vary by param_type: for the restart digest, we
+ * want parameters that are *not* marked reloadable (OCF 1.1) or that
+ * *are* marked unique (pre-1.1), for both string and XML results; for the
+ * secure digest, we want parameters that *are* marked private for the
+ * string, but parameters that are *not* marked private for the XML.
+ * \note It is the caller's responsibility to free the string return value with
+ * free() and the XML result with free_xml().
+ */
static char *
build_parameter_list(const lrmd_event_data_t *op,
const struct ra_metadata_s *metadata,
- xmlNode *result, enum ra_param_flags_e param_type,
- bool invert_for_xml)
+ enum ra_param_flags_e param_type, xmlNode **result)
{
char *list = NULL;
size_t len = 0;
+ *result = create_xml_node(NULL, XML_TAG_PARAMS);
+
for (GList *iter = metadata->ra_params; iter != NULL; iter = iter->next) {
struct ra_param_s *param = (struct ra_param_s *) iter->data;
- bool accept = pcmk_is_set(param->rap_flags, param_type);
- if (accept) {
+ bool accept_for_list = false;
+ bool accept_for_xml = false;
+
+ switch (param_type) {
+ case ra_param_reloadable:
+ accept_for_list = !pcmk_is_set(param->rap_flags, param_type);
+ accept_for_xml = accept_for_list;
+ break;
+
+ case ra_param_unique:
+ accept_for_list = pcmk_is_set(param->rap_flags, param_type);
+ accept_for_xml = accept_for_list;
+ break;
+
+ case ra_param_private:
+ accept_for_list = pcmk_is_set(param->rap_flags, param_type);
+ accept_for_xml = !accept_for_list;
+ break;
+ }
+
+ if (accept_for_list) {
crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type));
if (list == NULL) {
// We will later search for " WORD ", so start list with a space
pcmk__add_word(&list, &len, " ");
}
pcmk__add_word(&list, &len, param->rap_name);
} else {
crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type));
}
- if (result && (invert_for_xml? !accept : accept)) {
+ if (accept_for_xml) {
const char *v = g_hash_table_lookup(op->params, param->rap_name);
if (v != NULL) {
crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v);
- crm_xml_add(result, param->rap_name, v);
+ crm_xml_add(*result, param->rap_name, v);
}
}
}
if (list != NULL) {
// We will later search for " WORD ", so end list with a space
pcmk__add_word(&list, &len, " ");
}
return list;
}
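/* As a worked example with a hypothetical agent: given parameters "config"
 * (not marked reloadable) and "ip" (marked reloadable), calling
 * build_parameter_list() with ra_param_reloadable returns a list like
 * " config " and sets *result to XML carrying only config's current value --
 * that is, the parameters whose change must force a full restart rather than
 * a reload.
 */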
static void
append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
xmlNode *update, const char *version)
{
char *list = NULL;
char *digest = NULL;
xmlNode *restart = NULL;
CRM_LOG_ASSERT(op->params != NULL);
if (op->interval_ms > 0) {
/* monitors are not reloadable */
return;
}
- if (pcmk_is_set(metadata->ra_flags, ra_supports_reload)) {
- restart = create_xml_node(NULL, XML_TAG_PARAMS);
- /* Add any parameters with unique="1" to the "op-force-restart" list.
+ if (pcmk_is_set(metadata->ra_flags, ra_supports_reload_agent)) {
+ // Add parameters not marked reloadable to the "op-force-restart" list
+ list = build_parameter_list(op, metadata, ra_param_reloadable,
+ &restart);
+
+ } else if (pcmk_is_set(metadata->ra_flags, ra_supports_legacy_reload)) {
+ /* @COMPAT pre-OCF-1.1 resource agents
*
- * (Currently, we abuse "unique=0" to indicate reloadability. This is
- * nonstandard and should eventually be replaced once the OCF standard
- * is updated with something better.)
+ * Before OCF 1.1, Pacemaker abused "unique=0" to indicate
+ * reloadability. Add any parameters with unique="1" to the
+ * "op-force-restart" list.
*/
- list = build_parameter_list(op, metadata, restart, ra_param_unique,
- FALSE);
+ list = build_parameter_list(op, metadata, ra_param_unique, &restart);
} else {
- /* Resource does not support reloads */
+ // Resource does not support agent reloads
return;
}
digest = calculate_operation_digest(restart, version);
/* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload,
* no matter if it actually supports any parameters with unique="1"). */
crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? list: "");
crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest);
crm_trace("%s: %s, %s", op->rsc_id, digest, list);
crm_log_xml_trace(restart, "restart digest source");
free_xml(restart);
free(digest);
free(list);
}
static void
append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
xmlNode *update, const char *version)
{
char *list = NULL;
char *digest = NULL;
xmlNode *secure = NULL;
CRM_LOG_ASSERT(op->params != NULL);
/*
* To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the
* secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on
* the insecure ones
*/
- secure = create_xml_node(NULL, XML_TAG_PARAMS);
- list = build_parameter_list(op, metadata, secure, ra_param_private, TRUE);
+ list = build_parameter_list(op, metadata, ra_param_private, &secure);
if (list != NULL) {
digest = calculate_operation_digest(secure, version);
crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, list);
crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest);
crm_trace("%s: %s, %s", op->rsc_id, digest, list);
crm_log_xml_trace(secure, "secure digest source");
} else {
crm_trace("%s: no secure parameters", op->rsc_id);
}
free_xml(secure);
free(digest);
free(list);
}
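/* For instance (hypothetical agent): with "password" marked private and "ip"
 * not, XML_LRM_ATTR_OP_SECURE is set to " password " while
 * XML_LRM_ATTR_SECURE_DIGEST is computed over XML containing only ip, so a
 * later parameter change can still be detected without the digest source ever
 * holding the secret value.
 */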
static gboolean
build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op,
const char *node_name, const char *src)
{
int target_rc = 0;
xmlNode *xml_op = NULL;
struct ra_metadata_s *metadata = NULL;
const char *caller_version = NULL;
lrm_state_t *lrm_state = NULL;
+ uint32_t metadata_source = controld_metadata_from_agent;
if (op == NULL) {
return FALSE;
}
target_rc = rsc_op_expected_rc(op);
/* there is a small risk in formerly mixed clusters that it will
* be sub-optimal.
*
* however with our upgrade policy, the update we send should
* still be completely supported anyway
*/
caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION);
CRM_LOG_ASSERT(caller_version != NULL);
if(caller_version == NULL) {
caller_version = CRM_FEATURE_SET;
}
crm_trace("Building %s operation update with originator version: %s", op->rsc_id, caller_version);
xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc,
fsa_our_uname, src, LOG_DEBUG);
if (xml_op == NULL) {
return TRUE;
}
if ((rsc == NULL) || (op->params == NULL)
|| !crm_op_needs_metadata(rsc->standard, op->op_type)) {
crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)",
op->op_type, op->rsc_id, op->params, rsc);
return TRUE;
}
lrm_state = lrm_state_find(node_name);
if (lrm_state == NULL) {
crm_warn("Cannot calculate digests for operation " PCMK__OP_FMT
" because we have no connection to executor for %s",
op->rsc_id, op->op_type, op->interval_ms, node_name);
return TRUE;
}
- metadata = metadata_cache_get(lrm_state->metadata_cache, rsc);
+ /* Getting meta-data from cache is OK unless this is a successful start
+ * action -- always refresh from the agent for those, in case the
+ * resource agent was updated.
+ *
+ * @TODO Only refresh the meta-data after starts if the agent actually
+ * changed (using something like inotify, or a hash or modification time of
+ * the agent executable).
+ */
+ if ((op->op_status != PCMK_LRM_OP_DONE) || (op->rc != target_rc)
+ || !pcmk__str_eq(op->op_type, CRMD_ACTION_START, pcmk__str_none)) {
+ metadata_source |= controld_metadata_from_cache;
+ }
+ metadata = controld_get_rsc_metadata(lrm_state, rsc, metadata_source);
if (metadata == NULL) {
- /* For now, we always collect resource agent meta-data via a local,
- * synchronous, direct execution of the agent. This has multiple issues:
- * the executor should execute agents, not the controller; meta-data for
- * Pacemaker Remote nodes should be collected on those nodes, not
- * locally; and the meta-data call shouldn't eat into the timeout of the
- * real action being performed.
- *
- * These issues are planned to be addressed by having the scheduler
- * schedule a meta-data cache check at the beginning of each transition.
- * Once that is working, this block will only be a fallback in case the
- * initial collection fails.
- */
- char *metadata_str = NULL;
-
- int rc = lrm_state_get_metadata(lrm_state, rsc->standard,
- rsc->provider, rsc->type,
- &metadata_str, 0);
-
- if (rc != pcmk_ok) {
- crm_warn("Failed to get metadata for %s (%s:%s:%s)",
- rsc->id, rsc->standard, rsc->provider, rsc->type);
- return TRUE;
- }
-
- metadata = metadata_cache_update(lrm_state->metadata_cache, rsc,
- metadata_str);
- free(metadata_str);
- if (metadata == NULL) {
- crm_warn("Failed to update metadata for %s (%s:%s:%s)",
- rsc->id, rsc->standard, rsc->provider, rsc->type);
- return TRUE;
- }
+ return TRUE;
}
#if ENABLE_VERSIONED_ATTRS
crm_xml_add(xml_op, XML_ATTR_RA_VERSION, metadata->ra_version);
#endif
crm_trace("Including additional digests for %s:%s:%s",
rsc->standard, rsc->provider, rsc->type);
append_restart_list(op, metadata, xml_op, caller_version);
append_secure_list(op, metadata, xml_op, caller_version);
return TRUE;
}
static gboolean
is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id)
{
rsc_history_t *entry = NULL;
entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
if (entry == NULL || entry->last == NULL) {
return FALSE;
}
crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type,
entry->last->interval_ms, entry->last->rc);
if (entry->last->rc == PCMK_OCF_OK && pcmk__str_eq(entry->last->op_type, CRMD_ACTION_STOP, pcmk__str_casei)) {
return FALSE;
} else if (entry->last->rc == PCMK_OCF_OK
&& pcmk__str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
// A stricter check is too complex ... leave that to the scheduler
return FALSE;
} else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) {
return FALSE;
} else if ((entry->last->interval_ms == 0)
&& (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) {
/* Badly configured resources can't be reliably stopped */
return FALSE;
}
return TRUE;
}
static gboolean
build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list)
{
GHashTableIter iter;
rsc_history_t *entry = NULL;
g_hash_table_iter_init(&iter, lrm_state->resource_history);
while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) {
GList *gIter = NULL;
xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE);
crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id);
crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type);
crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard);
crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider);
if (entry->last && entry->last->params) {
const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER);
if (container) {
crm_trace("Resource %s is a part of container resource %s", entry->id, container);
crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container);
}
}
build_operation_update(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name,
__func__);
build_operation_update(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name,
__func__);
for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) {
build_operation_update(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name,
__func__);
}
}
return FALSE;
}
static xmlNode *
do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags)
{
xmlNode *xml_state = NULL;
xmlNode *xml_data = NULL;
xmlNode *rsc_list = NULL;
crm_node_t *peer = NULL;
peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY);
CRM_CHECK(peer != NULL, return NULL);
xml_state = create_node_state_update(peer, update_flags, NULL,
__func__);
if (xml_state == NULL) {
return NULL;
}
xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM);
crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid);
rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES);
/* Build a list of active (not always running) resources */
build_active_RAs(lrm_state, rsc_list);
crm_log_xml_trace(xml_state, "Current executor state");
return xml_state;
}
xmlNode *
controld_query_executor_state(const char *node_name)
{
lrm_state_t *lrm_state = lrm_state_find(node_name);
if (!lrm_state) {
crm_err("Could not find executor state for node %s", node_name);
return NULL;
}
return do_lrm_query_internal(lrm_state,
node_update_cluster|node_update_peer);
}
/*!
* \internal
* \brief Map standard Pacemaker return code to operation status and OCF code
*
* \param[out] event Executor event whose status and return code should be set
* \param[in] rc Standard Pacemaker return code
*/
void
controld_rc2event(lrmd_event_data_t *event, int rc)
{
switch (rc) {
case pcmk_rc_ok:
event->rc = PCMK_OCF_OK;
event->op_status = PCMK_LRM_OP_DONE;
break;
case EACCES:
event->rc = PCMK_OCF_INSUFFICIENT_PRIV;
event->op_status = PCMK_LRM_OP_ERROR;
break;
default:
event->rc = PCMK_OCF_UNKNOWN_ERROR;
event->op_status = PCMK_LRM_OP_ERROR;
break;
}
}
/*!
* \internal
* \brief Trigger a new transition after CIB status was deleted
*
* If a CIB status delete was not expected (as part of the transition graph),
* trigger a new transition by updating the (arbitrary) "last-lrm-refresh"
* cluster property.
*
* \param[in] from_sys IPC name that requested the delete
* \param[in] rsc_id Resource whose status was deleted (for logging only)
*/
void
controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id)
{
if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_casei)) {
char *now_s = crm_strdup_printf("%lld", (long long) time(NULL));
crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id);
update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG,
NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s,
FALSE, NULL, NULL);
free(now_s);
}
}
static void
notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc)
{
lrmd_event_data_t *op = NULL;
const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM);
const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
crm_info("Notifying %s on %s that %s was%s deleted",
from_sys, (from_host? from_host : "localhost"), rsc_id,
((rc == pcmk_ok)? "" : " not"));
op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE);
controld_rc2event(op, pcmk_legacy2rc(rc));
controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id);
lrmd_free_event(op);
controld_trigger_delete_refresh(from_sys, rsc_id);
}
static gboolean
lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data)
{
struct delete_event_s *event = user_data;
struct pending_deletion_op_s *op = value;
if (pcmk__str_eq(event->rsc, op->rsc, pcmk__str_none)) {
notify_deleted(event->lrm_state, op->input, event->rsc, event->rc);
return TRUE;
}
return FALSE;
}
static gboolean
lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data)
{
const char *rsc = user_data;
active_op_t *pending = value;
if (pcmk__str_eq(rsc, pending->rsc_id, pcmk__str_none)) {
crm_info("Removing op %s:%d for deleted resource %s",
pending->op_key, pending->call_id, rsc);
return TRUE;
}
return FALSE;
}
static void
delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id,
GHashTableIter * rsc_gIter, int rc, const char *user_name)
{
struct delete_event_s event;
CRM_CHECK(rsc_id != NULL, return);
if (rc == pcmk_ok) {
char *rsc_id_copy = strdup(rsc_id);
if (rsc_gIter) {
g_hash_table_iter_remove(rsc_gIter);
} else {
g_hash_table_remove(lrm_state->resource_history, rsc_id_copy);
}
controld_delete_resource_history(rsc_id_copy, lrm_state->node_name,
user_name, crmd_cib_smart_opt());
g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy);
free(rsc_id_copy);
}
if (input) {
notify_deleted(lrm_state, input, rsc_id, rc);
}
event.rc = rc;
event.rsc = rsc_id;
event.lrm_state = lrm_state;
g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event);
}
/*!
* \internal
* \brief Erase an LRM history entry from the CIB, given the operation data
*
* \param[in] lrm_state LRM state of the desired node
* \param[in] op Operation whose history should be deleted
*/
static void
erase_lrm_history_by_op(lrm_state_t *lrm_state, lrmd_event_data_t *op)
{
xmlNode *xml_top = NULL;
CRM_CHECK(op != NULL, return);
xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP);
crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id);
crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data);
if (op->interval_ms > 0) {
char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms);
/* Avoid deleting last_failure too (if it was a result of this recurring op failing) */
crm_xml_add(xml_top, XML_ATTR_ID, op_id);
free(op_id);
}
crm_debug("Erasing resource operation history for " PCMK__OP_FMT " (call=%d)",
op->rsc_id, op->op_type, op->interval_ms, op->call_id);
fsa_cib_conn->cmds->remove(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top,
cib_quorum_override);
crm_log_xml_trace(xml_top, "op:cancel");
free_xml(xml_top);
}
/* Define xpath to find LRM resource history entry by node and resource */
#define XPATH_HISTORY \
"/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \
"/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
"/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \
"/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \
"/" XML_LRM_TAG_RSC_OP
/* ... and also by operation key */
#define XPATH_HISTORY_ID XPATH_HISTORY \
"[@" XML_ATTR_ID "='%s']"
/* ... and also by operation key and operation call ID */
#define XPATH_HISTORY_CALL XPATH_HISTORY \
"[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']"
/* ... and also by operation key and original operation key */
#define XPATH_HISTORY_ORIG XPATH_HISTORY \
"[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']"
/*!
* \internal
* \brief Erase an LRM history entry from the CIB, given operation identifiers
*
* \param[in] lrm_state LRM state of the node to clear history for
* \param[in] rsc_id Name of resource to clear history for
* \param[in] key Operation key of operation to clear history for
* \param[in] orig_op If specified, delete only if it has this original op
* \param[in] call_id If specified, delete entry only if it has this call ID
*/
static void
erase_lrm_history_by_id(lrm_state_t *lrm_state, const char *rsc_id,
const char *key, const char *orig_op, int call_id)
{
char *op_xpath = NULL;
CRM_CHECK((rsc_id != NULL) && (key != NULL), return);
if (call_id > 0) {
op_xpath = crm_strdup_printf(XPATH_HISTORY_CALL,
lrm_state->node_name, rsc_id, key,
call_id);
} else if (orig_op) {
op_xpath = crm_strdup_printf(XPATH_HISTORY_ORIG,
lrm_state->node_name, rsc_id, key,
orig_op);
} else {
op_xpath = crm_strdup_printf(XPATH_HISTORY_ID,
lrm_state->node_name, rsc_id, key);
}
crm_debug("Erasing resource operation history for %s on %s (call=%d)",
key, rsc_id, call_id);
fsa_cib_conn->cmds->remove(fsa_cib_conn, op_xpath, NULL,
cib_quorum_override | cib_xpath);
free(op_xpath);
}
static inline gboolean
last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms)
{
if (entry == NULL) {
return FALSE;
}
if (op == NULL) {
return TRUE;
}
return (pcmk__str_eq(op, entry->failed->op_type, pcmk__str_casei)
&& (interval_ms == entry->failed->interval_ms));
}
/*!
* \internal
* \brief Clear a resource's last failure
*
* Erase a resource's last failure on a particular node from both the
* LRM resource history in the CIB, and the resource history remembered
* for the LRM state.
*
* \param[in] rsc_id Resource name
* \param[in] node_name Node name
* \param[in] operation If specified, only clear if matching this operation
* \param[in] interval_ms If operation is specified, it has this interval
*/
void
lrm_clear_last_failure(const char *rsc_id, const char *node_name,
const char *operation, guint interval_ms)
{
char *op_key = NULL;
char *orig_op_key = NULL;
lrm_state_t *lrm_state = NULL;
lrm_state = lrm_state_find(node_name);
if (lrm_state == NULL) {
return;
}
/* Erase from CIB */
op_key = pcmk__op_key(rsc_id, "last_failure", 0);
if (operation) {
orig_op_key = pcmk__op_key(rsc_id, operation, interval_ms);
}
erase_lrm_history_by_id(lrm_state, rsc_id, op_key, orig_op_key, 0);
free(op_key);
free(orig_op_key);
/* Remove from memory */
if (lrm_state->resource_history) {
rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history,
rsc_id);
if (last_failed_matches_op(entry, operation, interval_ms)) {
lrmd_free_event(entry->failed);
entry->failed = NULL;
}
}
}
/* Returns: gboolean - cancellation is in progress */
static gboolean
cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove)
{
int rc = pcmk_ok;
char *local_key = NULL;
active_op_t *pending = NULL;
CRM_CHECK(op != 0, return FALSE);
CRM_CHECK(rsc_id != NULL, return FALSE);
if (key == NULL) {
local_key = make_stop_id(rsc_id, op);
key = local_key;
}
pending = g_hash_table_lookup(lrm_state->pending_ops, key);
if (pending) {
if (remove && !pcmk_is_set(pending->flags, active_op_remove)) {
controld_set_active_op_flags(pending, active_op_remove);
crm_debug("Scheduling %s for removal", key);
}
if (pcmk_is_set(pending->flags, active_op_cancelled)) {
crm_debug("Operation %s already cancelled", key);
free(local_key);
return FALSE;
}
controld_set_active_op_flags(pending, active_op_cancelled);
} else {
crm_info("No pending op found for %s", key);
free(local_key);
return FALSE;
}
crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key);
rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type,
pending->interval_ms);
if (rc == pcmk_ok) {
crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key);
free(local_key);
return TRUE;
}
crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key);
/* The caller needs to make sure the entry is
* removed from the pending_ops list
*
* Usually by returning TRUE inside the worker function
* supplied to g_hash_table_foreach_remove()
*
* Not removing the entry from pending_ops will block
* the node from shutting down
*/
free(local_key);
return FALSE;
}
struct cancel_data {
gboolean done;
gboolean remove;
const char *key;
lrmd_rsc_info_t *rsc;
lrm_state_t *lrm_state;
};
static gboolean
cancel_action_by_key(gpointer key, gpointer value, gpointer user_data)
{
gboolean remove = FALSE;
struct cancel_data *data = user_data;
active_op_t *op = value;
if (pcmk__str_eq(op->op_key, data->key, pcmk__str_none)) {
data->done = TRUE;
remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove);
}
return remove;
}
static gboolean
cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove)
{
guint removed = 0;
struct cancel_data data;
CRM_CHECK(rsc != NULL, return FALSE);
CRM_CHECK(key != NULL, return FALSE);
data.key = key;
data.rsc = rsc;
data.done = FALSE;
data.remove = remove;
data.lrm_state = lrm_state;
removed = g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data);
crm_trace("Removed %u op cache entries, new size: %u",
removed, g_hash_table_size(lrm_state->pending_ops));
return data.done;
}
/*!
* \internal
* \brief Retrieve resource information from LRM
*
* \param[in] lrm_state LRM connection to use
* \param[in] rsc_xml XML containing resource configuration
* \param[in] do_create If true, register resource with LRM if not already
* \param[out] rsc_info Where to store resource information obtained from LRM
*
* \retval pcmk_ok Success (and rsc_info holds newly allocated result)
* \retval -EINVAL Required information is missing from arguments
* \retval -ENOTCONN No active connection to LRM
* \retval -ENODEV Resource not found
* \retval -errno Error communicating with executor when registering resource
*
* \note Caller is responsible for freeing result on success.
*/
static int
get_lrm_resource(lrm_state_t *lrm_state, xmlNode *rsc_xml, gboolean do_create,
lrmd_rsc_info_t **rsc_info)
{
const char *id = ID(rsc_xml);
CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL);
CRM_CHECK(id, return -EINVAL);
if (lrm_state_is_connected(lrm_state) == FALSE) {
return -ENOTCONN;
}
crm_trace("Retrieving resource information for %s from the executor", id);
*rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0);
// If resource isn't known by ID, try clone name, if provided
if (!*rsc_info) {
const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG);
if (long_id) {
*rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0);
}
}
if ((*rsc_info == NULL) && do_create) {
const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS);
const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER);
const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE);
int rc;
crm_trace("Registering resource %s with the executor", id);
rc = lrm_state_register_rsc(lrm_state, id, class, provider, type,
lrmd_opt_drop_recurring);
if (rc != pcmk_ok) {
fsa_data_t *msg_data = NULL;
crm_err("Could not register resource %s with the executor on %s: %s "
CRM_XS " rc=%d",
id, lrm_state->node_name, pcmk_strerror(rc), rc);
/* Register this as an internal error if this involves the local
* executor. Otherwise, we're likely dealing with an unresponsive
* remote node, which is not an FSA failure.
*/
if (lrm_state_is_local(lrm_state) == TRUE) {
register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
}
return rc;
}
*rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0);
}
return *rsc_info? pcmk_ok : -ENODEV;
}
static void
delete_resource(lrm_state_t * lrm_state,
const char *id,
lrmd_rsc_info_t * rsc,
GHashTableIter * gIter,
const char *sys,
const char *user,
ha_msg_input_t * request,
gboolean unregister)
{
int rc = pcmk_ok;
crm_info("Removing resource %s from executor for %s%s%s",
id, sys, (user? " as " : ""), (user? user : ""));
if (rsc && unregister) {
rc = lrm_state_unregister_rsc(lrm_state, id, 0);
}
if (rc == pcmk_ok) {
crm_trace("Resource %s deleted from executor", id);
} else if (rc == -EINPROGRESS) {
crm_info("Deletion of resource '%s' from executor is pending", id);
if (request) {
struct pending_deletion_op_s *op = NULL;
char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE);
op = calloc(1, sizeof(struct pending_deletion_op_s));
op->rsc = strdup(rsc->id);
op->input = copy_ha_msg_input(request);
g_hash_table_insert(lrm_state->deletion_ops, ref, op);
}
return;
} else {
crm_warn("Could not delete '%s' from executor for %s%s%s: %s "
CRM_XS " rc=%d", id, sys, (user? " as " : ""),
(user? user : ""), pcmk_strerror(rc), rc);
}
delete_rsc_entry(lrm_state, request, id, gIter, rc, user);
}
static int
get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id)
{
int call_id = 999999999;
rsc_history_t *entry = NULL;
if(lrm_state) {
entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
}
/* Make sure the call id is greater than the last successful operation,
* otherwise the failure will not result in a possible recovery of the resource
* as it could appear the failure occurred before the successful start */
if (entry) {
call_id = entry->last_callid + 1;
}
if (call_id < 0) {
call_id = 1;
}
return call_id;
}
static void
fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status,
enum ocf_exitcode op_exitcode)
{
op->call_id = get_fake_call_id(lrm_state, op->rsc_id);
op->t_run = time(NULL);
op->t_rcchange = op->t_run;
op->op_status = op_status;
op->rc = op_exitcode;
}
static void
force_reprobe(lrm_state_t *lrm_state, const char *from_sys,
const char *from_host, const char *user_name,
gboolean is_remote_node)
{
GHashTableIter gIter;
rsc_history_t *entry = NULL;
crm_info("Clearing resource history on node %s", lrm_state->node_name);
g_hash_table_iter_init(&gIter, lrm_state->resource_history);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
/* only unregister the resource during a reprobe if it is not a remote connection
* resource. otherwise unregistering the connection will terminate remote-node
* membership */
gboolean unregister = TRUE;
if (is_remote_lrmd_ra(NULL, NULL, entry->id)) {
lrm_state_t *remote_lrm_state = lrm_state_find(entry->id);
if (remote_lrm_state) {
/* when forcing a reprobe, make sure to clear remote node before
* clearing the remote node's connection resource */
force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE);
}
unregister = FALSE;
}
delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys,
user_name, NULL, unregister);
}
/* Now delete the copy in the CIB */
controld_delete_node_state(lrm_state->node_name, controld_section_lrm,
cib_scope_local);
/* Finally, _delete_ the value in pacemaker-attrd -- setting it to FALSE
* would result in the scheduler sending us back here again
*/
update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node);
}
/*!
* \internal
* \brief Fail a requested action without actually executing it
*
* For an action that can't be executed, process it similarly to an actual
* execution result, with specified error status (except for notify actions,
* which will always be treated as successful).
*
* \param[in] lrm_state Executor connection that action is for
* \param[in] action Action XML from request
* \param[in] rc Desired return code to use
* \param[in] op_status Desired operation status to use
*/
static void
synthesize_lrmd_failure(lrm_state_t *lrm_state, xmlNode *action,
int op_status, enum ocf_exitcode rc)
{
lrmd_event_data_t *op = NULL;
const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK);
const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET);
xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE);
if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) {
/* @TODO Should we do something else, like direct ack? */
crm_info("Can't fake %s failure (%d) on %s without resource configuration",
crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc,
target_node);
return;
} else if(operation == NULL) {
/* This probably came from crm_resource -C, nothing to do */
crm_info("Can't fake %s failure (%d) on %s without operation",
ID(xml_rsc), rc, target_node);
return;
}
op = construct_op(lrm_state, action, ID(xml_rsc), operation);
if (pcmk__str_eq(operation, RSC_NOTIFY, pcmk__str_casei)) { // Notifications can't fail
fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_OK);
} else {
fake_op_status(lrm_state, op, op_status, rc);
}
crm_info("Faking " PCMK__OP_FMT " result (%d) on %s",
op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node);
// Process the result as if it came from the LRM
process_lrm_event(lrm_state, op, NULL, action);
lrmd_free_event(op);
}
/*!
* \internal
* \brief Get target of an LRM operation
*
* \param[in] xml LRM operation data XML
*
* \return LRM operation target node name (local node or Pacemaker Remote node)
*/
static const char *
lrm_op_target(xmlNode *xml)
{
const char *target = NULL;
if (xml) {
target = crm_element_value(xml, XML_LRM_ATTR_TARGET);
}
if (target == NULL) {
target = fsa_our_uname;
}
return target;
}
static void
fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name,
const char *from_host, const char *from_sys)
{
lrmd_event_data_t *op = NULL;
lrmd_rsc_info_t *rsc = NULL;
xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE);
CRM_CHECK(xml_rsc != NULL, return);
/* The executor simply executes operations and reports the results, without
* any concept of success or failure, so to fail a resource, we must fake
* what a failure looks like.
*
* To do this, we create a fake executor operation event for the resource,
* and pass that event to the executor client callback so it will be
* processed as if it came from the executor.
*/
op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon");
fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR);
free((char*) op->user_data);
op->user_data = NULL;
op->interval_ms = 0;
if (user_name && !pcmk__is_privileged(user_name)) {
crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc));
controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc));
lrmd_free_event(op);
return;
}
if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) {
crm_info("Failing resource %s...", rsc->id);
op->exit_reason = strdup("Simulated failure");
process_lrm_event(lrm_state, op, NULL, xml);
op->op_status = PCMK_LRM_OP_DONE;
op->rc = PCMK_OCF_OK;
lrmd_free_rsc_info(rsc);
} else {
crm_info("Cannot find/create resource in order to fail it...");
crm_log_xml_warn(xml, "bad input");
}
controld_ack_event_directly(from_host, from_sys, NULL, op, ID(xml_rsc));
lrmd_free_event(op);
}
static void
handle_refresh_op(lrm_state_t *lrm_state, const char *user_name,
const char *from_host, const char *from_sys)
{
int rc = pcmk_ok;
xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all);
fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name);
crm_info("Forced a local resource history refresh: call=%d", rc);
if (!pcmk__str_eq(CRM_SYSTEM_CRMD, from_sys, pcmk__str_casei)) {
xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, fragment, from_host,
from_sys, CRM_SYSTEM_LRMD,
fsa_our_uuid);
crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host);
if (relay_message(reply, TRUE) == FALSE) {
crm_log_xml_err(reply, "Unable to route reply");
}
free_xml(reply);
}
free_xml(fragment);
}
static void
handle_query_op(xmlNode *msg, lrm_state_t *lrm_state)
{
xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all);
xmlNode *reply = create_reply(msg, data);
if (relay_message(reply, TRUE) == FALSE) {
crm_err("Unable to route reply");
crm_log_xml_err(reply, "reply");
}
free_xml(reply);
free_xml(data);
}
static void
handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys,
const char *from_host, const char *user_name,
gboolean is_remote_node)
{
crm_notice("Forcing the status of all resources to be redetected");
force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node);
if (!pcmk__strcase_any_of(from_sys, CRM_SYSTEM_PENGINE, CRM_SYSTEM_TENGINE, NULL)) {
xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host,
from_sys, CRM_SYSTEM_LRMD,
fsa_our_uuid);
crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host);
if (relay_message(reply, TRUE) == FALSE) {
crm_log_xml_err(reply, "Unable to route reply");
}
free_xml(reply);
}
}
static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state,
lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys)
{
char *op_key = NULL;
char *meta_key = NULL;
int call = 0;
const char *call_id = NULL;
const char *op_task = NULL;
guint interval_ms = 0;
gboolean in_progress = FALSE;
xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE);
CRM_CHECK(params != NULL, return FALSE);
meta_key = crm_meta_name(XML_LRM_ATTR_TASK);
op_task = crm_element_value(params, meta_key);
free(meta_key);
CRM_CHECK(op_task != NULL, return FALSE);
meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
if (crm_element_value_ms(params, meta_key, &interval_ms) != pcmk_ok) {
free(meta_key);
return FALSE;
}
free(meta_key);
op_key = pcmk__op_key(rsc->id, op_task, interval_ms);
meta_key = crm_meta_name(XML_LRM_ATTR_CALLID);
call_id = crm_element_value(params, meta_key);
free(meta_key);
crm_debug("Scheduler requested op %s (call=%s) be cancelled",
op_key, (call_id? call_id : "NA"));
pcmk__scan_min_int(call_id, &call, 0);
if (call == 0) {
// Normal case when the scheduler cancels a recurring op
in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE);
} else {
// Normal case when the scheduler cancels an orphan op
in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE);
}
// Acknowledge cancellation operation if for a remote connection resource
if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) {
char *op_id = make_stop_id(rsc->id, call);
if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) {
crm_info("Nothing known about operation %d for %s", call, op_key);
}
erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call);
send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task,
from_host, from_sys);
/* needed at least for cancellation of a remote operation */
g_hash_table_remove(lrm_state->pending_ops, op_id);
free(op_id);
} else {
/* No ack is needed since abcdaa8, but peers with older versions
* in a rolling upgrade need one. We didn't bump the feature set
* at that commit, so we can only compare against the previous
* CRM version (3.0.8). If any peers have feature set 3.0.9 but
* not abcdaa8, they will time out waiting for the ack (no
* released versions of Pacemaker are affected).
*/
const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION);
if (compare_version(peer_version, "3.0.8") <= 0) {
crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)",
op_key, from_host, peer_version);
send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task,
from_host, from_sys);
}
}
free(op_key);
return TRUE;
}
static void
do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state,
lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host,
bool crm_rsc_delete, const char *user_name)
{
gboolean unregister = TRUE;
int cib_rc = controld_delete_resource_history(rsc->id, lrm_state->node_name,
user_name,
cib_dryrun|cib_sync_call);
if (cib_rc != pcmk_rc_ok) {
lrmd_event_data_t *op = NULL;
op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE);
op->op_status = PCMK_LRM_OP_ERROR;
if (cib_rc == EACCES) {
op->rc = PCMK_OCF_INSUFFICIENT_PRIV;
} else {
op->rc = PCMK_OCF_UNKNOWN_ERROR;
}
controld_ack_event_directly(from_host, from_sys, NULL, op, rsc->id);
lrmd_free_event(op);
return;
}
if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) {
unregister = FALSE;
}
delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys,
user_name, input, unregister);
}
/* A_LRM_INVOKE */
void
do_lrm_invoke(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
lrm_state_t *lrm_state = NULL;
const char *crm_op = NULL;
const char *from_sys = NULL;
const char *from_host = NULL;
const char *operation = NULL;
ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
const char *user_name = NULL;
const char *target_node = NULL;
gboolean is_remote_node = FALSE;
bool crm_rsc_delete = FALSE;
target_node = lrm_op_target(input->xml);
is_remote_node = !pcmk__str_eq(target_node, fsa_our_uname,
pcmk__str_casei);
lrm_state = lrm_state_find(target_node);
if ((lrm_state == NULL) && is_remote_node) {
crm_err("Failing action because local node has never had connection to remote node %s",
target_node);
synthesize_lrmd_failure(NULL, input->xml, PCMK_LRM_OP_NOT_CONNECTED,
PCMK_OCF_UNKNOWN_ERROR);
return;
}
CRM_ASSERT(lrm_state != NULL);
user_name = pcmk__update_acl_user(input->msg, F_CRM_USER, NULL);
crm_op = crm_element_value(input->msg, F_CRM_TASK);
from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM);
if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_casei)) {
from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
}
crm_trace("Executor %s command from %s as user %s",
crm_op, from_sys, user_name);
if (pcmk__str_eq(crm_op, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_casei)) {
crm_rsc_delete = TRUE; // from crm_resource
}
operation = CRMD_ACTION_DELETE;
} else if (pcmk__str_eq(crm_op, CRM_OP_LRM_FAIL, pcmk__str_casei)) {
fail_lrm_resource(input->xml, lrm_state, user_name, from_host,
from_sys);
return;
} else if (input->xml != NULL) {
operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK);
}
if (pcmk__str_eq(crm_op, CRM_OP_LRM_REFRESH, pcmk__str_casei)) {
handle_refresh_op(lrm_state, user_name, from_host, from_sys);
} else if (pcmk__str_eq(crm_op, CRM_OP_LRM_QUERY, pcmk__str_casei)) {
handle_query_op(input->msg, lrm_state);
} else if (pcmk__str_eq(operation, CRM_OP_PROBED, pcmk__str_casei)) {
update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE,
user_name, is_remote_node);
} else if (pcmk__str_eq(crm_op, CRM_OP_REPROBE, pcmk__str_casei)
|| pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_casei)) {
handle_reprobe_op(lrm_state, from_sys, from_host, user_name,
is_remote_node);
} else if (operation != NULL) {
lrmd_rsc_info_t *rsc = NULL;
xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE);
gboolean create_rsc = !pcmk__str_eq(operation, CRMD_ACTION_DELETE,
pcmk__str_casei);
int rc;
// We can't return anything meaningful without a resource ID
CRM_CHECK(xml_rsc && ID(xml_rsc), return);
rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc);
if (rc == -ENOTCONN) {
synthesize_lrmd_failure(lrm_state, input->xml,
PCMK_LRM_OP_NOT_CONNECTED,
PCMK_OCF_UNKNOWN_ERROR);
return;
} else if ((rc < 0) && !create_rsc) {
/* Delete of malformed or nonexistent resource
* (deleting something that does not exist is a success)
*/
crm_notice("Not registering resource '%s' for a %s event "
CRM_XS " get-rc=%d (%s) transition-key=%s",
ID(xml_rsc), operation,
rc, pcmk_strerror(rc), ID(input->xml));
delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok,
user_name);
return;
} else if (rc == -EINVAL) {
// Resource operation on malformed resource
crm_err("Invalid resource definition for %s", ID(xml_rsc));
crm_log_xml_warn(input->msg, "invalid resource");
synthesize_lrmd_failure(lrm_state, input->xml, PCMK_LRM_OP_ERROR,
PCMK_OCF_NOT_CONFIGURED); // fatal error
return;
} else if (rc < 0) {
// Error communicating with the executor
crm_err("Could not register resource '%s' with executor: %s "
CRM_XS " rc=%d",
ID(xml_rsc), pcmk_strerror(rc), rc);
crm_log_xml_warn(input->msg, "failed registration");
synthesize_lrmd_failure(lrm_state, input->xml, PCMK_LRM_OP_ERROR,
PCMK_OCF_INVALID_PARAM); // hard error
return;
}
if (pcmk__str_eq(operation, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) {
crm_log_xml_warn(input->xml, "Bad command");
}
} else if (pcmk__str_eq(operation, CRMD_ACTION_DELETE, pcmk__str_casei)) {
do_lrm_delete(input, lrm_state, rsc, from_sys, from_host,
crm_rsc_delete, user_name);
+ } else if (pcmk__str_any_of(operation, CRMD_ACTION_RELOAD,
+ CRMD_ACTION_RELOAD_AGENT, NULL)) {
+ /* Pre-2.1.0 DCs will schedule reload actions only, and 2.1.0+ DCs
+ * will schedule reload-agent actions only. In either case, we need
+ * to map that to whatever the resource agent actually supports.
+ * Default to the OCF 1.1 name.
+ */
+ struct ra_metadata_s *md = NULL;
+ const char *reload_name = CRMD_ACTION_RELOAD_AGENT;
+
+ md = controld_get_rsc_metadata(lrm_state, rsc,
+ controld_metadata_from_cache);
+ if ((md != NULL)
+ && pcmk_is_set(md->ra_flags, ra_supports_legacy_reload)) {
+ reload_name = CRMD_ACTION_RELOAD;
+ }
+ do_lrm_rsc_op(lrm_state, rsc, reload_name, input->xml);
+
} else {
do_lrm_rsc_op(lrm_state, rsc, operation, input->xml);
}
lrmd_free_rsc_info(rsc);
} else {
crm_err("Cannot perform operation %s of unknown type", crm_str(crm_op));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
#if ENABLE_VERSIONED_ATTRS
static void
resolve_versioned_parameters(lrm_state_t *lrm_state, const char *rsc_id,
const xmlNode *rsc_op, GHashTable *params)
{
/* Resource info *should* already be cached, so we don't get
* executor call */
lrmd_rsc_info_t *rsc = lrm_state_get_rsc_info(lrm_state, rsc_id, 0);
struct ra_metadata_s *metadata;
- metadata = metadata_cache_get(lrm_state->metadata_cache, rsc);
+ metadata = controld_get_rsc_metadata(lrm_state, rsc,
+ controld_metadata_from_cache);
if (metadata) {
xmlNode *versioned_attrs = NULL;
GHashTable *hash = NULL;
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_ATTRS);
hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version);
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
g_hash_table_iter_steal(&iter);
g_hash_table_replace(params, key, value);
}
g_hash_table_destroy(hash);
versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_META);
hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version);
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
g_hash_table_replace(params, crm_meta_name(key), strdup(value));
if (pcmk__str_eq(key, XML_ATTR_TIMEOUT, pcmk__str_casei)) {
pcmk__scan_min_int(value, &op->timeout, 0);
} else if (pcmk__str_eq(key, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) {
pcmk__scan_min_int(value, &op->start_delay, 0);
}
}
g_hash_table_destroy(hash);
versioned_attrs = first_named_child(rsc_op, XML_TAG_RSC_VER_ATTRS);
hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version);
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
g_hash_table_iter_steal(&iter);
g_hash_table_replace(params, key, value);
}
g_hash_table_destroy(hash);
}
lrmd_free_rsc_info(rsc);
}
#endif
static lrmd_event_data_t *
construct_op(lrm_state_t *lrm_state, xmlNode *rsc_op, const char *rsc_id,
const char *operation)
{
lrmd_event_data_t *op = NULL;
const char *op_delay = NULL;
const char *op_timeout = NULL;
GHashTable *params = NULL;
xmlNode *primitive = NULL;
const char *class = NULL;
const char *transition = NULL;
CRM_ASSERT(rsc_id && operation);
op = lrmd_new_event(rsc_id, operation, 0);
op->type = lrmd_event_exec_complete;
op->op_status = PCMK_LRM_OP_PENDING;
op->rc = -1;
op->timeout = 0;
op->start_delay = 0;
if (rsc_op == NULL) {
CRM_LOG_ASSERT(pcmk__str_eq(CRMD_ACTION_STOP, operation, pcmk__str_casei));
op->user_data = NULL;
/* the stop_all_resources() case
* by definition there is no DC (or they'd be shutting
* us down).
* So we should put our version here.
*/
op->params = pcmk__strkey_table(free, free);
g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
crm_trace("Constructed %s op for %s", operation, rsc_id);
return op;
}
params = xml2list(rsc_op);
g_hash_table_remove(params, CRM_META "_op_target_rc");
op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY);
pcmk__scan_min_int(op_delay, &op->start_delay, 0);
op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT);
pcmk__scan_min_int(op_timeout, &op->timeout, 0);
if (pcmk__guint_from_hash(params, CRM_META "_" XML_LRM_ATTR_INTERVAL_MS, 0,
&(op->interval_ms)) != pcmk_rc_ok) {
op->interval_ms = 0;
}
/* Use pcmk_monitor_timeout instead of meta timeout for stonith
recurring monitor, if set */
primitive = find_xml_node(rsc_op, XML_CIB_TAG_RESOURCE, FALSE);
class = crm_element_value(primitive, XML_AGENT_ATTR_CLASS);
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params)
&& pcmk__str_eq(operation, CRMD_ACTION_STATUS, pcmk__str_casei)
&& (op->interval_ms > 0)) {
op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout");
if (op_timeout != NULL) {
op->timeout = crm_get_msec(op_timeout);
}
}
#if ENABLE_VERSIONED_ATTRS
if (lrm_state && !is_remote_lrmd_ra(NULL, NULL, rsc_id)
&& !pcmk__strcase_any_of(operation, CRMD_ACTION_METADATA, CRMD_ACTION_DELETE,
NULL)) {
resolve_versioned_parameters(lrm_state, rsc_id, rsc_op, params);
}
#endif
if (!pcmk__str_eq(operation, RSC_STOP, pcmk__str_casei)) {
op->params = params;
} else {
rsc_history_t *entry = NULL;
if (lrm_state) {
entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
}
/* If we do not have stop parameters cached, use
* whatever we are given */
if (!entry || !entry->stop_params) {
op->params = params;
} else {
/* Copy the cached parameter list so that we stop the resource
* with the old attributes, not the new ones */
op->params = pcmk__strkey_table(free, free);
g_hash_table_foreach(params, copy_meta_keys, op->params);
g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params);
g_hash_table_destroy(params);
params = NULL;
}
}
/* sanity */
if (op->timeout <= 0) {
op->timeout = op->interval_ms;
}
if (op->start_delay < 0) {
op->start_delay = 0;
}
transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY);
CRM_CHECK(transition != NULL, return op);
op->user_data = strdup(transition);
if (op->interval_ms != 0) {
if (pcmk__strcase_any_of(operation, CRMD_ACTION_START, CRMD_ACTION_STOP, NULL)) {
crm_err("Start and Stop actions cannot have an interval: %u",
op->interval_ms);
op->interval_ms = 0;
}
}
crm_trace("Constructed %s op for %s: interval=%u",
operation, rsc_id, op->interval_ms);
return op;
}
/*!
* \internal
* \brief Send a (synthesized) event result
*
* Reply with a synthesized event result directly, as opposed to going through
* the executor.
*
* \param[in] to_host Host to send result to
* \param[in] to_sys IPC name to send result to (NULL for transition engine)
* \param[in] rsc Type information about resource the result is for
* \param[in] op Event with result to send
* \param[in] rsc_id ID of resource the result is for
*/
void
controld_ack_event_directly(const char *to_host, const char *to_sys,
lrmd_rsc_info_t *rsc, lrmd_event_data_t *op,
const char *rsc_id)
{
xmlNode *reply = NULL;
xmlNode *update, *iter;
crm_node_t *peer = NULL;
CRM_CHECK(op != NULL, return);
if (op->rsc_id == NULL) {
CRM_ASSERT(rsc_id != NULL);
op->rsc_id = strdup(rsc_id);
}
if (to_sys == NULL) {
to_sys = CRM_SYSTEM_TENGINE;
}
peer = crm_get_peer(0, fsa_our_uname);
update = create_node_state_update(peer, node_update_none, NULL,
__func__);
iter = create_xml_node(update, XML_CIB_TAG_LRM);
crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid);
iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES);
iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE);
crm_xml_add(iter, XML_ATTR_ID, op->rsc_id);
build_operation_update(iter, rsc, op, fsa_our_uname, __func__);
reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL);
crm_log_xml_trace(update, "[direct ACK]");
crm_debug("ACK'ing resource op " PCMK__OP_FMT " from %s: %s",
op->rsc_id, op->op_type, op->interval_ms, op->user_data,
crm_element_value(reply, XML_ATTR_REFERENCE));
if (relay_message(reply, TRUE) == FALSE) {
crm_log_xml_err(reply, "Unable to route reply");
}
free_xml(update);
free_xml(reply);
}
gboolean
verify_stopped(enum crmd_fsa_state cur_state, int log_level)
{
gboolean res = TRUE;
GList *lrm_state_list = lrm_state_get_list();
GList *state_entry;
for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) {
lrm_state_t *lrm_state = state_entry->data;
if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) {
/* keep iterating through all entries even when FALSE is returned */
res = FALSE;
}
}
controld_set_fsa_input_flags(R_SENT_RSC_STOP);
g_list_free(lrm_state_list); lrm_state_list = NULL;
return res;
}
struct stop_recurring_action_s {
lrmd_rsc_info_t *rsc;
lrm_state_t *lrm_state;
};
static gboolean
stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data)
{
gboolean remove = FALSE;
struct stop_recurring_action_s *event = user_data;
active_op_t *op = value;
if ((op->interval_ms != 0)
&& pcmk__str_eq(op->rsc_id, event->rsc->id, pcmk__str_none)) {
crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key);
remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE);
}
return remove;
}
static gboolean
stop_recurring_actions(gpointer key, gpointer value, gpointer user_data)
{
gboolean remove = FALSE;
lrm_state_t *lrm_state = user_data;
active_op_t *op = value;
if (op->interval_ms != 0) {
crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id,
(const char *) key);
remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE);
}
return remove;
}
static void
record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op)
{
const char *record_pending = NULL;
CRM_CHECK(node_name != NULL, return);
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(op != NULL, return);
// Never record certain operation types as pending
if ((op->op_type == NULL) || (op->params == NULL)
|| !controld_action_is_recordable(op->op_type)) {
return;
}
// defaults to true
record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING);
if (record_pending && !crm_is_true(record_pending)) {
return;
}
op->call_id = -1;
op->op_status = PCMK_LRM_OP_PENDING;
op->rc = PCMK_OCF_UNKNOWN;
op->t_run = time(NULL);
op->t_rcchange = op->t_run;
/* write a "pending" entry to the CIB, inhibit notification */
crm_debug("Recording pending op " PCMK__OP_FMT " on %s in the CIB",
op->rsc_id, op->op_type, op->interval_ms, node_name);
do_update_resource(node_name, rsc, op, 0);
}
static void
do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc,
const char *operation, xmlNode *msg)
{
int call_id = 0;
char *op_id = NULL;
lrmd_event_data_t *op = NULL;
lrmd_key_value_t *params = NULL;
fsa_data_t *msg_data = NULL;
const char *transition = NULL;
gboolean stop_recurring = FALSE;
bool send_nack = FALSE;
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(operation != NULL, return);
if (msg != NULL) {
transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY);
if (transition == NULL) {
crm_log_xml_err(msg, "Missing transition number");
}
}
op = construct_op(lrm_state, msg, rsc->id, operation);
CRM_CHECK(op != NULL, return);
if (is_remote_lrmd_ra(NULL, NULL, rsc->id)
&& (op->interval_ms == 0)
&& strcmp(operation, CRMD_ACTION_MIGRATE) == 0) {
/* Pacemaker Remote connections are a special case. We never want to stop
 * monitoring a connection resource until the entire migration has
 * completed: if the connection is unexpectedly severed, even during a
 * migration, that is an event we must detect. */
stop_recurring = FALSE;
} else if ((op->interval_ms == 0)
&& strcmp(operation, CRMD_ACTION_STATUS) != 0
&& strcmp(operation, CRMD_ACTION_NOTIFY) != 0) {
/* stop any previous monitor operations before changing the resource state */
stop_recurring = TRUE;
}
if (stop_recurring == TRUE) {
guint removed = 0;
struct stop_recurring_action_s data;
data.rsc = rsc;
data.lrm_state = lrm_state;
removed = g_hash_table_foreach_remove(
lrm_state->pending_ops, stop_recurring_action_by_rsc, &data);
if (removed) {
crm_debug("Stopped %u recurring operation%s in preparation for "
PCMK__OP_FMT, removed, pcmk__plural_s(removed),
rsc->id, operation, op->interval_ms);
}
}
/* now do the op */
crm_notice("Requesting local execution of %s operation for %s on %s "
CRM_XS " transition_key=%s op_key=" PCMK__OP_FMT,
crm_action_str(op->op_type, op->interval_ms), rsc->id, lrm_state->node_name,
transition, rsc->id, operation, op->interval_ms);
if (pcmk_is_set(fsa_input_register, R_SHUTDOWN)
&& pcmk__str_eq(operation, RSC_START, pcmk__str_casei)) {
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
send_nack = TRUE;
} else if (fsa_state != S_NOT_DC
&& fsa_state != S_POLICY_ENGINE /* Recalculating */
&& fsa_state != S_TRANSITION_ENGINE
&& !pcmk__str_eq(operation, CRMD_ACTION_STOP, pcmk__str_casei)) {
send_nack = TRUE;
}
if(send_nack) {
crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)",
operation, rsc->id, fsa_state2string(fsa_state),
pcmk__btoa(pcmk_is_set(fsa_input_register, R_SHUTDOWN)));
op->rc = PCMK_OCF_UNKNOWN_ERROR;
op->op_status = PCMK_LRM_OP_INVALID;
controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id);
lrmd_free_event(op);
free(op_id);
return;
}
record_pending_op(lrm_state->node_name, rsc, op);
op_id = pcmk__op_key(rsc->id, op->op_type, op->interval_ms);
if (op->interval_ms > 0) {
/* cancel it so we can then restart it without conflict */
cancel_op_key(lrm_state, rsc, op_id, FALSE);
}
if (op->params) {
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, op->params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
params = lrmd_key_value_add(params, key, value);
}
}
call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data,
op->interval_ms, op->timeout, op->start_delay,
params);
if (call_id <= 0 && lrm_state_is_local(lrm_state)) {
crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id);
register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
} else if (call_id <= 0) {
crm_err("Operation %s on resource %s failed to execute on remote node %s: %d",
operation, rsc->id, lrm_state->node_name, call_id);
fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR);
process_lrm_event(lrm_state, op, NULL, NULL);
} else {
/* record all operations so we can wait
* for them to complete during shutdown
*/
char *call_id_s = make_stop_id(rsc->id, call_id);
active_op_t *pending = NULL;
pending = calloc(1, sizeof(active_op_t));
crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s);
pending->call_id = call_id;
pending->interval_ms = op->interval_ms;
pending->op_type = strdup(operation);
pending->op_key = strdup(op_id);
pending->rsc_id = strdup(rsc->id);
pending->start_time = time(NULL);
pending->user_data = op->user_data? strdup(op->user_data) : NULL;
if (crm_element_value_epoch(msg, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
&(pending->lock_time)) != pcmk_ok) {
pending->lock_time = 0;
}
g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending);
if ((op->interval_ms > 0)
&& (op->start_delay > START_DELAY_THRESHOLD)) {
int target_rc = 0;
crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id);
decode_transition_key(op->user_data, NULL, NULL, NULL, &target_rc);
op->rc = target_rc;
op->op_status = PCMK_LRM_OP_DONE;
controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id);
}
pending->params = op->params;
op->params = NULL;
}
free(op_id);
lrmd_free_event(op);
return;
}
int last_resource_update = 0;
static void
cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
switch (rc) {
case pcmk_ok:
case -pcmk_err_diff_failed:
case -pcmk_err_diff_resync:
crm_trace("Resource update %d complete: rc=%d", call_id, rc);
break;
default:
crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc));
}
if (call_id == last_resource_update) {
last_resource_update = 0;
trigger_fsa();
}
}
/* Only successful stops, and probes that found the resource inactive, get locks
* recorded in the history. This ensures the resource stays locked to the node
* until it is active there again after the node comes back up.
*/
static bool
should_preserve_lock(lrmd_event_data_t *op)
{
if (!controld_shutdown_lock_enabled) {
return false;
}
if (!strcmp(op->op_type, RSC_STOP) && (op->rc == PCMK_OCF_OK)) {
return true;
}
if (!strcmp(op->op_type, RSC_STATUS) && (op->rc == PCMK_OCF_NOT_RUNNING)) {
return true;
}
return false;
}
static int
do_update_resource(const char *node_name, lrmd_rsc_info_t *rsc,
lrmd_event_data_t *op, time_t lock_time)
{
/* Build an XML fragment of the form
 * status / node_state / lrm / lrm_resources / lrm_resource
 * for the CIB status section update assembled below.
 */
int rc = pcmk_ok;
xmlNode *update, *iter = NULL;
int call_opt = crmd_cib_smart_opt();
const char *uuid = NULL;
CRM_CHECK(op != NULL, return 0);
iter = create_xml_node(iter, XML_CIB_TAG_STATUS);
update = iter;
iter = create_xml_node(iter, XML_CIB_TAG_STATE);
if (pcmk__str_eq(node_name, fsa_our_uname, pcmk__str_casei)) {
uuid = fsa_our_uuid;
} else {
/* remote nodes uuid and uname are equal */
uuid = node_name;
crm_xml_add(iter, XML_NODE_IS_REMOTE, "true");
}
CRM_LOG_ASSERT(uuid != NULL);
if(uuid == NULL) {
rc = -EINVAL;
goto done;
}
crm_xml_add(iter, XML_ATTR_UUID, uuid);
crm_xml_add(iter, XML_ATTR_UNAME, node_name);
crm_xml_add(iter, XML_ATTR_ORIGIN, __func__);
iter = create_xml_node(iter, XML_CIB_TAG_LRM);
crm_xml_add(iter, XML_ATTR_ID, uuid);
iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES);
iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE);
crm_xml_add(iter, XML_ATTR_ID, op->rsc_id);
build_operation_update(iter, rsc, op, node_name, __func__);
if (rsc) {
const char *container = NULL;
crm_xml_add(iter, XML_ATTR_TYPE, rsc->type);
crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard);
crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider);
if (lock_time != 0) {
/* Actions on a locked resource should either preserve the lock by
* recording it with the action result, or clear it.
*/
if (!should_preserve_lock(op)) {
lock_time = 0;
}
crm_xml_add_ll(iter, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
(long long) lock_time);
}
if (op->params) {
container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER);
}
if (container) {
crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container);
crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container);
}
} else {
crm_warn("Resource %s no longer exists in the executor", op->rsc_id);
controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id);
goto cleanup;
}
crm_log_xml_trace(update, __func__);
/* make it an asynchronous call and be done with it
*
* Best case:
* the resource state will be discovered during
* the next signup or election.
*
* Bad case:
* we are shutting down and there is no DC at the time,
* but then why were we shutting down anyway?
* (probably because of an internal error)
*
* Worst case:
* we get shot for having resources "running" that really weren't
*
* the alternative however means blocking here for too long, which
* isn't acceptable
*/
fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL);
if (rc > 0) {
last_resource_update = rc;
}
done:
/* the return code is a call number, not an error code */
crm_trace("Sent resource state update message: %d for %s=%u on %s",
rc, op->op_type, op->interval_ms, op->rsc_id);
fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback);
cleanup:
free_xml(update);
return rc;
}
void
do_lrm_event(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data)
{
CRM_CHECK(FALSE, return);
}
static char *
unescape_newlines(const char *string)
{
char *pch = NULL;
char *ret = NULL;
static const char *escaped_newline = "\\n";
if (!string) {
return NULL;
}
ret = strdup(string);
pch = strstr(ret, escaped_newline);
while (pch != NULL) {
/* Replace newline escape pattern with actual newline (and a space so we
* don't have to shuffle the rest of the buffer)
*/
pch[0] = '\n';
pch[1] = ' ';
pch = strstr(pch, escaped_newline);
}
return ret;
}
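/* Illustrative note (editor's sketch, not part of the original file): given
 * the literal input "foo\nbar" (a backslash followed by 'n'), the function
 * above returns "foo", a real newline, a space, then "bar", so the buffer
 * length is unchanged and no reallocation is needed.
 */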
static bool
did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id,
const char * op_type, guint interval_ms)
{
rsc_history_t *entry = NULL;
CRM_CHECK(lrm_state != NULL, return FALSE);
CRM_CHECK(rsc_id != NULL, return FALSE);
CRM_CHECK(op_type != NULL, return FALSE);
entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
if (entry == NULL || entry->failed == NULL) {
return FALSE;
}
if (pcmk__str_eq(entry->failed->rsc_id, rsc_id, pcmk__str_none)
&& pcmk__str_eq(entry->failed->op_type, op_type, pcmk__str_casei)
&& entry->failed->interval_ms == interval_ms) {
return TRUE;
}
return FALSE;
}
void
process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
active_op_t *pending, xmlNode *action_xml)
{
char *op_id = NULL;
char *op_key = NULL;
int update_id = 0;
gboolean remove = FALSE;
gboolean removed = FALSE;
bool need_direct_ack = FALSE;
lrmd_rsc_info_t *rsc = NULL;
const char *node_name = NULL;
CRM_CHECK(op != NULL, return);
CRM_CHECK(op->rsc_id != NULL, return);
// Remap new status codes for older DCs
if (compare_version(fsa_our_dc_version, "3.2.0") < 0) {
switch (op->op_status) {
case PCMK_LRM_OP_NOT_CONNECTED:
op->op_status = PCMK_LRM_OP_ERROR;
op->rc = PCMK_OCF_CONNECTION_DIED;
break;
case PCMK_LRM_OP_INVALID:
op->op_status = PCMK_LRM_OP_ERROR;
op->rc = CRM_DIRECT_NACK_RC;
break;
default:
break;
}
}
op_id = make_stop_id(op->rsc_id, op->call_id);
op_key = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms);
// Get resource info if available (from executor state or action XML)
if (lrm_state) {
rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0);
}
if ((rsc == NULL) && action_xml) {
xmlNode *xml = find_xml_node(action_xml, XML_CIB_TAG_RESOURCE, TRUE);
const char *standard = crm_element_value(xml, XML_AGENT_ATTR_CLASS);
const char *provider = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER);
const char *type = crm_element_value(xml, XML_ATTR_TYPE);
if (standard && type) {
crm_info("%s agent information not cached, using %s%s%s:%s from action XML",
op->rsc_id, standard,
(provider? ":" : ""), (provider? provider : ""), type);
rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type);
} else {
crm_err("Can't process %s result because %s agent information not cached or in XML",
op_key, op->rsc_id);
}
}
// Get node name if available (from executor state or action XML)
if (lrm_state) {
node_name = lrm_state->node_name;
} else if (action_xml) {
node_name = crm_element_value(action_xml, XML_LRM_ATTR_TARGET);
}
if(pending == NULL) {
remove = TRUE;
if (lrm_state) {
pending = g_hash_table_lookup(lrm_state->pending_ops, op_id);
}
}
if (op->op_status == PCMK_LRM_OP_ERROR) {
switch(op->rc) {
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_PROMOTED:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_PROMOTED:
// Leave it to the TE/scheduler to decide if this is an error
op->op_status = PCMK_LRM_OP_DONE;
break;
default:
/* Nothing to do */
break;
}
}
if (op->op_status != PCMK_LRM_OP_CANCELLED) {
/* We might not record the result, so acknowledge it directly to the
 * originator instead, to keep it from timing out waiting for the result
 * (especially important if this is part of a transition).
*/
need_direct_ack = TRUE;
if (controld_action_is_recordable(op->op_type)) {
if (node_name && rsc) {
// We should record the result, and happily, we can
update_id = do_update_resource(node_name, rsc, op,
pending? pending->lock_time : 0);
need_direct_ack = FALSE;
} else if (op->rsc_deleted) {
/* We shouldn't record the result (likely the resource was
* refreshed, cleaned, or removed while this operation was
* in flight).
*/
crm_notice("Not recording %s result in CIB because "
"resource information was removed since it was initiated",
op_key);
} else {
/* This shouldn't be possible; the executor didn't consider the
* resource deleted, but we couldn't find resource or node
* information.
*/
crm_err("Unable to record %s result in CIB: %s", op_key,
(node_name? "No resource information" : "No node name"));
}
}
} else if (op->interval_ms == 0) {
/* A non-recurring operation was cancelled. Most likely, the
* never-initiated action was removed from the executor's pending
* operations list upon resource removal.
*/
need_direct_ack = TRUE;
} else if (pending == NULL) {
/* This recurring operation was cancelled, but was not pending. No
 * transition actions are waiting on it, so nothing needs to be done.
*/
} else if (op->user_data == NULL) {
/* This recurring operation was cancelled and pending, but we don't
* have a transition key. This should never happen.
*/
crm_err("Recurring operation %s was cancelled without transition information",
op_key);
} else if (pcmk_is_set(pending->flags, active_op_remove)) {
/* This recurring operation was cancelled (by us) and pending, and we
* have been waiting for it to finish.
*/
if (lrm_state) {
erase_lrm_history_by_op(lrm_state, op);
}
/* If the recurring operation had failed, the lrm_rsc_op is recorded as
 * "last_failure", which erase_lrm_history_by_op() deliberately does not
 * erase from the CIB. In that case the cancel action would never get
 * confirmed by the DC via process_op_deletion(), and the cluster
 * transition would get stuck waiting for the remaining action timer to
 * time out.
*
* Directly acknowledge the cancel operation in this case.
*/
if (did_lrm_rsc_op_fail(lrm_state, pending->rsc_id,
pending->op_type, pending->interval_ms)) {
need_direct_ack = TRUE;
}
} else if (op->rsc_deleted) {
/* This recurring operation was cancelled (but not by us, and the
* executor does not have resource information, likely due to resource
* cleanup, refresh, or removal) and pending.
*/
crm_debug("Recurring op %s was cancelled due to resource deletion",
op_key);
need_direct_ack = TRUE;
} else {
/* This recurring operation was cancelled (but not by us, likely by the
* executor before stopping the resource) and pending. We don't need to
* do anything special.
*/
}
if (need_direct_ack) {
controld_ack_event_directly(NULL, NULL, NULL, op, op->rsc_id);
}
if(remove == FALSE) {
/* The caller will do this afterwards, but keep the logging consistent */
removed = TRUE;
} else if (lrm_state && ((op->interval_ms == 0)
|| (op->op_status == PCMK_LRM_OP_CANCELLED))) {
gboolean found = g_hash_table_remove(lrm_state->pending_ops, op_id);
if (op->interval_ms != 0) {
removed = TRUE;
} else if (found) {
removed = TRUE;
crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed",
op_key, op->call_id, op_id,
g_hash_table_size(lrm_state->pending_ops));
}
}
if (node_name == NULL) {
node_name = "unknown node"; // for logging
}
switch (op->op_status) {
case PCMK_LRM_OP_CANCELLED:
crm_info("Result of %s operation for %s on %s: %s "
CRM_XS " call=%d key=%s confirmed=%s",
crm_action_str(op->op_type, op->interval_ms),
op->rsc_id, node_name,
services_lrm_status_str(op->op_status),
op->call_id, op_key, pcmk__btoa(removed));
break;
case PCMK_LRM_OP_DONE:
crm_notice("Result of %s operation for %s on %s: %s "
CRM_XS " rc=%d call=%d key=%s confirmed=%s cib-update=%d",
crm_action_str(op->op_type, op->interval_ms),
op->rsc_id, node_name,
services_ocf_exitcode_str(op->rc), op->rc,
op->call_id, op_key, pcmk__btoa(removed), update_id);
break;
case PCMK_LRM_OP_TIMEOUT:
crm_err("Result of %s operation for %s on %s: %s "
CRM_XS " call=%d key=%s timeout=%dms",
crm_action_str(op->op_type, op->interval_ms),
op->rsc_id, node_name,
services_lrm_status_str(op->op_status),
op->call_id, op_key, op->timeout);
break;
default:
crm_err("Result of %s operation for %s on %s: %s "
CRM_XS " call=%d key=%s confirmed=%s status=%d cib-update=%d",
crm_action_str(op->op_type, op->interval_ms),
op->rsc_id, node_name,
services_lrm_status_str(op->op_status), op->call_id, op_key,
pcmk__btoa(removed), op->op_status, update_id);
}
if (op->output) {
char *prefix =
crm_strdup_printf("%s-" PCMK__OP_FMT ":%d", node_name,
op->rsc_id, op->op_type, op->interval_ms,
op->call_id);
if (op->rc) {
crm_log_output(LOG_NOTICE, prefix, op->output);
} else {
crm_log_output(LOG_DEBUG, prefix, op->output);
}
free(prefix);
}
if (lrm_state) {
if (!pcmk__str_eq(op->op_type, RSC_METADATA, pcmk__str_casei)) {
crmd_alert_resource_op(lrm_state->node_name, op);
} else if (rsc && (op->rc == PCMK_OCF_OK)) {
char *metadata = unescape_newlines(op->output);
metadata_cache_update(lrm_state->metadata_cache, rsc, metadata);
free(metadata);
}
}
if (op->rsc_deleted) {
crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key);
if (lrm_state) {
delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL);
}
}
/* If a shutdown was escalated while operations were pending,
* then the FSA will be stalled right now... allow it to continue
*/
mainloop_set_trigger(fsa_source);
if (lrm_state && rsc) {
update_history_cache(lrm_state, rsc, op);
}
lrmd_free_rsc_info(rsc);
free(op_key);
free(op_id);
}
diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h
index 0fa5f8d912..8bc16107b4 100644
--- a/daemons/controld/controld_lrm.h
+++ b/daemons/controld/controld_lrm.h
@@ -1,193 +1,192 @@
/*
- * Copyright 2004-2020 the Pacemaker project contributors
+ * Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CONTROLD_LRM__H
# define CONTROLD_LRM__H
#include
-#include
extern gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level);
void lrm_clear_last_failure(const char *rsc_id, const char *node_name,
const char *operation, guint interval_ms);
void lrm_op_callback(lrmd_event_data_t * op);
lrmd_t *crmd_local_lrmd_conn(void);
typedef struct resource_history_s {
char *id;
uint32_t last_callid;
lrmd_rsc_info_t rsc;
lrmd_event_data_t *last;
lrmd_event_data_t *failed;
GList *recurring_op_list;
/* Resources must be stopped using the same
* parameters they were started with. This hashtable
* holds the parameters that should be used for the next stop
* cmd on this resource. */
GHashTable *stop_params;
} rsc_history_t;
void history_free(gpointer data);
enum active_op_e {
active_op_remove = (1 << 0),
active_op_cancelled = (1 << 1),
};
// In-flight action (recurring or pending)
typedef struct active_op_s {
guint interval_ms;
int call_id;
uint32_t flags; // bitmask of active_op_e
time_t start_time;
time_t lock_time;
char *rsc_id;
char *op_type;
char *op_key;
char *user_data;
GHashTable *params;
} active_op_t;
#define controld_set_active_op_flags(active_op, flags_to_set) do { \
(active_op)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Active operation", (active_op)->op_key, \
(active_op)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define controld_clear_active_op_flags(active_op, flags_to_clear) do { \
(active_op)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Active operation", (active_op)->op_key, \
(active_op)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
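/* Illustrative sketch (editor's addition, not part of the patch): a caller
 * marking an in-flight operation as cancelled might use the helpers above
 * like this (the variable name "pending" is hypothetical):
 *
 *     controld_set_active_op_flags(pending, active_op_cancelled);
 *     if (pcmk_is_set(pending->flags, active_op_remove)) {
 *         controld_clear_active_op_flags(pending, active_op_remove);
 *     }
 */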
typedef struct lrm_state_s {
const char *node_name;
void *conn; // Reserved for controld_execd_state.c usage
void *remote_ra_data; // Reserved for controld_remote_ra.c usage
GHashTable *resource_history;
GHashTable *pending_ops;
GHashTable *deletion_ops;
GHashTable *rsc_info_cache;
GHashTable *metadata_cache; // key = class[:provider]:agent, value = ra_metadata_s
int num_lrm_register_fails;
} lrm_state_t;
struct pending_deletion_op_s {
char *rsc;
ha_msg_input_t *input;
};
/*!
* \brief Check whether this is the local IPC connection to the executor
*/
gboolean
lrm_state_is_local(lrm_state_t *lrm_state);
/*!
* \brief Clear all state information from a single state entry.
* \note It is sometimes useful to preserve the metadata cache when it won't go stale.
* \note This does not close the executor connection
*/
void lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata);
GList *lrm_state_get_list(void);
/*!
* \brief Initialize internal state tables
*/
gboolean lrm_state_init_local(void);
/*!
* \brief Destroy all state entries and internal state tables
*/
void lrm_state_destroy_all(void);
/*!
* \brief Create executor connection entry
*/
lrm_state_t *lrm_state_create(const char *node_name);
/*!
* \brief Destroy executor connection by node name
*/
void lrm_state_destroy(const char *node_name);
/*!
* \brief Find lrm_state data by node name
*/
lrm_state_t *lrm_state_find(const char *node_name);
/*!
* \brief Either find or create a new entry
*/
lrm_state_t *lrm_state_find_or_create(const char *node_name);
/*!
* The functions below are wrappers for the executor API that the controller
* uses. These wrapper functions allow us to treat the controller's remote
* executor connection resources the same as regular resources. Internally,
* regular resources go to the executor, and remote connection resources are
* handled locally in the controller.
*/
void lrm_state_disconnect_only(lrm_state_t * lrm_state);
void lrm_state_disconnect(lrm_state_t * lrm_state);
int lrm_state_ipc_connect(lrm_state_t * lrm_state);
int lrm_state_remote_connect_async(lrm_state_t * lrm_state, const char *server, int port,
int timeout);
int lrm_state_is_connected(lrm_state_t * lrm_state);
int lrm_state_poke_connection(lrm_state_t * lrm_state);
int lrm_state_get_metadata(lrm_state_t * lrm_state,
const char *class,
const char *provider,
const char *agent, char **output, enum lrmd_call_options options);
int lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, guint interval_ms);
int lrm_state_exec(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, const char *userdata, guint interval_ms,
int timeout, /* ms */
int start_delay, /* ms */
lrmd_key_value_t * params);
lrmd_rsc_info_t *lrm_state_get_rsc_info(lrm_state_t * lrm_state,
const char *rsc_id, enum lrmd_call_options options);
int lrm_state_register_rsc(lrm_state_t * lrm_state,
const char *rsc_id,
const char *class,
const char *provider, const char *agent, enum lrmd_call_options options);
int lrm_state_unregister_rsc(lrm_state_t * lrm_state,
const char *rsc_id, enum lrmd_call_options options);
// Functions used to manage remote executor connection resources
void remote_lrm_op_callback(lrmd_event_data_t * op);
gboolean is_remote_lrmd_ra(const char *agent, const char *provider, const char *id);
lrmd_rsc_info_t *remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id);
int remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, guint interval_ms);
int remote_ra_exec(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, const char *userdata, guint interval_ms,
int timeout, /* ms */
int start_delay, /* ms */
lrmd_key_value_t * params);
void remote_ra_cleanup(lrm_state_t * lrm_state);
void remote_ra_fail(const char *node_name);
void remote_ra_process_pseudo(xmlNode *xml);
gboolean remote_ra_is_in_maintenance(lrm_state_t * lrm_state);
void remote_ra_process_maintenance_nodes(xmlNode *xml);
gboolean remote_ra_controlling_guest(lrm_state_t * lrm_state);
void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
active_op_t *pending, xmlNode *action_xml);
void controld_ack_event_directly(const char *to_host, const char *to_sys,
lrmd_rsc_info_t *rsc, lrmd_event_data_t *op,
const char *rsc_id);
void controld_rc2event(lrmd_event_data_t *event, int rc);
void controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id);
#endif
diff --git a/daemons/controld/controld_metadata.c b/daemons/controld/controld_metadata.c
index 9d76307dd4..9bf8355a10 100644
--- a/daemons/controld/controld_metadata.c
+++ b/daemons/controld/controld_metadata.c
@@ -1,287 +1,390 @@
/*
* Copyright 2017-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#if ENABLE_VERSIONED_ATTRS
static regex_t *version_format_regex = NULL;
#endif
static void
ra_param_free(void *param)
{
if (param) {
struct ra_param_s *p = (struct ra_param_s *) param;
if (p->rap_name) {
free(p->rap_name);
}
free(param);
}
}
static void
metadata_free(void *metadata)
{
if (metadata) {
struct ra_metadata_s *md = (struct ra_metadata_s *) metadata;
if (md->ra_version) {
free(md->ra_version);
}
g_list_free_full(md->ra_params, ra_param_free);
free(metadata);
}
}
GHashTable *
metadata_cache_new()
{
return pcmk__strkey_table(free, metadata_free);
}
void
metadata_cache_free(GHashTable *mdc)
{
if (mdc) {
crm_trace("Destroying metadata cache with %d members", g_hash_table_size(mdc));
g_hash_table_destroy(mdc);
}
}
void
metadata_cache_reset(GHashTable *mdc)
{
if (mdc) {
crm_trace("Resetting metadata cache with %d members",
g_hash_table_size(mdc));
g_hash_table_remove_all(mdc);
}
}
#if ENABLE_VERSIONED_ATTRS
static gboolean
valid_version_format(const char *version)
{
if (version == NULL) {
return FALSE;
}
if (version_format_regex == NULL) {
/* The OCF standard allows free-form versioning, but for our purposes of
* versioned resource and operation attributes, we constrain it to
* dot-separated numbers. Agents are still free to use other schemes,
* but we can't determine attributes based on them.
*/
const char *regex_string = "^[[:digit:]]+([.][[:digit:]]+)*$";
version_format_regex = calloc(1, sizeof(regex_t));
regcomp(version_format_regex, regex_string, REG_EXTENDED | REG_NOSUB);
/* If our regex doesn't compile, it's a bug on our side, so CRM_CHECK()
* will give us a core dump to catch it. Pretend the version is OK
* because we don't want our mistake to break versioned attributes
* (which should only ever happen in a development branch anyway).
*/
CRM_CHECK(version_format_regex != NULL, return TRUE);
}
return regexec(version_format_regex, version, 0, NULL, 0) == 0;
}
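/* Illustrative note (editor's sketch): under the regex above, versions such
 * as "1", "1.0", and "1.0.2" are accepted, while "1.0-rc1" or "v2" are not;
 * ra_version_from_xml() below falls back to PCMK_DEFAULT_AGENT_VERSION for
 * rejected formats.
 */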
#endif
void
metadata_cache_fini()
{
#if ENABLE_VERSIONED_ATTRS
if (version_format_regex) {
regfree(version_format_regex);
free(version_format_regex);
version_format_regex = NULL;
}
#endif
}
#if ENABLE_VERSIONED_ATTRS
static char *
ra_version_from_xml(xmlNode *metadata_xml, const lrmd_rsc_info_t *rsc)
{
const char *version = crm_element_value(metadata_xml, XML_ATTR_VERSION);
if (version == NULL) {
crm_debug("Metadata for %s:%s:%s does not specify a version",
rsc->standard, rsc->provider, rsc->type);
version = PCMK_DEFAULT_AGENT_VERSION;
} else if (!valid_version_format(version)) {
crm_notice("%s:%s:%s metadata version has unrecognized format",
rsc->standard, rsc->provider, rsc->type);
version = PCMK_DEFAULT_AGENT_VERSION;
} else {
crm_debug("Metadata for %s:%s:%s has version %s",
rsc->standard, rsc->provider, rsc->type, version);
}
return strdup(version);
}
#endif
static struct ra_param_s *
ra_param_from_xml(xmlNode *param_xml)
{
const char *param_name = crm_element_value(param_xml, "name");
const char *value;
struct ra_param_s *p;
p = calloc(1, sizeof(struct ra_param_s));
if (p == NULL) {
crm_crit("Could not allocate memory for resource metadata");
return NULL;
}
p->rap_name = strdup(param_name);
if (p->rap_name == NULL) {
crm_crit("Could not allocate memory for resource metadata");
free(p);
return NULL;
}
+ value = crm_element_value(param_xml, "reloadable");
+ if (crm_is_true(value)) {
+ controld_set_ra_param_flags(p, ra_param_reloadable);
+ }
+
value = crm_element_value(param_xml, "unique");
if (crm_is_true(value)) {
controld_set_ra_param_flags(p, ra_param_unique);
}
value = crm_element_value(param_xml, "private");
if (crm_is_true(value)) {
controld_set_ra_param_flags(p, ra_param_private);
}
return p;
}
+static void
+log_ra_ocf_version(const char *ra_key, const char *ra_ocf_version)
+{
+ if (pcmk__str_empty(ra_ocf_version)) {
+ crm_warn("%s does not advertise OCF version supported", ra_key);
+
+ } else if (compare_version(ra_ocf_version, "2") >= 0) {
+ crm_warn("%s supports OCF version %s (this Pacemaker version supports "
+ PCMK_OCF_VERSION " and might not work properly with agent)",
+ ra_key, ra_ocf_version);
+
+ } else if (compare_version(ra_ocf_version, PCMK_OCF_VERSION) > 0) {
+ crm_info("%s supports OCF version %s (this Pacemaker version supports "
+ PCMK_OCF_VERSION " and might not use all agent features)",
+ ra_key, ra_ocf_version);
+
+ } else {
+ crm_debug("%s supports OCF version %s", ra_key, ra_ocf_version);
+ }
+}
+
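+/* Illustrative note (editor's sketch, assuming PCMK_OCF_VERSION is "1.1"):
+ * with the logging above, a missing version produces a warning, "2.0" warns
+ * about possible incompatibility, "1.2" logs an informational message, and
+ * "1.0" or "1.1" log only at debug level.
+ */
+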
struct ra_metadata_s *
metadata_cache_update(GHashTable *mdc, lrmd_rsc_info_t *rsc,
const char *metadata_str)
{
char *key = NULL;
xmlNode *metadata = NULL;
xmlNode *match = NULL;
struct ra_metadata_s *md = NULL;
bool any_private_params = false;
+ bool ocf1_1 = false;
CRM_CHECK(mdc && rsc && metadata_str, return NULL);
key = crm_generate_ra_key(rsc->standard, rsc->provider, rsc->type);
if (!key) {
crm_crit("Could not allocate memory for resource metadata");
goto err;
}
metadata = string2xml(metadata_str);
if (!metadata) {
crm_err("Metadata for %s:%s:%s is not valid XML",
rsc->standard, rsc->provider, rsc->type);
goto err;
}
md = calloc(1, sizeof(struct ra_metadata_s));
if (md == NULL) {
crm_crit("Could not allocate memory for resource metadata");
goto err;
}
#if ENABLE_VERSIONED_ATTRS
md->ra_version = ra_version_from_xml(metadata, rsc);
#endif
+ if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_OCF) == 0) {
+ xmlChar *content = NULL;
+ xmlNode *version_element = first_named_child(metadata, "version");
+
+ if (version_element != NULL) {
+ content = xmlNodeGetContent(version_element);
+ }
+ log_ra_ocf_version(key, (const char *) content);
+ if (content != NULL) {
+ ocf1_1 = (compare_version((const char *) content, "1.1") >= 0);
+ xmlFree(content);
+ }
+ }
+
// Check supported actions
match = first_named_child(metadata, "actions");
for (match = first_named_child(match, "action"); match != NULL;
match = crm_next_same_xml(match)) {
const char *action_name = crm_element_value(match, "name");
- if (pcmk__str_eq(action_name, "reload", pcmk__str_casei)) {
- controld_set_ra_flags(md, key, ra_supports_reload);
- break; // since this is the only action we currently care about
+ if (pcmk__str_eq(action_name, CRMD_ACTION_RELOAD_AGENT,
+ pcmk__str_none)) {
+ if (ocf1_1) {
+ controld_set_ra_flags(md, key, ra_supports_reload_agent);
+ } else {
+ crm_notice("reload-agent action will not be used with %s "
+ "because it does not support OCF 1.1 or later", key);
+ }
+
+ } else if (!ocf1_1 && pcmk__str_eq(action_name, CRMD_ACTION_RELOAD,
+ pcmk__str_casei)) {
+ controld_set_ra_flags(md, key, ra_supports_legacy_reload);
}
}
// Build a parameter list
match = first_named_child(metadata, "parameters");
for (match = first_named_child(match, "parameter"); match != NULL;
match = crm_next_same_xml(match)) {
const char *param_name = crm_element_value(match, "name");
if (param_name == NULL) {
crm_warn("Metadata for %s:%s:%s has parameter without a name",
rsc->standard, rsc->provider, rsc->type);
} else {
struct ra_param_s *p = ra_param_from_xml(match);
if (p == NULL) {
goto err;
}
if (pcmk_is_set(p->rap_flags, ra_param_private)) {
any_private_params = true;
}
md->ra_params = g_list_prepend(md->ra_params, p);
}
}
/* Newer resource agents support the "private" parameter attribute to
* indicate sensitive parameters. For backward compatibility with older
* agents, implicitly treat a few common names as private when the agent
* doesn't specify any explicitly.
*/
if (!any_private_params) {
for (GList *iter = md->ra_params; iter != NULL; iter = iter->next) {
struct ra_param_s *p = iter->data;
if (pcmk__str_any_of(p->rap_name, "password", "passwd", "user",
NULL)) {
controld_set_ra_param_flags(p, ra_param_private);
}
}
}
g_hash_table_replace(mdc, key, md);
free_xml(metadata);
return md;
err:
free(key);
free_xml(metadata);
metadata_free(md);
return NULL;
}
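/* Illustrative note (editor's sketch, not taken from any specific agent):
 * metadata_cache_update() above would treat an OCF agent advertising
 *
 *     <resource-agent name="example">
 *       <version>1.1</version>
 *       <parameters>
 *         <parameter name="config" unique="0" reloadable="1"/>
 *       </parameters>
 *       <actions>
 *         <action name="reload-agent" timeout="20s"/>
 *       </actions>
 *     </resource-agent>
 *
 * as OCF 1.1 compliant, set ra_supports_reload_agent, and flag the "config"
 * parameter as reloadable.
 */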
+/*!
+ * \internal
+ * \brief Get meta-data for a resource
+ *
+ * \param[in] lrm_state Use meta-data cache from this executor connection
+ * \param[in] rsc Resource to get meta-data for
+ * \param[in] source Allowed meta-data sources (bitmask of enum
+ * controld_metadata_source_e values)
+ *
+ * \return Meta-data cache entry for given resource, or NULL if not available
+ */
struct ra_metadata_s *
-metadata_cache_get(GHashTable *mdc, lrmd_rsc_info_t *rsc)
+controld_get_rsc_metadata(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc,
+ uint32_t source)
{
- char *key = NULL;
struct ra_metadata_s *metadata = NULL;
+ char *metadata_str = NULL;
+ char *key = NULL;
+ int rc = pcmk_ok;
- CRM_CHECK(mdc && rsc, return NULL);
- key = crm_generate_ra_key(rsc->standard, rsc->provider, rsc->type);
- if (key) {
- metadata = g_hash_table_lookup(mdc, key);
- free(key);
+ CRM_CHECK((lrm_state != NULL) && (rsc != NULL), return NULL);
+
+ if (pcmk_is_set(source, controld_metadata_from_cache)) {
+ key = crm_generate_ra_key(rsc->standard, rsc->provider, rsc->type);
+ if (key != NULL) {
+ metadata = g_hash_table_lookup(lrm_state->metadata_cache, key);
+ free(key);
+ }
+ if (metadata != NULL) {
+ return metadata;
+ }
+ }
+
+ if (!pcmk_is_set(source, controld_metadata_from_agent)) {
+ return NULL;
+ }
+
+ /* For now, we always collect resource agent meta-data via a local,
+ * synchronous, direct execution of the agent. This has multiple issues:
+ * the executor should execute agents, not the controller; meta-data for
+ * Pacemaker Remote nodes should be collected on those nodes, not
+ * locally; and the meta-data call shouldn't eat into the timeout of the
+ * real action being performed.
+ *
+ * These issues are planned to be addressed by having the scheduler
+ * schedule a meta-data cache check at the beginning of each transition.
+ * Once that is working, this block will only be a fallback in case the
+ * initial collection fails.
+ */
+ rc = lrm_state_get_metadata(lrm_state, rsc->standard, rsc->provider,
+ rsc->type, &metadata_str, 0);
+ if (rc != pcmk_ok) {
+ crm_warn("Failed to get metadata for %s (%s:%s:%s): %s",
+ rsc->id, rsc->standard, rsc->provider, rsc->type,
+ pcmk_strerror(rc));
+ return NULL;
+ }
+
+ metadata = metadata_cache_update(lrm_state->metadata_cache, rsc,
+ metadata_str);
+ free(metadata_str);
+ if (metadata == NULL) {
+ crm_warn("Failed to update metadata for %s (%s:%s:%s)",
+ rsc->id, rsc->standard, rsc->provider, rsc->type);
}
return metadata;
}
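/* Illustrative sketch (editor's addition, not part of the patch): a caller
 * that prefers cached meta-data but is willing to query the agent on a
 * cache miss could pass both sources as a bitmask:
 *
 *     struct ra_metadata_s *md =
 *         controld_get_rsc_metadata(lrm_state, rsc,
 *                                   controld_metadata_from_cache
 *                                   |controld_metadata_from_agent);
 */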
diff --git a/daemons/controld/controld_metadata.h b/daemons/controld/controld_metadata.h
index 398d12aac7..7354f94642 100644
--- a/daemons/controld/controld_metadata.h
+++ b/daemons/controld/controld_metadata.h
@@ -1,68 +1,98 @@
/*
- * Copyright 2017-2020 the Pacemaker project contributors
+ * Copyright 2017-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#ifndef CRMD_METADATA_H
#define CRMD_METADATA_H
+#include <stdint.h> // uint32_t
+#include <glib.h> // GList, GHashTable
+#include "controld_lrm.h" // lrm_state_t, lrm_rsc_info_t
+
+/*
+ * @COMPAT pre-OCF-1.1 resource agents
+ *
+ * Pacemaker previously used the "reload" action to reload agent parameters,
+ * but most agents used it to reload the service configuration. Pacemaker also
+ * misused the OCF 1.0 "unique" parameter attribute to indicate reloadability.
+ *
+ * OCF 1.1 created the "reload-agent" action and "reloadable" parameter
+ * attribute for the Pacemaker usage.
+ *
+ * Pacemaker now supports the OCF 1.1 usage. The old usage is now deprecated,
+ * but will be supported if the agent does not claim OCF 1.1 or later
+ * compliance and does not advertise the reload-agent action.
+ */
enum ra_flags_e {
- ra_supports_reload = 0x01,
+ ra_supports_legacy_reload = (1 << 0),
+ ra_supports_reload_agent = (1 << 1),
};
enum ra_param_flags_e {
- ra_param_unique = 0x01,
- ra_param_private = 0x02,
+ ra_param_unique = (1 << 0),
+ ra_param_private = (1 << 1),
+ ra_param_reloadable = (1 << 2),
+};
+
+// Allowed sources of resource agent meta-data when requesting it
+enum controld_metadata_source_e {
+ controld_metadata_from_cache = (1 << 0),
+ controld_metadata_from_agent = (1 << 1),
};
struct ra_param_s {
char *rap_name;
uint32_t rap_flags; // bitmask of ra_param_flags_s
};
struct ra_metadata_s {
char *ra_version;
GList *ra_params; // ra_param_s
uint32_t ra_flags; // bitmask of ra_flags_e
};
#define controld_set_ra_flags(ra_md, ra_key, flags_to_set) do { \
(ra_md)->ra_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource agent", ra_key, \
(ra_md)->ra_flags, (flags_to_set), #flags_to_set); \
} while (0)
#define controld_set_ra_param_flags(ra_param, flags_to_set) do { \
(ra_param)->rap_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource agent parameter", (ra_param)->rap_name, \
(ra_param)->rap_flags, (flags_to_set), #flags_to_set); \
} while (0)
GHashTable *metadata_cache_new(void);
void metadata_cache_free(GHashTable *mdc);
void metadata_cache_reset(GHashTable *mdc);
void metadata_cache_fini(void);
struct ra_metadata_s *metadata_cache_update(GHashTable *mdc,
lrmd_rsc_info_t *rsc,
const char *metadata_str);
-struct ra_metadata_s *metadata_cache_get(GHashTable *mdc, lrmd_rsc_info_t *rsc);
+struct ra_metadata_s *controld_get_rsc_metadata(lrm_state_t *lrm_state,
+ lrmd_rsc_info_t *rsc,
+ uint32_t source);
static inline const char *
ra_param_flag2text(enum ra_param_flags_e flag)
{
switch (flag) {
+ case ra_param_reloadable:
+ return "reloadable";
case ra_param_unique:
return "unique";
case ra_param_private:
return "private";
default:
return "unknown";
}
}
#endif
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index 4522696212..37beec7ce5 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1299 +1,1306 @@
/*
* Copyright 2013-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#define REMOTE_LRMD_RA "remote"
/* The max start timeout before cmd retry */
#define MAX_START_TIMEOUT_MS 10000
typedef struct remote_ra_cmd_s {
/*! the local node the cmd is issued from */
char *owner;
/*! the remote node the cmd is executed on */
char *rsc_id;
/*! the action to execute */
char *action;
/*! some string the client wants us to give it back */
char *userdata;
char *exit_reason; // descriptive text on error
/*! start delay in ms */
int start_delay;
/*! timer id used for start delay. */
int delay_id;
/*! timeout in ms for cmd */
int timeout;
int remaining_timeout;
/*! recurring interval in ms */
guint interval_ms;
/*! interval timer id */
int interval_id;
int reported_success;
int monitor_timeout_id;
int takeover_timeout_id;
/*! action parameters */
lrmd_key_value_t *params;
/*! executed rc */
int rc;
int op_status;
int call_id;
time_t start_time;
gboolean cancel;
} remote_ra_cmd_t;
enum remote_migration_status {
expect_takeover = 1,
takeover_complete,
};
typedef struct remote_ra_data_s {
crm_trigger_t *work;
remote_ra_cmd_t *cur_cmd;
GList *cmds;
GList *recurring_cmds;
enum remote_migration_status migrate_status;
gboolean active;
/* Maintenance mode is difficult to determine from the controller's context,
* so we have it signalled back with the transition from the scheduler.
*/
gboolean is_maintenance;
/* The same applies to whether we are controlling a guest node or a remote
 * node. Fortunately the transition already carries a meta-attribute for
 * this, and since the situation doesn't change over time, we can note the
 * information down at resource start for later use when the attributes
 * aren't at hand.
*/
gboolean controlling_guest;
} remote_ra_data_t;
static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
static GList *fail_all_monitor_cmds(GList * list);
static void
free_cmd(gpointer user_data)
{
remote_ra_cmd_t *cmd = user_data;
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
}
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
}
if (cmd->takeover_timeout_id) {
g_source_remove(cmd->takeover_timeout_id);
}
free(cmd->owner);
free(cmd->rsc_id);
free(cmd->action);
free(cmd->userdata);
free(cmd->exit_reason);
lrmd_key_value_freeall(cmd->params);
free(cmd);
}
static int
generate_callid(void)
{
static int remote_ra_callid = 0;
remote_ra_callid++;
if (remote_ra_callid <= 0) {
remote_ra_callid = 1;
}
return remote_ra_callid;
}
static gboolean
recurring_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->interval_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
static gboolean
start_delay_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->delay_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node joining
*
* \param[in] node_name Name of newly integrated pacemaker_remote node
*/
static void
remote_node_up(const char *node_name)
{
int call_opt, call_id = 0;
xmlNode *update, *state;
crm_node_t *node;
enum controld_section_e section = controld_section_all;
CRM_CHECK(node_name != NULL, return);
crm_info("Announcing pacemaker_remote node %s", node_name);
/* Clear node's entire state (resource history and transient attributes)
* other than shutdown locks. The transient attributes should and normally
* will be cleared when the node leaves, but since remote node state has a
* number of corner cases, clear them here as well, to be sure.
*/
call_opt = crmd_cib_smart_opt();
if (controld_shutdown_lock_enabled) {
section = controld_section_all_unlocked;
}
controld_delete_node_state(node_name, section, call_opt);
/* Clear node's probed attribute */
update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
/* Ensure node is in the remote peer cache with member status */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
/* pacemaker_remote nodes don't participate in the membership layer,
* so cluster nodes don't automatically get notified when they come and go.
* We send a cluster message to the DC, and update the CIB node state entry,
* so the DC will get it sooner (via message) or later (via CIB refresh),
* and any other interested parties can query the CIB.
*/
send_remote_state_message(node_name, TRUE);
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
state = create_node_state_update(node, node_update_cluster, update,
__func__);
/* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
* needs to be fenced, this flag will allow various actions to determine
* whether the fencing has happened yet.
*/
crm_xml_add(state, XML_NODE_IS_FENCED, "0");
/* TODO: If the remote connection drops, and this (async) CIB update either
* failed or has not yet completed, later actions could mistakenly think the
* node has already been fenced (if the XML_NODE_IS_FENCED attribute was
* previously set, because it won't have been cleared). This could prevent
* actual fencing or allow recurring monitor failures to be cleared too
* soon. Ideally, we wouldn't rely on the CIB for the fenced status.
*/
fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
if (call_id < 0) {
crm_perror(LOG_WARNING, "%s CIB node state setup", node_name);
}
free_xml(update);
}
enum down_opts {
DOWN_KEEP_LRM,
DOWN_ERASE_LRM
};
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node leaving
*
* \param[in] node_name Name of lost node
* \param[in] opts Whether to keep or erase LRM history
*/
static void
remote_node_down(const char *node_name, const enum down_opts opts)
{
xmlNode *update;
int call_id = 0;
int call_opt = crmd_cib_smart_opt();
crm_node_t *node;
/* Purge node from attrd's memory */
update_attrd_remote_node_removed(node_name, NULL);
/* Normally, only node attributes should be erased, and the resource history
* should be kept until the node comes back up. However, after a successful
* fence, we want to clear the history as well, so we don't think resources
* are still running on the node.
*/
if (opts == DOWN_ERASE_LRM) {
controld_delete_node_state(node_name, controld_section_all, call_opt);
} else {
controld_delete_node_state(node_name, controld_section_attrs, call_opt);
}
/* Ensure node is in the remote peer cache with lost state */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
/* Notify DC */
send_remote_state_message(node_name, FALSE);
/* Update CIB node state */
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
create_node_state_update(node, node_update_cluster, update, __func__);
fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
if (call_id < 0) {
crm_perror(LOG_ERR, "%s CIB node state update", node_name);
}
free_xml(update);
}
/*!
* \internal
* \brief Handle effects of a remote RA command on node state
*
* \param[in] cmd Completed remote RA command
*/
static void
check_remote_node_state(remote_ra_cmd_t *cmd)
{
/* Only successful actions can change node state */
if (cmd->rc != PCMK_OCF_OK) {
return;
}
if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
remote_node_up(cmd->rsc_id);
} else if (pcmk__str_eq(cmd->action, "migrate_from", pcmk__str_casei)) {
/* After a successful migration, we don't need to do remote_node_up()
* because the DC already knows the node is up, and we don't want to
* clear LRM history etc. We do need to add the remote node to this
* host's remote peer cache, because (unless it happens to be DC)
* it hasn't been tracking the remote node, and other code relies on
* the cache to distinguish remote nodes from unseen cluster nodes.
*/
crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
} else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
if (ra_data) {
if (ra_data->migrate_status != takeover_complete) {
/* Stop means down if we didn't successfully migrate elsewhere */
remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
} else if (AM_I_DC == FALSE) {
/* Only the connection host and DC track node state,
* so if the connection migrated elsewhere and we aren't DC,
* un-cache the node, so we don't have stale info
*/
crm_remote_peer_cache_remove(cmd->rsc_id);
}
}
}
/* We don't do anything for successful monitors, which is correct for
* routine recurring monitors, and for monitors on nodes where the
* connection isn't supposed to be (the cluster will stop the connection in
* that case). However, if the initial probe finds the connection already
* active on the node where we want it, we probably should do
* remote_node_up(). Unfortunately, we can't distinguish that case here.
* Given that connections have to be initiated by the cluster, the chance of
* that should be close to zero.
*/
}
static void
report_remote_ra_result(remote_ra_cmd_t * cmd)
{
lrmd_event_data_t op = { 0, };
check_remote_node_state(cmd);
op.type = lrmd_event_exec_complete;
op.rsc_id = cmd->rsc_id;
op.op_type = cmd->action;
op.user_data = cmd->userdata;
op.exit_reason = cmd->exit_reason;
op.timeout = cmd->timeout;
op.interval_ms = cmd->interval_ms;
op.rc = cmd->rc;
op.op_status = cmd->op_status;
op.t_run = (unsigned int) cmd->start_time;
op.t_rcchange = (unsigned int) cmd->start_time;
if (cmd->reported_success && cmd->rc != PCMK_OCF_OK) {
op.t_rcchange = (unsigned int) time(NULL);
/* This edge case will likely never occur, but if it does, the result is
 * that a failure will not be processed correctly. It is only remotely
 * possible because we can detect that a connection resource's TCP
 * connection has failed at any moment after start has completed; the
 * actual recurring operation is just a connectivity ping.
 *
 * Basically, we are not guaranteed that a first successful monitor op and
 * a subsequent failed monitor op will not share the same timestamp, so we
 * have to make it look like the operations occurred at separate times. */
if (op.t_rcchange == op.t_run) {
op.t_rcchange++;
}
}
if (cmd->params) {
lrmd_key_value_t *tmp;
op.params = pcmk__strkey_table(free, free);
for (tmp = cmd->params; tmp; tmp = tmp->next) {
g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
}
}
op.call_id = cmd->call_id;
op.remote_nodename = cmd->owner;
lrm_op_callback(&op);
if (op.params) {
g_hash_table_destroy(op.params);
}
}
static void
update_remaining_timeout(remote_ra_cmd_t * cmd)
{
cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
}
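/* Illustrative note (editor's sketch): with cmd->timeout == 30000 ms and
 * about 12 seconds elapsed since cmd->start_time, the line above yields
 * (30 - 12) * 1000 == 18000 ms; the intermediate division by 1000 means the
 * remaining timeout is computed at whole-second granularity.
 */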
static gboolean
retry_start_cmd_cb(gpointer data)
{
lrm_state_t *lrm_state = data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd = NULL;
int rc = -1;
if (!ra_data || !ra_data->cur_cmd) {
return FALSE;
}
cmd = ra_data->cur_cmd;
if (!pcmk__strcase_any_of(cmd->action, "start", "migrate_from", NULL)) {
return FALSE;
}
update_remaining_timeout(cmd);
if (cmd->remaining_timeout > 0) {
rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
}
if (rc != 0) {
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
cmd->op_status = PCMK_LRM_OP_ERROR;
report_remote_ra_result(cmd);
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
ra_data->cur_cmd = NULL;
free_cmd(cmd);
} else {
/* wait for connection event */
}
return FALSE;
}
static gboolean
connection_takeover_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
crm_info("takeover event timed out for node %s", cmd->rsc_id);
cmd->takeover_timeout_id = 0;
lrm_state = lrm_state_find(cmd->rsc_id);
handle_remote_ra_stop(lrm_state, cmd);
free_cmd(cmd);
return FALSE;
}
static gboolean
monitor_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
lrm_state = lrm_state_find(cmd->rsc_id);
crm_info("Timed out waiting for remote poke response from %s%s",
cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
cmd->monitor_timeout_id = 0;
cmd->op_status = PCMK_LRM_OP_TIMEOUT;
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
if (lrm_state && lrm_state->remote_ra_data) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (ra_data->cur_cmd == cmd) {
ra_data->cur_cmd = NULL;
}
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
}
report_remote_ra_result(cmd);
free_cmd(cmd);
if(lrm_state) {
lrm_state_disconnect(lrm_state);
}
return FALSE;
}
static void
synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
{
lrmd_event_data_t op = { 0, };
if (lrm_state == NULL) {
/* if lrm_state not given assume local */
lrm_state = lrm_state_find(fsa_our_uname);
}
CRM_ASSERT(lrm_state != NULL);
op.type = lrmd_event_exec_complete;
op.rsc_id = rsc_id;
op.op_type = op_type;
op.rc = PCMK_OCF_OK;
op.op_status = PCMK_LRM_OP_DONE;
op.t_run = (unsigned int) time(NULL);
op.t_rcchange = op.t_run;
op.call_id = generate_callid();
process_lrm_event(lrm_state, &op, NULL, NULL);
}
void
remote_lrm_op_callback(lrmd_event_data_t * op)
{
gboolean cmd_handled = FALSE;
lrm_state_t *lrm_state = NULL;
remote_ra_data_t *ra_data = NULL;
remote_ra_cmd_t *cmd = NULL;
crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
"(%d) status=%s (%d)",
(op->op_type? op->op_type : ""), (op->op_type? " " : ""),
lrmd_event_type2str(op->type), op->remote_nodename,
services_ocf_exitcode_str(op->rc), op->rc,
services_lrm_status_str(op->op_status), op->op_status);
lrm_state = lrm_state_find(op->remote_nodename);
if (!lrm_state || !lrm_state->remote_ra_data) {
crm_debug("No state information found for remote connection event");
return;
}
ra_data = lrm_state->remote_ra_data;
if (op->type == lrmd_event_new_client) {
// Another client has connected to the remote daemon
if (ra_data->migrate_status == expect_takeover) {
// Great, we knew this was coming
ra_data->migrate_status = takeover_complete;
} else {
crm_err("Unexpected pacemaker_remote client takeover for %s. Disconnecting", op->remote_nodename);
/* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */
/* Do not free lrm_state->conn yet. */
/* It'll be freed in the following stop action. */
lrm_state_disconnect_only(lrm_state);
}
return;
}
/* filter all EXEC events up */
if (op->type == lrmd_event_exec_complete) {
if (ra_data->migrate_status == takeover_complete) {
crm_debug("ignoring event, this connection is taken over by another node");
} else {
lrm_op_callback(op);
}
return;
}
if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
if (ra_data->active == FALSE) {
crm_debug("Disconnection from Pacemaker Remote node %s complete",
lrm_state->node_name);
} else if (!remote_ra_is_in_maintenance(lrm_state)) {
crm_err("Lost connection to Pacemaker Remote node %s",
lrm_state->node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
} else {
crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
lrm_state->node_name);
/* Do roughly what a 'stop' on the remote-resource would do */
handle_remote_ra_stop(lrm_state, NULL);
remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
/* now fake the reply of a successful 'stop' */
synthesize_lrmd_success(NULL, lrm_state->node_name, "stop");
}
return;
}
if (!ra_data->cur_cmd) {
crm_debug("no event to match");
return;
}
cmd = ra_data->cur_cmd;
/* Start actions and migrate from actions complete after connection
* comes back to us. */
if (op->type == lrmd_event_connect && pcmk__strcase_any_of(cmd->action, "start",
"migrate_from", NULL)) {
if (op->connection_rc < 0) {
update_remaining_timeout(cmd);
if (op->connection_rc == -ENOKEY) {
// Hard error, don't retry
cmd->op_status = PCMK_LRM_OP_ERROR;
cmd->rc = PCMK_OCF_INVALID_PARAM;
cmd->exit_reason = strdup("Authentication key not readable");
} else if (cmd->remaining_timeout > 3000) {
crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
return;
} else {
crm_trace("can't reschedule start, remaining timeout too small %d",
cmd->remaining_timeout);
cmd->op_status = PCMK_LRM_OP_TIMEOUT;
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
}
} else {
lrm_state_reset_tables(lrm_state, TRUE);
cmd->rc = PCMK_OCF_OK;
cmd->op_status = PCMK_LRM_OP_DONE;
ra_data->active = TRUE;
}
crm_debug("Remote connection event matched %s action", cmd->action);
report_remote_ra_result(cmd);
cmd_handled = TRUE;
} else if (op->type == lrmd_event_poke && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
cmd->monitor_timeout_id = 0;
}
/* Only report success the first time, after that only worry about failures.
* For this function, if we get the poke pack, it is always a success. Pokes
* only fail if the send fails, or the response times out. */
if (!cmd->reported_success) {
cmd->rc = PCMK_OCF_OK;
cmd->op_status = PCMK_LRM_OP_DONE;
report_remote_ra_result(cmd);
cmd->reported_success = 1;
}
crm_debug("Remote poke event matched %s action", cmd->action);
/* success, keep rescheduling if interval is present. */
if (cmd->interval_ms && (cmd->cancel == FALSE)) {
ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
cmd->interval_id = g_timeout_add(cmd->interval_ms,
recurring_helper, cmd);
cmd = NULL; /* prevent free */
}
cmd_handled = TRUE;
} else if (op->type == lrmd_event_disconnect && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
if (ra_data->active == TRUE && (cmd->cancel == FALSE)) {
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
cmd->op_status = PCMK_LRM_OP_ERROR;
report_remote_ra_result(cmd);
crm_err("Remote connection to %s unexpectedly dropped during monitor",
lrm_state->node_name);
}
cmd_handled = TRUE;
} else if (op->type == lrmd_event_new_client && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
handle_remote_ra_stop(lrm_state, cmd);
cmd_handled = TRUE;
} else {
crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
}
if (cmd_handled) {
ra_data->cur_cmd = NULL;
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
free_cmd(cmd);
}
}
static void
handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
{
remote_ra_data_t *ra_data = NULL;
CRM_ASSERT(lrm_state);
ra_data = lrm_state->remote_ra_data;
if (ra_data->migrate_status != takeover_complete) {
/* delete pending ops when ever the remote connection is intentionally stopped */
g_hash_table_remove_all(lrm_state->pending_ops);
} else {
/* we no longer hold the history if this connection has been migrated,
* however, we keep metadata cache for future use */
lrm_state_reset_tables(lrm_state, FALSE);
}
ra_data->active = FALSE;
lrm_state_disconnect(lrm_state);
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
ra_data->cmds = NULL;
ra_data->recurring_cmds = NULL;
ra_data->cur_cmd = NULL;
if (cmd) {
cmd->rc = PCMK_OCF_OK;
cmd->op_status = PCMK_LRM_OP_DONE;
report_remote_ra_result(cmd);
}
}
static int
handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
{
const char *server = NULL;
lrmd_key_value_t *tmp = NULL;
int port = 0;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
for (tmp = cmd->params; tmp; tmp = tmp->next) {
if (pcmk__strcase_any_of(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR,
XML_RSC_ATTR_REMOTE_RA_SERVER, NULL)) {
server = tmp->value;
} else if (pcmk__str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT, pcmk__str_casei)) {
port = atoi(tmp->value);
} else if (pcmk__str_eq(tmp->key, CRM_META "_" XML_RSC_ATTR_CONTAINER, pcmk__str_casei)) {
ra_data->controlling_guest = TRUE;
}
}
return lrm_state_remote_connect_async(lrm_state, server, port, timeout_used);
}
static gboolean
handle_remote_ra_exec(gpointer user_data)
{
int rc = 0;
lrm_state_t *lrm_state = user_data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd;
GList *first = NULL;
if (ra_data->cur_cmd) {
/* still waiting on previous cmd */
return TRUE;
}
while (ra_data->cmds) {
first = ra_data->cmds;
cmd = first->data;
if (cmd->delay_id) {
/* still waiting for start delay timer to trip */
return TRUE;
}
ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
g_list_free_1(first);
if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) {
ra_data->migrate_status = 0;
rc = handle_remote_ra_start(lrm_state, cmd, cmd->timeout);
if (rc == 0) {
/* take care of this later when we get async connection result */
crm_debug("Initiated async remote connection, %s action will complete after connect event",
cmd->action);
ra_data->cur_cmd = cmd;
return TRUE;
} else {
crm_debug("Could not initiate remote connection for %s action",
cmd->action);
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
cmd->op_status = PCMK_LRM_OP_ERROR;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, "monitor")) {
if (lrm_state_is_connected(lrm_state) == TRUE) {
rc = lrm_state_poke_connection(lrm_state);
if (rc < 0) {
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
cmd->op_status = PCMK_LRM_OP_ERROR;
}
} else {
rc = -1;
cmd->op_status = PCMK_LRM_OP_DONE;
cmd->rc = PCMK_OCF_NOT_RUNNING;
}
if (rc == 0) {
crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
cmd->rsc_id);
ra_data->cur_cmd = cmd;
cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, "stop")) {
if (ra_data->migrate_status == expect_takeover) {
/* briefly wait on stop for the takeover event to occur. If the
* takeover event does not occur during the wait period, that's fine.
* It just means that the remote-node's lrm_status section is going to get
* cleared which will require all the resources running in the remote-node
* to be explicitly re-detected via probe actions. If the takeover does occur
* successfully, then we can leave the status section intact. */
cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
ra_data->cur_cmd = cmd;
return TRUE;
}
handle_remote_ra_stop(lrm_state, cmd);
} else if (!strcmp(cmd->action, "migrate_to")) {
ra_data->migrate_status = expect_takeover;
cmd->rc = PCMK_OCF_OK;
cmd->op_status = PCMK_LRM_OP_DONE;
report_remote_ra_result(cmd);
- } else if (!strcmp(cmd->action, "reload")) {
- /* reloads are a no-op right now, add logic here when they become important */
+ } else if (pcmk__str_any_of(cmd->action, CRMD_ACTION_RELOAD,
+ CRMD_ACTION_RELOAD_AGENT, NULL)) {
+ /* Currently the only reloadable parameter is reconnect_interval,
+ * which is only used by the scheduler via the CIB, so reloads are a
+ * no-op.
+ *
+ * @COMPAT DC <2.1.0: We only need to check for "reload" in case
+ * we're in a rolling upgrade with a DC scheduling "reload" instead
+ * of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
+ * so this would work for that purpose as well.
+ */
cmd->rc = PCMK_OCF_OK;
cmd->op_status = PCMK_LRM_OP_DONE;
report_remote_ra_result(cmd);
}
free_cmd(cmd);
}
return TRUE;
}
static void
remote_ra_data_init(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = NULL;
if (lrm_state->remote_ra_data) {
return;
}
ra_data = calloc(1, sizeof(remote_ra_data_t));
ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
lrm_state->remote_ra_data = ra_data;
}
void
remote_ra_cleanup(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (!ra_data) {
return;
}
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
mainloop_destroy_trigger(ra_data->work);
free(ra_data);
lrm_state->remote_ra_data = NULL;
}
gboolean
is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
{
if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
return TRUE;
}
if (id && lrm_state_find(id) && !pcmk__str_eq(id, fsa_our_uname, pcmk__str_casei)) {
return TRUE;
}
return FALSE;
}
lrmd_rsc_info_t *
remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
{
lrmd_rsc_info_t *info = NULL;
if ((lrm_state_find(rsc_id))) {
info = calloc(1, sizeof(lrmd_rsc_info_t));
info->id = strdup(rsc_id);
info->type = strdup(REMOTE_LRMD_RA);
info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
info->provider = strdup("pacemaker");
}
return info;
}
static gboolean
is_remote_ra_supported_action(const char *action)
{
- if (!action) {
- return FALSE;
- } else if (strcmp(action, "start") &&
- strcmp(action, "stop") &&
- strcmp(action, "reload") &&
- strcmp(action, "migrate_to") &&
- strcmp(action, "migrate_from") && strcmp(action, "monitor")) {
- return FALSE;
- }
-
- return TRUE;
+ return pcmk__str_any_of(action,
+ CRMD_ACTION_START,
+ CRMD_ACTION_STOP,
+ CRMD_ACTION_STATUS,
+ CRMD_ACTION_MIGRATE,
+ CRMD_ACTION_MIGRATED,
+ CRMD_ACTION_RELOAD_AGENT,
+ CRMD_ACTION_RELOAD,
+ NULL);
}
static GList *
fail_all_monitor_cmds(GList * list)
{
GList *rm_list = NULL;
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
rm_list = g_list_append(rm_list, cmd);
}
}
for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
cmd->op_status = PCMK_LRM_OP_ERROR;
crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
report_remote_ra_result(cmd);
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
/* frees only the list data, not the cmds */
g_list_free(rm_list);
return list;
}
static GList *
remove_cmd(GList * list, const char *action, guint interval_ms)
{
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
break;
}
cmd = NULL;
}
if (cmd) {
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
return list;
}
int
remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, guint interval_ms)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_data_t *ra_data = NULL;
connection_rsc = lrm_state_find(rsc_id);
if (!connection_rsc || !connection_rsc->remote_ra_data) {
return -EINVAL;
}
ra_data = connection_rsc->remote_ra_data;
ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
interval_ms);
if (ra_data->cur_cmd &&
(ra_data->cur_cmd->interval_ms == interval_ms) &&
(pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
ra_data->cur_cmd->cancel = TRUE;
}
return 0;
}
static remote_ra_cmd_t *
handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
const char *userdata)
{
GList *gIter = NULL;
remote_ra_cmd_t *cmd = NULL;
/* there are 3 places a potential duplicate monitor operation
* could exist.
* 1. recurring_cmds list. where the op is waiting for its next interval
* 2. cmds list, where the op is queued to get executed immediately
* 3. cur_cmd, which means the monitor op is in flight right now.
*/
if (interval_ms == 0) {
return NULL;
}
if (ra_data->cur_cmd &&
ra_data->cur_cmd->cancel == FALSE &&
(ra_data->cur_cmd->interval_ms == interval_ms) &&
pcmk__str_eq(ra_data->cur_cmd->action, "monitor", pcmk__str_casei)) {
cmd = ra_data->cur_cmd;
goto handle_dup;
}
for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
goto handle_dup;
}
}
for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
goto handle_dup;
}
}
return NULL;
handle_dup:
crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
cmd->rsc_id, "monitor", interval_ms);
/* update the userdata */
if (userdata) {
free(cmd->userdata);
cmd->userdata = strdup(userdata);
}
/* if we've already reported success, generate a new call id */
if (cmd->reported_success) {
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
cmd->reported_success = 0;
}
/* if we have an interval_id set, that means we are in the process of
* waiting for this cmd's next interval. instead of waiting, cancel
* the timer and execute the action immediately */
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
cmd->interval_id = 0;
recurring_helper(cmd);
}
return cmd;
}
int
remote_ra_exec(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
const char *userdata, guint interval_ms,
int timeout, /* ms */
int start_delay, /* ms */
lrmd_key_value_t * params)
{
int rc = 0;
lrm_state_t *connection_rsc = NULL;
remote_ra_cmd_t *cmd = NULL;
remote_ra_data_t *ra_data = NULL;
if (is_remote_ra_supported_action(action) == FALSE) {
rc = -EINVAL;
goto exec_done;
}
connection_rsc = lrm_state_find(rsc_id);
if (!connection_rsc) {
rc = -EINVAL;
goto exec_done;
}
remote_ra_data_init(connection_rsc);
ra_data = connection_rsc->remote_ra_data;
cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
if (cmd) {
rc = cmd->call_id;
goto exec_done;
}
cmd = calloc(1, sizeof(remote_ra_cmd_t));
cmd->owner = strdup(lrm_state->node_name);
cmd->rsc_id = strdup(rsc_id);
cmd->action = strdup(action);
cmd->userdata = strdup(userdata);
cmd->interval_ms = interval_ms;
cmd->timeout = timeout;
cmd->start_delay = start_delay;
cmd->params = params;
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
if (cmd->start_delay) {
cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
return cmd->call_id;
exec_done:
lrmd_key_value_freeall(params);
return rc;
}
/*!
* \internal
* \brief Immediately fail all monitors of a remote node, if proxied here
*
* \param[in] node_name Name of pacemaker_remote node
*/
void
remote_ra_fail(const char *node_name)
{
lrm_state_t *lrm_state = lrm_state_find(node_name);
if (lrm_state && lrm_state_is_connected(lrm_state)) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
crm_info("Failing monitors on pacemaker_remote node %s", node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
}
}
/* A guest node fencing implied by host fencing appears in the transition
 * graph as a pseudo-event with task "stonith" containing a <downed> element
 * that lists the guest node.
 */
#define XPATH_PSEUDO_FENCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
"[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \
"/" XML_CIB_TAG_NODE
/*!
* \internal
* \brief Check a pseudo-action for Pacemaker Remote node side effects
*
* \param[in] xml XML of pseudo-action to check
*/
void
remote_ra_process_pseudo(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
if (numXpathResults(search) == 1) {
xmlNode *result = getXpathResult(search, 0);
/* Normally, we handle the necessary side effects of a guest node stop
* action when reporting the remote agent's result. However, if the stop
* is implied due to fencing, it will be a fencing pseudo-event, and
* there won't be a result to report. Handle that case here.
*
* This will result in a duplicate call to remote_node_down() if the
* guest stop was real instead of implied, but that shouldn't hurt.
*
* There is still one corner case that isn't handled: if a guest node
* isn't running any resources when its host is fenced, it will appear
* to be cleanly stopped, so there will be no pseudo-fence, and our
* peer cache state will be incorrect unless and until the guest is
* recovered.
*/
if (result) {
const char *remote = ID(result);
if (remote) {
remote_node_down(remote, DOWN_ERASE_LRM);
}
}
}
freeXpathObject(search);
}
static void
remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
xmlNode *update, *state;
int call_opt, call_id = 0;
crm_node_t *node;
call_opt = crmd_cib_smart_opt();
node = crm_remote_peer_get(lrm_state->node_name);
CRM_CHECK(node != NULL, return);
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
state = create_node_state_update(node, node_update_none, update,
__func__);
crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
if (call_id < 0) {
crm_perror(LOG_WARNING, "%s CIB node state update failed", lrm_state->node_name);
} else {
/* TODO: still not 100% sure that async update will succeed ... */
ra_data->is_maintenance = maintenance;
}
free_xml(update);
}
#define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
"[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \
XML_GRAPH_TAG_MAINTENANCE
/*!
* \internal
* \brief Check a pseudo-action holding updates for maintenance state
*
* \param[in] xml XML of pseudo-action to check
*/
void
remote_ra_process_maintenance_nodes(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
if (numXpathResults(search) == 1) {
xmlNode *node;
int cnt = 0, cnt_remote = 0;
for (node =
first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
node != NULL; node = pcmk__xml_next(node)) {
lrm_state_t *lrm_state = lrm_state_find(ID(node));
cnt++;
if (lrm_state && lrm_state->remote_ra_data &&
((remote_ra_data_t *) lrm_state->remote_ra_data)->active) {
int is_maint;
cnt_remote++;
pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE),
&is_maint, 0);
remote_ra_maintenance(lrm_state, is_maint);
}
}
crm_trace("Action holds %d nodes (%d remotes found) "
"adjusting maintenance-mode", cnt, cnt_remote);
}
freeXpathObject(search);
}
gboolean
remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return ra_data->is_maintenance;
}
gboolean
remote_ra_controlling_guest(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return ra_data->controlling_guest;
}
diff --git a/doc/sphinx/Pacemaker_Explained/advanced-options.rst b/doc/sphinx/Pacemaker_Explained/advanced-options.rst
index e6c2966046..28f737518e 100644
--- a/doc/sphinx/Pacemaker_Explained/advanced-options.rst
+++ b/doc/sphinx/Pacemaker_Explained/advanced-options.rst
@@ -1,767 +1,760 @@
Advanced Configuration
----------------------
.. index::
single: start-delay; operation attribute
single: interval-origin; operation attribute
single: interval; interval-origin
single: operation; interval-origin
single: operation; start-delay
Specifying When Recurring Actions are Performed
###############################################
By default, recurring actions are scheduled relative to when the resource
started. In some cases, you might prefer that a recurring action start relative
to a specific date and time. For example, you might schedule an in-depth
monitor to run once every 24 hours, and want it to run outside business hours.
To do this, set the operation's ``interval-origin``. The cluster uses this point
to calculate the correct ``start-delay`` such that the operation will occur
at ``interval-origin`` plus a multiple of the operation interval.
For example, if the recurring operation's interval is 24h, its
``interval-origin`` is set to 02:00, and it is currently 14:32, then the
cluster would initiate the operation after 11 hours and 28 minutes.
The value specified for ``interval`` and ``interval-origin`` can be any
date/time conforming to the
`ISO8601 standard `_. By way of
example, to specify an operation that would run on the first Monday of
2021 and every Monday after that, you would add:
.. topic:: Example recurring action that runs relative to base date/time
.. code-block:: xml
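   <!-- Illustrative sketch only; the operation ID and resource context are
        assumptions. 2021-W01-1 is the ISO8601 spelling of the first Monday
        of 2021. -->
   <op id="intensive-monitor" name="monitor" interval="P7D"
       interval-origin="2021-W01-1"/>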
.. index::
single: resource; failure recovery
single: operation; failure recovery
.. _failure-handling:
Handling Resource Failure
#########################
By default, Pacemaker will attempt to recover failed resources by restarting
them. However, failure recovery is highly configurable.
.. index::
single: resource; failure count
single: operation; failure count
Failure Counts
______________
Pacemaker tracks resource failures for each combination of node, resource, and
operation (start, stop, monitor, etc.).
You can query the fail count for a particular node, resource, and/or operation
using the ``crm_failcount`` command. For example, to see how many times the
10-second monitor for ``myrsc`` has failed on ``node1``, run:
.. code-block:: none
# crm_failcount --query -r myrsc -N node1 -n monitor -I 10s
If you omit the node, ``crm_failcount`` will use the local node. If you omit
the operation and interval, ``crm_failcount`` will display the sum of the fail
counts for all operations on the resource.
You can use ``crm_resource --cleanup`` or ``crm_failcount --delete`` to clear
fail counts. For example, to clear the above monitor failures, run:
.. code-block:: none
# crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s
If you omit the resource, ``crm_resource --cleanup`` will clear failures for
all resources. If you omit the node, it will clear failures on all nodes. If
you omit the operation and interval, it will clear the failures for all
operations on the resource.
.. note::
Even when cleaning up only a single operation, all failed operations will
disappear from the status display. This allows us to trigger a re-check of
the resource's current status.
Higher-level tools may provide other commands for querying and clearing
fail counts.
The ``crm_mon`` tool shows the current cluster status, including any failed
operations. To see the current fail counts for any failed resources, call
``crm_mon`` with the ``--failcounts`` option. This shows the fail counts per
resource (that is, the sum of any operation fail counts for the resource).
.. index::
single: migration-threshold; resource meta-attribute
single: resource; migration-threshold
Failure Response
________________
Normally, if a running resource fails, pacemaker will try to stop it and start
it again. Pacemaker will choose the best location to start it each time, which
may be the same node that it failed on.
However, if a resource fails repeatedly, it is possible that there is an
underlying problem on that node, and you might want to try a different node
in such a case. Pacemaker allows you to set your preference via the
``migration-threshold`` resource meta-attribute. [#]_
If you define ``migration-threshold`` to *N* for a resource, it will be banned
from the original node after *N* failures there.
.. note::
The ``migration-threshold`` is per *resource*, even though fail counts are
tracked per *operation*. The operation fail counts are added together
to compare against the ``migration-threshold``.
By default, fail counts remain until manually cleared by an administrator
using ``crm_resource --cleanup`` or ``crm_failcount --delete`` (hopefully after
first fixing the failure's cause). It is possible to have fail counts expire
automatically by setting the ``failure-timeout`` resource meta-attribute.
.. important::
A successful operation does not clear past failures. If a recurring monitor
operation fails once, succeeds many times, then fails again days later, its
fail count is 2. Fail counts are cleared only by manual intervention or
failure timeout.
For example, setting ``migration-threshold`` to 2 and ``failure-timeout`` to
``60s`` would cause the resource to move to a new node after 2 failures, and
allow it to move back (depending on stickiness and constraint scores) after one
minute.
.. note::
``failure-timeout`` is measured since the most recent failure. That is, older
failures do not individually time out and lower the fail count. Instead, all
failures are timed out simultaneously (and the fail count is reset to 0) if
there is no new failure for the timeout period.
There are two exceptions to the migration threshold: when a resource either
fails to start or fails to stop.
If the cluster property ``start-failure-is-fatal`` is set to ``true`` (which is
the default), start failures cause the fail count to be set to ``INFINITY`` and
thus always cause the resource to move immediately.
Stop failures are slightly different and crucial. If a resource fails to stop
and fencing is enabled, then the cluster will fence the node in order to be
able to start the resource elsewhere. If fencing is disabled, then the cluster
has no way to continue and will not try to start the resource elsewhere, but
will try to stop it again after any failure timeout or clearing.
.. index::
single: resource; move
Moving Resources
################
Moving Resources Manually
_________________________
There are primarily two occasions when you would want to move a resource from
its current location: when the whole node is under maintenance, and when a
single resource needs to be moved.
.. index::
single: standby mode
single: node; standby mode
Standby Mode
~~~~~~~~~~~~
Since everything eventually comes down to a score, you could create constraints
for every resource to prevent them from running on one node. While Pacemaker
configuration can seem convoluted at times, not even we would require this of
administrators.
Instead, you can set a special node attribute which tells the cluster "don't
let anything run here". There is even a helpful tool to help query and set it,
called ``crm_standby``. To check the standby status of the current machine,
run:
.. code-block:: none
# crm_standby -G
A value of ``on`` indicates that the node is *not* able to host any resources,
while a value of ``off`` says that it *can*.
You can also check the status of other nodes in the cluster by specifying the
`--node` option:
.. code-block:: none
# crm_standby -G --node sles-2
To change the current node's standby status, use ``-v`` instead of ``-G``:
.. code-block:: none
# crm_standby -v on
Again, you can change another host's value by supplying a hostname with
``--node``.
A cluster node in standby mode will not run resources, but still contributes to
quorum, and may fence or be fenced by nodes.
Moving One Resource
~~~~~~~~~~~~~~~~~~~
When only one resource is required to move, we could do this by creating
location constraints. However, once again we provide a user-friendly shortcut
as part of the ``crm_resource`` command, which creates and modifies the extra
constraints for you. If ``Email`` were running on ``sles-1`` and you wanted it
moved to a specific location, the command would look something like:
.. code-block:: none
# crm_resource -M -r Email -H sles-2
Behind the scenes, the tool will create the following location constraint:
.. code-block:: xml
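   <!-- Minimal sketch; the constraint ID that crm_resource actually
        generates may differ. -->
   <rsc_location id="cli-prefer-Email" rsc="Email" node="sles-2" score="INFINITY"/>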
It is important to note that subsequent invocations of ``crm_resource -M`` are
not cumulative. So, if you ran these commands:
.. code-block:: none
# crm_resource -M -r Email -H sles-2
# crm_resource -M -r Email -H sles-3
then it is as if you had never performed the first command.
To allow the resource to move back again, use:
.. code-block:: none
# crm_resource -U -r Email
Note the use of the word *allow*. The resource *can* move back to its original
location, but depending on ``resource-stickiness``, location constraints, and
so forth, it might stay where it is.
To be absolutely certain that it moves back to ``sles-1``, move it there before
issuing the call to ``crm_resource -U``:
.. code-block:: none
# crm_resource -M -r Email -H sles-1
# crm_resource -U -r Email
Alternatively, if you only care that the resource should be moved from its
current location, try:
.. code-block:: none
# crm_resource -B -r Email
which will instead create a negative constraint, like:
.. code-block:: xml
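   <!-- Minimal sketch; the constraint ID that crm_resource actually
        generates may differ. -->
   <rsc_location id="cli-ban-Email-on-sles-1" rsc="Email" node="sles-1" score="-INFINITY"/>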
This will achieve the desired effect, but will also have long-term
consequences. As the tool will warn you, the creation of a ``-INFINITY``
constraint will prevent the resource from running on that node until
``crm_resource -U`` is used. This includes the situation where every other
cluster node is no longer available!
In some cases, such as when ``resource-stickiness`` is set to ``INFINITY``, it
is possible that you will end up with the problem described in
:ref:`node-score-equal`. The tool can detect some of these cases and deals with
them by creating both positive and negative constraints. For example:
.. code-block:: xml
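   <!-- Minimal sketch of the pair of constraints the tool might create in
        this situation; the IDs are assumptions. -->
   <rsc_location id="cli-ban-Email-on-sles-1" rsc="Email" node="sles-1" score="-INFINITY"/>
   <rsc_location id="cli-prefer-Email" rsc="Email" node="sles-2" score="INFINITY"/>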
which has the same long-term consequences as discussed earlier.
Moving Resources Due to Connectivity Changes
____________________________________________
You can configure the cluster to move resources when external connectivity is
lost in two steps.
.. index::
single: ocf:pacemaker:ping resource
single: ping resource
Tell Pacemaker to Monitor Connectivity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First, add an ``ocf:pacemaker:ping`` resource to the cluster. The ``ping``
resource uses the system utility of the same name to test whether a list of
machines (specified by DNS hostname or IP address) are reachable, and uses the
results to maintain a node attribute.
The node attribute is called ``pingd`` by default, but is customizable in order
to allow multiple ping groups to be defined.
Normally, the ping resource should run on all cluster nodes, which means that
you'll need to create a clone. A template for this can be found below, along
with a description of the most interesting parameters.
.. table:: **Commonly Used ocf:pacemaker:ping Resource Parameters**
+--------------------+--------------------------------------------------------------+
| Resource Parameter | Description |
+====================+==============================================================+
| dampen | .. index:: |
| | single: ocf:pacemaker:ping resource; dampen parameter |
| | single: dampen; ocf:pacemaker:ping resource parameter |
| | |
| | The time to wait (dampening) for further changes to occur. |
| | Use this to prevent a resource from bouncing around the |
| | cluster when cluster nodes notice the loss of connectivity |
| | at slightly different times. |
+--------------------+--------------------------------------------------------------+
| multiplier | .. index:: |
| | single: ocf:pacemaker:ping resource; multiplier parameter |
| | single: multiplier; ocf:pacemaker:ping resource parameter |
| | |
| | The number of connected ping nodes gets multiplied by this |
| | value to get a score. Useful when there are multiple ping |
| | nodes configured. |
+--------------------+--------------------------------------------------------------+
| host_list | .. index:: |
| | single: ocf:pacemaker:ping resource; host_list parameter |
| | single: host_list; ocf:pacemaker:ping resource parameter |
| | |
| | The machines to contact in order to determine the current |
| | connectivity status. Allowed values include resolvable DNS |
| | connectivity host names, IPv4 addresses, and IPv6 addresses. |
+--------------------+--------------------------------------------------------------+
.. topic:: Example ping resource that checks node connectivity once every minute
.. code-block:: xml
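   <!-- Illustrative sketch; the IDs, host list, and dampen/multiplier values
        are assumptions rather than recommendations. -->
   <clone id="Connected">
     <primitive id="ping" class="ocf" provider="pacemaker" type="ping">
       <instance_attributes id="ping-attrs">
         <nvpair id="ping-dampen" name="dampen" value="5s"/>
         <nvpair id="ping-multiplier" name="multiplier" value="1000"/>
         <nvpair id="ping-hosts" name="host_list" value="my.gateway.example www.bigcorp.example"/>
       </instance_attributes>
       <operations>
         <op id="ping-monitor-60s" interval="60s" name="monitor"/>
       </operations>
     </primitive>
   </clone>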
.. important::
You're only half done. The next section deals with telling Pacemaker how to
deal with the connectivity status that ``ocf:pacemaker:ping`` is recording.
Tell Pacemaker How to Interpret the Connectivity Data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. important::
Before attempting the following, make sure you understand
:ref:`rules`.
There are a number of ways to use the connectivity data.
The most common setup is for people to have a single ping target (for example,
the service network's default gateway), to prevent the cluster from running a
resource on any unconnected node.
.. topic:: Don't run a resource on unconnected nodes
.. code-block:: xml
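   <!-- Minimal sketch; the resource name "Webserver" and the IDs are
        assumptions, and "pingd" is the default node attribute name. -->
   <rsc_location id="WebServer-no-connectivity" rsc="Webserver">
     <rule id="ping-exclude-rule" score="-INFINITY">
       <expression id="ping-exclude" attribute="pingd" operation="not_defined"/>
     </rule>
   </rsc_location>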
A more complex setup is to have a number of ping targets configured. You can
require the cluster to only run resources on nodes that can connect to all (or
a minimum subset) of them.
.. topic:: Run only on nodes connected to three or more ping targets
.. code-block:: xml
...
...
...
Alternatively, you can tell the cluster only to *prefer* nodes with the best
connectivity, by using ``score-attribute`` in the rule. Just be sure to set
``multiplier`` to a value higher than that of ``resource-stickiness`` (and
don't set either of them to ``INFINITY``).
.. topic:: Prefer node with most connected ping nodes
.. code-block:: xml
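   <!-- Minimal sketch; the resource name and IDs are assumptions. -->
   <rsc_location id="WebServer-connectivity" rsc="Webserver">
     <rule id="ping-prefer-rule" score-attribute="pingd">
       <expression id="ping-prefer" attribute="pingd" operation="defined"/>
     </rule>
   </rsc_location>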
It is perhaps easier to think of this in terms of the simple constraints that
the cluster translates it into. For example, if ``sles-1`` is connected to all
five ping nodes but ``sles-2`` is only connected to two, then it would be as if
you instead had the following constraints in your configuration:
.. topic:: How the cluster translates the above location constraint
.. code-block:: xml
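   <!-- Minimal sketch assuming multiplier is set to 1000, so five connected
        ping nodes score 5000 and two score 2000; names and IDs are
        assumptions. -->
   <rsc_location id="ping-prefer-1" rsc="Webserver" node="sles-1" score="5000"/>
   <rsc_location id="ping-prefer-2" rsc="Webserver" node="sles-2" score="2000"/>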
The advantage is that you don't have to manually update any constraints
whenever your network connectivity changes.
You can also combine the concepts above into something even more complex. The
example below shows how you can prefer the node with the most connected ping
nodes provided they have connectivity to at least three (again assuming that
``multiplier`` is set to 1000).
.. topic:: More complex example of choosing location based on connectivity
.. code-block:: xml
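   <!-- Illustrative sketch combining an exclusion rule with a preference
        rule; the resource name, IDs, and the 3000 threshold (three targets
        at a multiplier of 1000) are assumptions. -->
   <rsc_location id="WebServer-connectivity" rsc="Webserver">
     <rule id="ping-exclude-rule" score="-INFINITY" boolean-op="or">
       <expression id="ping-exclude-undef" attribute="pingd" operation="not_defined"/>
       <expression id="ping-exclude-low" attribute="pingd" operation="lt" value="3000"/>
     </rule>
     <rule id="ping-prefer-rule" score-attribute="pingd">
       <expression id="ping-prefer" attribute="pingd" operation="defined"/>
     </rule>
   </rsc_location>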
.. _live-migration:
Migrating Resources
___________________
Normally, when the cluster needs to move a resource, it fully restarts the
resource (that is, it stops the resource on the current node and starts it on
the new node).
However, some types of resources, such as many virtual machines, are able to
move to another location without loss of state (often referred to as live
migration or hot migration). In pacemaker, this is called resource migration.
Pacemaker can be configured to migrate a resource when moving it, rather than
restarting it.
Not all resources are able to migrate; see the
:ref:`migration checklist ` below. Even those that can,
won't do so in all situations. Conceptually, there are two requirements from
which the other prerequisites follow:
* The resource must be active and healthy at the old location; and
* everything required for the resource to run must be available on both the old
and new locations.
The cluster is able to accommodate both *push* and *pull* migration models by
requiring the resource agent to support two special actions: ``migrate_to``
(performed on the current location) and ``migrate_from`` (performed on the
destination).
In push migration, the process on the current location transfers the resource
to the new location where it is later activated. In this scenario, most of the
work would be done in the ``migrate_to`` action and, if anything, the
activation would occur during ``migrate_from``.
Conversely for pull, the ``migrate_to`` action is practically empty and
``migrate_from`` does most of the work, extracting the relevant resource state
from the old location and activating it.
There is no wrong or right way for a resource agent to implement migration, as
long as it works.
.. _migration_checklist:
.. topic:: Migration Checklist
* The resource may not be a clone.
* The resource agent standard must be OCF.
* The resource must not be in a failed or degraded state.
* The resource agent must support ``migrate_to`` and ``migrate_from``
actions, and advertise them in its meta-data.
* The resource must have the ``allow-migrate`` meta-attribute set to
``true`` (which is not the default).
If an otherwise migratable resource depends on another resource via an ordering
constraint, there are special situations in which it will be restarted rather
than migrated.
For example, if the resource depends on a clone, and at the time the resource
needs to be moved, the clone has instances that are stopping and instances that
are starting, then the resource will be restarted. The scheduler is not yet
able to model this situation correctly and so takes the safer (if less optimal)
path.
Also, if a migratable resource depends on a non-migratable resource, and both
need to be moved, the migratable resource will be restarted.
.. index::
single: node; health
.. _node-health:
Tracking Node Health
####################
A node may be functioning adequately as far as cluster membership is concerned,
and yet be "unhealthy" in some respect that makes it an undesirable location
for resources. For example, a disk drive may be reporting SMART errors, or the
CPU may be highly loaded.
Pacemaker offers a way to automatically move resources off unhealthy nodes.
.. index::
single: node attribute; health
Node Health Attributes
______________________
Pacemaker will treat any node attribute whose name starts with ``#health`` as
an indicator of node health. Node health attributes may have one of the
following values:
.. table:: **Allowed Values for Node Health Attributes**
+------------+--------------------------------------------------------------+
| Value | Intended significance |
+============+==============================================================+
| ``red`` | .. index:: |
| | single: red; node health attribute value |
| | single: node attribute; health (red) |
| | |
| | This indicator is unhealthy |
+------------+--------------------------------------------------------------+
| ``yellow`` | .. index:: |
| | single: yellow; node health attribute value |
| | single: node attribute; health (yellow) |
| | |
| | This indicator is becoming unhealthy |
+------------+--------------------------------------------------------------+
| ``green`` | .. index:: |
| | single: green; node health attribute value |
| | single: node attribute; health (green) |
| | |
| | This indicator is healthy |
+------------+--------------------------------------------------------------+
| *integer* | .. index:: |
| | single: score; node health attribute value |
| | single: node attribute; health (score) |
| | |
| | A numeric score to apply to all resources on this node (0 or |
| | positive is healthy, negative is unhealthy) |
+------------+--------------------------------------------------------------+
.. index::
pair: cluster option; node-health-strategy
Node Health Strategy
____________________
Pacemaker assigns a node health score to each node, as the sum of the values of
all its node health attributes. This score will be used as a location
constraint applied to this node for all resources.
The ``node-health-strategy`` cluster option controls how Pacemaker responds to
changes in node health attributes, and how it translates ``red``, ``yellow``,
and ``green`` to scores.
Allowed values are:
.. table:: **Node Health Strategies**
+----------------+----------------------------------------------------------+
| Value | Effect |
+================+==========================================================+
| none | .. index:: |
| | single: node-health-strategy; none |
| | single: none; node-health-strategy value |
| | |
| | Do not track node health attributes at all. |
+----------------+----------------------------------------------------------+
| migrate-on-red | .. index:: |
| | single: node-health-strategy; migrate-on-red |
| | single: migrate-on-red; node-health-strategy value |
| | |
| | Assign the value of ``-INFINITY`` to ``red``, and 0 to |
| | ``yellow`` and ``green``. This will cause all resources |
| | to move off the node if any attribute is ``red``. |
+----------------+----------------------------------------------------------+
| only-green | .. index:: |
| | single: node-health-strategy; only-green |
| | single: only-green; node-health-strategy value |
| | |
| | Assign the value of ``-INFINITY`` to ``red`` and |
| | ``yellow``, and 0 to ``green``. This will cause all |
| | resources to move off the node if any attribute is |
| | ``red`` or ``yellow``. |
+----------------+----------------------------------------------------------+
| progressive | .. index:: |
| | single: node-health-strategy; progressive |
| | single: progressive; node-health-strategy value |
| | |
| | Assign the value of the ``node-health-red`` cluster |
| | option to ``red``, the value of ``node-health-yellow`` |
| | to ``yellow``, and the value of ``node-health-green`` to |
| | ``green``. Each node is additionally assigned a score of |
| | ``node-health-base`` (this allows resources to start |
| | even if some attributes are ``yellow``). This strategy |
| | gives the administrator finer control over how important |
| | each value is. |
+----------------+----------------------------------------------------------+
| custom | .. index:: |
| | single: node-health-strategy; custom |
| | single: custom; node-health-strategy value |
| | |
| | Track node health attributes using the same values as |
| | ``progressive`` for ``red``, ``yellow``, and ``green``, |
| | but do not take them into account. The administrator is |
| | expected to implement a policy by defining :ref:`rules` |
| | referencing node health attributes. |
+----------------+----------------------------------------------------------+
Configuring Node Health Agents
______________________________
Since Pacemaker calculates node health based on node attributes, any method
that sets node attributes may be used to measure node health. The most common
are resource agents and custom daemons.
Pacemaker provides examples that can be used directly or as a basis for custom
code. The ``ocf:pacemaker:HealthCPU``, ``ocf:pacemaker:HealthIOWait``, and
``ocf:pacemaker:HealthSMART`` resource agents set node health attributes based
on CPU and disk status.
To take advantage of this feature, add the resource to your cluster (generally
as a cloned resource with a recurring monitor action, to continually check the
health of all nodes). For example:
.. topic:: Example HealthIOWait resource configuration
.. code-block:: xml
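   <!-- Illustrative sketch; the IDs, limit values, and monitor interval are
        assumptions, as are the red_limit/yellow_limit parameter names. -->
   <clone id="HealthIOWait-clone">
     <primitive id="HealthIOWait" class="ocf" provider="pacemaker" type="HealthIOWait">
       <instance_attributes id="HealthIOWait-attrs">
         <nvpair id="HealthIOWait-red" name="red_limit" value="30"/>
         <nvpair id="HealthIOWait-yellow" name="yellow_limit" value="10"/>
       </instance_attributes>
       <operations>
         <op id="HealthIOWait-monitor-10s" interval="10s" name="monitor"/>
       </operations>
     </primitive>
   </clone>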
The resource agents use ``attrd_updater`` to set proper status for each node
running this resource, as a node attribute whose name starts with ``#health``
(for ``HealthIOWait``, the node attribute is named ``#health-iowait``).
When a node is no longer faulty, you can force the cluster to make it available
to take resources without waiting for the next monitor, by setting the node
health attribute to green. For example:
.. topic:: **Force node1 to be marked as healthy**
.. code-block:: none
# attrd_updater --name "#health-iowait" --update "green" --node "node1"
.. index::
single: reload
+ single: reload-agent
-Reloading Services After a Definition Change
+Reloading an Agent After a Definition Change
############################################
The cluster automatically detects changes to the configuration of active
resources. The cluster's normal response is to stop the service (using the old
definition) and start it again (with the new definition). This works, but some
resource agents are smarter and can be told to use a new set of options without
restarting.
To take advantage of this capability, the resource agent must:
-* Accept the ``reload`` action and handle it appropriately. The actions
- here depend completely on your application!
+* Implement the ``reload-agent`` action. What it should do depends completely
+ on your application!
- .. warning::
+ .. note::
- Many resource agents define the ``reload`` action to make the managed
- service reload its own *native* configuration. This is not compatible with
- Pacemaker's reload feature, which requires the reload action to
- accommodate changes in the resource's *Pacemaker* configuration (that is,
- the values of the agent's reloadable parameters).
+ Resource agents may also implement a ``reload`` action to make the managed
+ service reload its own *native* configuration. This is different from
+ ``reload-agent``, which makes effective changes in the resource's
+ *Pacemaker* configuration (specifically, the values of the agent's
+ reloadable parameters).
- Pacemaker is likely to use a different action name in a future release, to
- avoid conflicts with agents that perform a native reload.
-
-* Advertise the ``reload`` operation in the ``actions`` section of its
+* Advertise the ``reload-agent`` operation in the ``actions`` section of its
meta-data.
-* Set the ``unique`` attribute set to 0 in the ``parameters`` section of
+* Set the ``reloadable`` attribute to 1 in the ``parameters`` section of
its meta-data for any parameters eligible to be reloaded after a change.
- .. warning::
-
- Pacemaker's use of ``unique`` to indicate reloadability is non-standard and
- likely to change in a future release.
-
Once these requirements are satisfied, the cluster will automatically know to
-reload the resource (instead of restarting) when a non-unique parameter
+reload the resource (instead of restarting) when a reloadable parameter
changes.
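As a rough sketch (the parameter name here is hypothetical and not taken from
any shipped agent), an agent's meta-data might advertise something like:

.. code-block:: xml

   <!-- Hypothetical agent meta-data fragment -->
   <actions>
     <action name="reload-agent" timeout="20s"/>
   </actions>

   <parameters>
     <parameter name="my_tunable" unique="0" reloadable="1">
       <content type="string"/>
     </parameter>
   </parameters>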
.. note::
Metadata will not be re-read unless the resource needs to be started. If you
edit the agent of an already active resource to set a parameter reloadable,
the resource may restart the first time the parameter value changes.
.. note::
- If both a unique and non-unique parameter are changed simultaneously, the
- resource will be restarted.
+ If both a reloadable and non-reloadable parameter are changed
+ simultaneously, the resource will be restarted.
.. rubric:: Footnotes
.. [#] The naming of this option was perhaps unfortunate as it is easily
confused with live migration, the process of moving a resource from one
node to another without stopping it. Xen virtual guests are the most
common example of resources that can be migrated in this manner.
diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst
index f80ebb8d9a..003d4ebfb0 100644
--- a/doc/sphinx/Pacemaker_Explained/resources.rst
+++ b/doc/sphinx/Pacemaker_Explained/resources.rst
@@ -1,1016 +1,1036 @@
.. _resource:
Cluster Resources
-----------------
.. _s-resource-primitive:
What is a Cluster Resource?
###########################
.. index::
single: resource
A resource is a service made highly available by a cluster.
The simplest type of resource, a *primitive* resource, is described
in this chapter. More complex forms, such as groups and clones,
are described in later chapters.
Every primitive resource has a *resource agent*. A resource agent is an
external program that abstracts the service it provides and presents a
consistent view to the cluster.
This allows the cluster to be agnostic about the resources it manages.
The cluster doesn't need to understand how the resource works because
it relies on the resource agent to do the right thing when given a
**start**, **stop** or **monitor** command. For this reason, it is crucial
that resource agents are well-tested.
Typically, resource agents come in the form of shell scripts. However,
they can be written using any technology (such as C, Python or Perl)
that the author is comfortable with.
.. _s-resource-supported:
.. index::
single: resource; class
Resource Classes
################
Pacemaker supports several classes of agents:
* OCF
* LSB
* Systemd
* Upstart (deprecated)
* Service
* Fencing
* Nagios Plugins
.. index::
single: resource; OCF
single: OCF; resources
single: Open Cluster Framework; resources
Open Cluster Framework
______________________
The OCF standard [#]_ is basically an extension of the Linux Standard
Base conventions for init scripts to:
* support parameters,
* make them self-describing, and
* make them extensible
OCF specs have strict definitions of the exit codes that actions must return [#]_.
The cluster follows these specifications exactly, and giving the wrong
exit code will cause the cluster to behave in ways you will likely
find puzzling and annoying. In particular, the cluster needs to
distinguish a completely stopped resource from one which is in some
erroneous and indeterminate state.
Parameters are passed to the resource agent as environment variables, with the
special prefix ``OCF_RESKEY_``. So, a parameter which the user thinks
of as ``ip`` will be passed to the resource agent as ``OCF_RESKEY_ip``. The
number and purpose of the parameters is left to the resource agent; however,
the resource agent should use the **meta-data** command to advertise any that it
supports.
The OCF class is the most preferred as it is an industry standard,
highly flexible (allowing parameters to be passed to agents in a
non-positional manner) and self-describing.
For more information, see the
`reference `_ and
the *Resource Agents* chapter of *Pacemaker Administration*.
.. index::
single: resource; LSB
single: LSB; resources
single: Linux Standard Base; resources
Linux Standard Base
___________________
*LSB* resource agents are more commonly known as *init scripts*. If a full path
is not given, they are assumed to be located in ``/etc/init.d``.
Commonly, they are provided by the OS distribution. In order to be used
with a Pacemaker cluster, they must conform to the LSB specification [#]_.
.. warning::
Many distributions or particular software packages claim LSB compliance
but ship with broken init scripts. For details on how to check whether
your init script is LSB-compatible, see the `Resource Agents` chapter of
`Pacemaker Administration`. Common problematic violations of the LSB
standard include:
* Not implementing the ``status`` operation at all
* Not observing the correct exit status codes for
``start``/``stop``/``status`` actions
* Starting a started resource returns an error
* Stopping a stopped resource returns an error
.. important::
Remember to make sure the computer is `not` configured to start any
services at boot time -- that should be controlled by the cluster.
.. _s-resource-supported-systemd:
.. index::
single: Resource; Systemd
single: Systemd; resources
Systemd
_______
Most Linux distributions have replaced the old
`SysV `_ style of
initialization daemons and scripts with
`Systemd `_.
Pacemaker is able to manage these services `if they are present`.
Instead of init scripts, systemd has `unit files`. Generally, the
services (unit files) are provided by the OS distribution, but there
are online guides for converting from init scripts [#]_.
.. important::
Remember to make sure the computer is `not` configured to start any
services at boot time -- that should be controlled by the cluster.
.. index::
single: Resource; Upstart
single: Upstart; resources
Upstart
_______
Some distributions replaced the old
`SysV `_ style of
initialization daemons (and scripts) with
`Upstart `_.
Pacemaker is able to manage these services `if they are present`.
Instead of init scripts, Upstart has `jobs`. Generally, the
services (jobs) are provided by the OS distribution.
.. important::
Remember to make sure the computer is `not` configured to start any
services at boot time -- that should be controlled by the cluster.
.. warning::
Upstart support is deprecated in Pacemaker. Upstart is no longer an actively
maintained project, and test platforms for it are no longer readily usable.
Support will likely be dropped entirely at the next major release of
Pacemaker.
.. index::
single: Resource; System Services
single: System Service; resources
System Services
_______________
Since there are various types of system services (``systemd``,
``upstart``, and ``lsb``), Pacemaker supports a special ``service`` alias which
intelligently figures out which one applies to a given cluster node.
This is particularly useful when the cluster contains a mix of
``systemd``, ``upstart``, and ``lsb``.
In order, Pacemaker will try to find the named service as:
* an LSB init script
* a Systemd unit file
* an Upstart job
.. index::
single: Resource; STONITH
single: STONITH; resources
STONITH
_______
The STONITH class is used exclusively for fencing-related resources. This is
discussed later in :ref:`fencing`.
.. index::
single: Resource; Nagios Plugins
single: Nagios Plugins; resources
Nagios Plugins
______________
Nagios Plugins [#]_ allow us to monitor services on remote hosts.
Pacemaker is able to do remote monitoring with the plugins `if they are
present`.
A common use case is to configure them as resources belonging to a resource
container (usually a virtual machine), so that the container will be restarted
if any of them fails. Another use is to configure them as ordinary
resources to be used for monitoring hosts or services via the network.
The supported parameters are the same as the long options of the plugin.
.. _primitive-resource:
Resource Properties
###################
These values tell the cluster which resource agent to use for the resource,
where to find that resource agent and what standards it conforms to.
.. table:: **Properties of a Primitive Resource**
+----------+------------------------------------------------------------------+
| Field | Description |
+==========+==================================================================+
| id | .. index:: |
| | single: id; resource |
| | single: resource; property, id |
| | |
| | Your name for the resource |
+----------+------------------------------------------------------------------+
| class | .. index:: |
| | single: class; resource |
| | single: resource; property, class |
| | |
| | The standard the resource agent conforms to. Allowed values: |
| | ``lsb``, ``nagios``, ``ocf``, ``service``, ``stonith``, |
| | ``systemd``, ``upstart`` |
+----------+------------------------------------------------------------------+
| type | .. index:: |
| | single: type; resource |
| | single: resource; property, type |
| | |
| | The name of the Resource Agent you wish to use. E.g. |
| | ``IPaddr`` or ``Filesystem`` |
+----------+------------------------------------------------------------------+
| provider | .. index:: |
| | single: provider; resource |
| | single: resource; property, provider |
| | |
| | The OCF spec allows multiple vendors to supply the same resource |
| | agent. To use the OCF resource agents supplied by the Heartbeat |
| | project, you would specify ``heartbeat`` here. |
+----------+------------------------------------------------------------------+
The XML definition of a resource can be queried with the **crm_resource** tool.
For example:
.. code-block:: none
# crm_resource --resource Email --query-xml
might produce:
.. topic:: A system resource definition
.. code-block:: xml
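The exact output depends on the resource's configuration; as an illustrative
sketch (the ``id`` and ``type`` values here are examples, not output from a
real cluster), a system resource definition might look like:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Email" class="service" type="exim"/>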
.. note::
One of the main drawbacks to system services (LSB, systemd or
Upstart) resources is that they do not allow any parameters!
.. topic:: An OCF resource definition
.. code-block:: xml
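As an illustrative sketch (ids and values are examples), an OCF resource
definition adds a ``provider`` and may carry parameters:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <instance_attributes id="Public-IP-params">
         <nvpair id="Public-IP-ip" name="ip" value="192.0.2.2"/>
      </instance_attributes>
   </primitive>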
.. _resource_options:
Resource Options
################
Resources have two types of options: *meta-attributes* and *instance attributes*.
Meta-attributes apply to any type of resource, while instance attributes
are specific to each resource agent.
Resource Meta-Attributes
________________________
Meta-attributes are used by the cluster to decide how a resource should
behave and can be easily set using the ``--meta`` option of the
**crm_resource** command.
.. table:: **Meta-attributes of a Primitive Resource**
+----------------------------+----------------------------------+------------------------------------------------------+
| Field | Default | Description |
+============================+==================================+======================================================+
| priority | 0 | .. index:: |
| | | single: priority; resource option |
| | | single: resource; option, priority |
| | | |
| | | If not all resources can be active, the cluster |
| | | will stop lower priority resources in order to |
| | | keep higher priority ones active. |
+----------------------------+----------------------------------+------------------------------------------------------+
| critical | true | .. index:: |
| | | single: critical; resource option |
| | | single: resource; option, critical |
| | | |
| | | Use this value as the default for ``influence`` in |
| | | all :ref:`colocation constraints |
| | | ` involving this resource, |
| | | as well as the implicit colocation constraints |
| | | created if this resource is in a :ref:`group |
| | | `. For details, see |
| | | :ref:`s-coloc-influence`. |
+----------------------------+----------------------------------+------------------------------------------------------+
| target-role | Started | .. index:: |
| | | single: target-role; resource option |
| | | single: resource; option, target-role |
| | | |
| | | What state should the cluster attempt to keep this |
| | | resource in? Allowed values: |
| | | |
| | | * ``Stopped:`` Force the resource to be stopped |
| | | * ``Started:`` Allow the resource to be started |
| | | (and in the case of :ref:`promotable clone |
| | | resources `, promoted |
| | | if appropriate) |
| | | * ``Unpromoted:`` Allow the resource to be started, |
| | | but only in the unpromoted role if the resource is |
| | | :ref:`promotable ` |
| | | * ``Promoted:`` Equivalent to ``Started`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| is-managed | TRUE | .. index:: |
| | | single: is-managed; resource option |
| | | single: resource; option, is-managed |
| | | |
| | | Is the cluster allowed to start and stop |
| | | the resource? Allowed values: ``true``, ``false`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| maintenance | FALSE | .. index:: |
| | | single: maintenance; resource option |
| | | single: resource; option, maintenance |
| | | |
| | | Similar to the ``maintenance-mode`` |
| | | :ref:`cluster option `, but for |
| | | a single resource. If true, the resource will not |
| | | be started, stopped, or monitored on any node. This |
| | | differs from ``is-managed`` in that monitors will |
| | | not be run. Allowed values: ``true``, ``false`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| resource-stickiness | 1 for individual clone | .. _resource-stickiness: |
| | instances, 0 for all | |
| | other resources | .. index:: |
| | | single: resource-stickiness; resource option |
| | | single: resource; option, resource-stickiness |
| | | |
| | | A score that will be added to the current node when |
| | | a resource is already active. This allows running |
| | | resources to stay where they are, even if they |
| | | would be placed elsewhere if they were being |
| | | started from a stopped state. |
+----------------------------+----------------------------------+------------------------------------------------------+
| requires | ``quorum`` for resources | .. _requires: |
| | with a ``class`` of ``stonith``, | |
| | otherwise ``unfencing`` if | .. index:: |
| | unfencing is active in the | single: requires; resource option |
| | cluster, otherwise ``fencing`` | single: resource; option, requires |
| | if ``stonith-enabled`` is true, | |
| | otherwise ``quorum`` | Conditions under which the resource can be |
| | | started. Allowed values: |
| | | |
| | | * ``nothing:`` can always be started |
| | | * ``quorum:`` The cluster can only start this |
| | | resource if a majority of the configured nodes |
| | | are active |
| | | * ``fencing:`` The cluster can only start this |
| | | resource if a majority of the configured nodes |
| | | are active *and* any failed or unknown nodes |
| | | have been :ref:`fenced ` |
| | | * ``unfencing:`` The cluster can only start this |
| | | resource if a majority of the configured nodes |
| | | are active *and* any failed or unknown nodes have |
| | | been fenced *and* only on nodes that have been |
| | | :ref:`unfenced ` |
+----------------------------+----------------------------------+------------------------------------------------------+
| migration-threshold | INFINITY | .. index:: |
| | | single: migration-threshold; resource option |
| | | single: resource; option, migration-threshold |
| | | |
| | | How many failures may occur for this resource on |
| | | a node, before this node is marked ineligible to |
| | | host this resource. A value of 0 indicates that this |
| | | feature is disabled (the node will never be marked |
| | | ineligible); by contrast, the cluster treats |
| | | INFINITY (the default) as a very large but finite |
| | | number. This option has an effect only if the |
| | | failed operation specifies ``on-fail`` as |
| | | ``restart`` (the default), and additionally for |
| | | failed ``start`` operations, if the cluster |
| | | property ``start-failure-is-fatal`` is ``false``. |
+----------------------------+----------------------------------+------------------------------------------------------+
| failure-timeout | 0 | .. index:: |
| | | single: failure-timeout; resource option |
| | | single: resource; option, failure-timeout |
| | | |
| | | How many seconds to wait before acting as if the |
| | | failure had not occurred, and potentially allowing |
| | | the resource back to the node on which it failed. |
| | | A value of 0 indicates that this feature is |
| | | disabled. |
+----------------------------+----------------------------------+------------------------------------------------------+
| multiple-active | stop_start | .. index:: |
| | | single: multiple-active; resource option |
| | | single: resource; option, multiple-active |
| | | |
| | | What should the cluster do if it ever finds the |
| | | resource active on more than one node? Allowed |
| | | values: |
| | | |
| | | * ``block``: mark the resource as unmanaged |
| | | * ``stop_only``: stop all active instances and |
| | | leave them that way |
| | | * ``stop_start``: stop all active instances and |
| | | start the resource in one location only |
+----------------------------+----------------------------------+------------------------------------------------------+
| allow-migrate | TRUE for ocf:pacemaker:remote | Whether the cluster should try to "live migrate" |
| | resources, FALSE otherwise | this resource when it needs to be moved (see |
| | | :ref:`live-migration`) |
+----------------------------+----------------------------------+------------------------------------------------------+
| container-attribute-target | | Specific to bundle resources; see |
| | | :ref:`s-bundle-attributes` |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-node | | The name of the Pacemaker Remote guest node this |
| | | resource is associated with, if any. If |
| | | specified, this both enables the resource as a |
| | | guest node and defines the unique name used to |
| | | identify the guest node. The guest must be |
| | | configured to run the Pacemaker Remote daemon |
| | | when it is started. **WARNING:** This value |
| | | cannot overlap with any resource or node IDs. |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-port | 3121 | If ``remote-node`` is specified, the port on the |
| | | guest used for its Pacemaker Remote connection. |
| | | The Pacemaker Remote daemon on the guest must |
| | | be configured to listen on this port. |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-addr | value of ``remote-node`` | If ``remote-node`` is specified, the IP |
| | | address or hostname used to connect to the |
| | | guest via Pacemaker Remote. The Pacemaker Remote |
| | | daemon on the guest must be configured to accept |
| | | connections on this address. |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-connect-timeout | 60s | If ``remote-node`` is specified, how long before |
| | | a pending guest connection will time out. |
+----------------------------+----------------------------------+------------------------------------------------------+
As an example of setting resource options, if you performed the following
commands on an LSB Email resource:
.. code-block:: none
# crm_resource --meta --resource Email --set-parameter priority --parameter-value 100
# crm_resource -m -r Email -p multiple-active -v block
the resulting resource definition might be:
.. topic:: An LSB resource with cluster options
.. code-block:: xml
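As an illustrative sketch (the ids are examples), the two meta-attributes set
above would appear in a ``meta_attributes`` block within the primitive:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Email" class="lsb" type="exim">
      <meta_attributes id="Email-meta_attributes">
         <nvpair id="Email-meta_attributes-priority" name="priority" value="100"/>
         <nvpair id="Email-meta_attributes-multiple-active" name="multiple-active" value="block"/>
      </meta_attributes>
   </primitive>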
In addition to the cluster-defined meta-attributes described above, you may
also configure arbitrary meta-attributes of your own choosing. Most commonly,
this would be done for use in :ref:`rules `. For example, an IT department
might define a custom meta-attribute to indicate which company department each
resource is intended for. To reduce the chance of name collisions with
cluster-defined meta-attributes added in the future, it is recommended to use
a unique, organization-specific prefix for such attributes.
.. _s-resource-defaults:
Setting Global Defaults for Resource Meta-Attributes
____________________________________________________
To set a default value for a resource option, add it to the
``rsc_defaults`` section with ``crm_attribute``. For example,
.. code-block:: none
# crm_attribute --type rsc_defaults --name is-managed --update false
would prevent the cluster from starting or stopping any of the
resources in the configuration (unless of course the individual
resources were specifically enabled by having their ``is-managed`` set to
``true``).
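In the CIB, such a default is stored as an ``nvpair`` within the
``rsc_defaults`` section. As an illustrative sketch (the ids are examples),
the command above would result in something like:

.. code-block:: xml

   <!-- illustrative ids only -->
   <rsc_defaults>
      <meta_attributes id="rsc_defaults-options">
         <nvpair id="rsc_defaults-options-is-managed" name="is-managed" value="false"/>
      </meta_attributes>
   </rsc_defaults>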
Resource Instance Attributes
____________________________
The resource agents of some resource classes (lsb, systemd and upstart *not* among them)
can be given parameters which determine how they behave and which instance
of a service they control.
If your resource agent supports parameters, you can add them with the
``crm_resource`` command. For example,
.. code-block:: none
# crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2
would create an entry in the resource like this:
.. topic:: An example OCF resource with instance attributes
.. code-block:: xml
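As an illustrative sketch (the ids are examples), the parameter is stored as
an ``nvpair`` inside an ``instance_attributes`` block:

.. code-block:: xml

   <!-- illustrative ids only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <instance_attributes id="Public-IP-instance_attributes">
         <nvpair id="Public-IP-instance_attributes-ip" name="ip" value="192.0.2.2"/>
      </instance_attributes>
   </primitive>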
For an OCF resource, the result would be an environment variable
called ``OCF_RESKEY_ip`` with a value of ``192.0.2.2``.
The list of instance attributes supported by an OCF resource agent can be
found by calling the resource agent with the ``meta-data`` command.
The output contains an XML description of all the supported
attributes, their purpose and default values.
.. topic:: Displaying the metadata for the Dummy resource agent template
.. code-block:: none
# export OCF_ROOT=/usr/lib/ocf
# $OCF_ROOT/resource.d/pacemaker/Dummy meta-data
.. code-block:: xml
-
- 1.0
+
+ 1.1
- This is a Dummy Resource Agent. It does absolutely nothing except
- keep track of whether its running or not.
- Its purpose in life is for testing and to serve as a template for RA writers.
-
- NB: Please pay attention to the timeouts specified in the actions
- section below. They should be meaningful for the kind of resource
- the agent manages. They should be the minimum advised timeouts,
- but they shouldn't/cannot cover _all_ possible resource
- instances. So, try to be neither overly generous nor too stingy,
- but moderate. The minimum timeouts should never be below 10 seconds.
+ This is a dummy OCF resource agent. It does absolutely nothing except keep track
+ of whether it is running or not, and can be configured so that actions fail or
+ take a long time. Its purpose is primarily for testing, and to serve as a
+ template for resource agent writers.
Example stateless resource agent
-
+
Location to store the resource state in.
State file
-
+
-
+
+
+ Fake password field
+
+ Password
+
+
+
+
Fake attribute that can be changed to cause a reload
Fake attribute that can be changed to cause a reload
-
+
Number of seconds to sleep during operations. This can be used to test how
the cluster reacts to operation timeouts.
Operation sleep duration in seconds.
+
+
+ Start, migrate_from, and reload-agent actions will return failure if running on
+ the host specified here, but the resource will run successfully anyway (future
+ monitor calls will find it running). This can be used to test on-fail=ignore.
+
+ Report bogus start failure on specified host
+
+
+
+
+ If this is set, the environment will be dumped to this file for every call.
+
+ Environment dump file
+
+
+
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
.. index::
single: resource; action
single: resource; operation
.. _operation:
Resource Operations
###################
*Operations* are actions the cluster can perform on a resource by calling the
resource agent. Resource agents must support certain common operations such as
start, stop, and monitor, and may implement any others.
Operations may be explicitly configured for two purposes: to override defaults
for options (such as timeout) that the cluster will use whenever it initiates
the operation, and to run an operation on a recurring basis (for example, to
monitor the resource for failure).
.. topic:: An OCF resource with a non-default start timeout
.. code-block:: xml
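As an illustrative sketch (ids and the timeout value are examples), the
override is expressed as an ``op`` element inside the resource's
``operations`` block:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <operations>
         <op id="Public-IP-start" name="start" interval="0" timeout="180s"/>
      </operations>
   </primitive>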
Pacemaker identifies operations by a combination of name and interval, so this
combination must be unique for each resource. That is, you should not configure
two operations for the same resource with the same name and interval.
.. _operation_properties:
Operation Properties
____________________
Operation properties may be specified directly in the ``op`` element as
XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements.
XML attributes take precedence over ``nvpair`` elements if both are specified.
.. table:: **Properties of an Operation**
+----------------+-----------------------------------+-----------------------------------------------------+
| Field | Default | Description |
+================+===================================+=====================================================+
| id | | .. index:: |
| | | single: id; action property |
| | | single: action; property, id |
| | | |
| | | A unique name for the operation. |
+----------------+-----------------------------------+-----------------------------------------------------+
| name | | .. index:: |
| | | single: name; action property |
| | | single: action; property, name |
| | | |
| | | The action to perform. This can be any action |
| | | supported by the agent; common values include |
| | | ``monitor``, ``start``, and ``stop``. |
+----------------+-----------------------------------+-----------------------------------------------------+
| interval | 0 | .. index:: |
| | | single: interval; action property |
| | | single: action; property, interval |
| | | |
| | | How frequently (in seconds) to perform the |
| | | operation. A value of 0 means "when needed". |
| | | A positive value defines a *recurring action*, |
| | | which is typically used with |
| | | :ref:`monitor `. |
+----------------+-----------------------------------+-----------------------------------------------------+
| timeout | | .. index:: |
| | | single: timeout; action property |
| | | single: action; property, timeout |
| | | |
| | | How long to wait before declaring the action |
| | | has failed |
+----------------+-----------------------------------+-----------------------------------------------------+
| on-fail | Varies by action: | .. index:: |
| | | single: on-fail; action property |
| | * ``stop``: ``fence`` if | single: action; property, on-fail |
| | ``stonith-enabled`` is true | |
| | or ``block`` otherwise | The action to take if this action ever fails. |
| | * ``demote``: ``on-fail`` of the | Allowed values: |
| | ``monitor`` action with | |
| | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. |
| | if present, enabled, and | * ``block:`` Don't perform any further operations |
| | configured to a value other | on the resource. |
| | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start |
| | otherwise | it elsewhere. |
| | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a |
| | | full restart. This is valid only for ``promote`` |
| | | actions, and for ``monitor`` actions with both |
| | | a nonzero ``interval`` and ``role`` set to |
| | | ``Promoted``; for any other action, a |
| | | configuration error will be logged, and the |
| | | default behavior will be used. *(since 2.0.5)* |
| | | * ``restart:`` Stop the resource and start it |
| | | again (possibly on a different node). |
| | | * ``fence:`` STONITH the node on which the |
| | | resource failed. |
| | | * ``standby:`` Move *all* resources away from the |
| | | node on which the resource failed. |
+----------------+-----------------------------------+-----------------------------------------------------+
| enabled | TRUE | .. index:: |
| | | single: enabled; action property |
| | | single: action; property, enabled |
| | | |
| | | If ``false``, ignore this operation definition. |
| | | This is typically used to pause a particular |
| | | recurring ``monitor`` operation; for instance, it |
| | | can complement the respective resource being |
| | | unmanaged (``is-managed=false``), as this alone |
| | | will :ref:`not block any configured monitoring |
| | | `. Disabling the operation |
| | | does not suppress all actions of the given type. |
| | | Allowed values: ``true``, ``false``. |
+----------------+-----------------------------------+-----------------------------------------------------+
| record-pending | TRUE | .. index:: |
| | | single: record-pending; action property |
| | | single: action; property, record-pending |
| | | |
| | | If ``true``, the intention to perform the operation |
| | | is recorded so that GUIs and CLI tools can indicate |
| | | that an operation is in progress. This is best set |
| | | as an *operation default* |
| | | (see :ref:`s-operation-defaults`). Allowed values: |
| | | ``true``, ``false``. |
+----------------+-----------------------------------+-----------------------------------------------------+
| role | | .. index:: |
| | | single: role; action property |
| | | single: action; property, role |
| | | |
| | | Run the operation only on node(s) that the cluster |
| | | thinks should be in the specified role. This only |
| | | makes sense for recurring ``monitor`` operations. |
| | | Allowed (case-sensitive) values: ``Stopped``, |
| | | ``Started``, and in the case of :ref:`promotable |
| | | clone resources `, |
| | | ``Unpromoted`` and ``Promoted``. |
+----------------+-----------------------------------+-----------------------------------------------------+
.. note::
When ``on-fail`` is set to ``demote``, recovery from failure by a successful
demote causes the cluster to recalculate whether and where a new instance
should be promoted. The node with the failure is eligible, so if promotion
scores have not changed, it will be promoted again.
There is no direct equivalent of ``migration-threshold`` for the promoted
role, but the same effect can be achieved with a location constraint using a
:ref:`rule ` with a node attribute expression for the resource's fail
count.
For example, to immediately ban the promoted role from a node with any
failed promote or promoted instance monitor:
.. code-block:: xml
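As an illustrative sketch (the resource and id names are examples; fail count
node attributes have the form
``fail-count-<resource>#<operation>_<interval-in-ms>``):

.. code-block:: xml

   <!-- illustrative names only -->
   <rsc_location id="ban-promoted-on-failure" rsc="my_primitive-clone">
      <rule id="ban-promoted-on-failure-rule" role="Promoted" score="-INFINITY" boolean-op="or">
         <expression id="ban-on-promote-failure" attribute="fail-count-my_primitive#promote_0" operation="gte" value="1"/>
         <expression id="ban-on-monitor-failure" attribute="fail-count-my_primitive#monitor_10000" operation="gte" value="1"/>
      </rule>
   </rsc_location>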
This example assumes that there is a promotable clone of the ``my_primitive``
resource (note that the primitive name, not the clone name, is used in the
rule), and that there is a recurring 10-second-interval monitor configured for
the promoted role (fail count attributes specify the interval in
milliseconds).
.. _s-resource-monitoring:
Monitoring Resources for Failure
________________________________
When Pacemaker first starts a resource, it runs one-time ``monitor`` operations
(referred to as *probes*) to ensure the resource is running where it's
supposed to be, and not running where it's not supposed to be. (This behavior
can be affected by the ``resource-discovery`` location constraint property.)
Other than those initial probes, Pacemaker will *not* (by default) check that
the resource continues to stay healthy [#]_. You must configure ``monitor``
operations explicitly to perform these checks.
.. topic:: An OCF resource with a recurring health check
.. code-block:: xml
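As an illustrative sketch (ids and the interval are examples):

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <operations>
         <op id="Public-IP-monitor" name="monitor" interval="60s"/>
      </operations>
   </primitive>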
By default, a ``monitor`` operation will ensure that the resource is running
where it is supposed to. The ``target-role`` property can be used for further
checking.
For example, if a resource has one ``monitor`` operation with
``interval=10 role=Started`` and a second ``monitor`` operation with
``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes
it thinks *should* be running the resource, and the second monitor on any nodes
that it thinks *should not* be running the resource (for the truly paranoid,
who want to know when an administrator manually starts a service by mistake).
.. note::
Currently, monitors with ``role=Stopped`` are not implemented for
:ref:`clone ` resources.
.. _s-monitoring-unmanaged:
Monitoring Resources When Administration is Disabled
____________________________________________________
Recurring ``monitor`` operations behave differently under various administrative
settings:
* When a resource is unmanaged (by setting ``is-managed=false``): No monitors
will be stopped.
If the unmanaged resource is stopped on a node where the cluster thinks it
should be running, the cluster will detect and report that it is not, but it
will not consider the monitor failed, and will not try to start the resource
until it is managed again.
Starting the unmanaged resource on a different node is strongly discouraged
and will at least cause the cluster to consider the resource failed, and
may require the resource's ``target-role`` to be set to ``Stopped`` then
``Started`` to be recovered.
* When a node is put into standby: All resources will be moved away from the
node, and all ``monitor`` operations will be stopped on the node, except those
specifying ``role`` as ``Stopped`` (which will be newly initiated if
appropriate).
* When the cluster is put into maintenance mode: All resources will be marked
as unmanaged. All monitor operations will be stopped, except those
specifying ``role`` as ``Stopped`` (which will be newly initiated if
appropriate). As with single unmanaged resources, starting
a resource on a node other than where the cluster expects it to be will
cause problems.
.. _s-operation-defaults:
Setting Global Defaults for Operations
______________________________________
You can change the global default values for operation properties
in a given cluster. These are defined in an ``op_defaults`` section
of the CIB's ``configuration`` section, and can be set with
``crm_attribute``. For example,
.. code-block:: none
# crm_attribute --type op_defaults --name timeout --update 20s
would default each operation's ``timeout`` to 20 seconds. If an
operation's definition also includes a value for ``timeout``, then that
value would be used for that operation instead.
When Implicit Operations Take a Long Time
_________________________________________
The cluster will always perform a number of implicit operations: ``start``,
``stop`` and a non-recurring ``monitor`` operation used at startup to check
whether the resource is already active. If one of these is taking too long,
you can create an explicit entry for it and specify a longer timeout.
.. topic:: An OCF resource with custom timeouts for its implicit actions
.. code-block:: xml
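As an illustrative sketch (ids and timeout values are examples):

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <operations>
         <op id="public-ip-startup" name="monitor" interval="0" timeout="90s"/>
         <op id="public-ip-start" name="start" interval="0" timeout="180s"/>
         <op id="public-ip-stop" name="stop" interval="0" timeout="15min"/>
      </operations>
   </primitive>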
Multiple Monitor Operations
___________________________
Provided no two operations (for a single resource) have the same name
and interval, you can have as many ``monitor`` operations as you like.
In this way, you can do a superficial health check every minute and
progressively more intense ones at higher intervals.
To tell the resource agent what kind of check to perform, you need to
provide each monitor with a different value for a common parameter.
The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL``
for this purpose and dictates that it is "made available to the
resource agent without the normal ``OCF_RESKEY`` prefix".
Whatever name you choose, you can specify it by adding an
``instance_attributes`` block to the ``op`` tag. It is up to each
resource agent to look for the parameter and decide how to use it.
.. topic:: An OCF resource with two recurring health checks, performing
different levels of checks specified via ``OCF_CHECK_LEVEL``.
.. code-block:: xml
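As an illustrative sketch (ids, intervals, and check levels are examples),
each ``op`` carries its own ``instance_attributes`` block that sets
``OCF_CHECK_LEVEL``:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <operations>
         <op id="public-ip-health-60" name="monitor" interval="60s">
            <instance_attributes id="params-public-ip-depth-60">
               <nvpair id="public-ip-depth-60" name="OCF_CHECK_LEVEL" value="10"/>
            </instance_attributes>
         </op>
         <op id="public-ip-health-300" name="monitor" interval="300s">
            <instance_attributes id="params-public-ip-depth-300">
               <nvpair id="public-ip-depth-300" name="OCF_CHECK_LEVEL" value="20"/>
            </instance_attributes>
         </op>
      </operations>
   </primitive>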
Disabling a Monitor Operation
_____________________________
The easiest way to stop a recurring monitor is to just delete it.
However, there can be times when you only want to disable it
temporarily. In such cases, simply add ``enabled=false`` to the
operation's definition.
.. topic:: Example of an OCF resource with a disabled health check
.. code-block:: xml
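As an illustrative sketch (ids are examples), note the ``enabled="false"``
attribute on the ``op`` element:

.. code-block:: xml

   <!-- illustrative ids only -->
   <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <operations>
         <op id="public-ip-check" name="monitor" interval="60s" enabled="false"/>
      </operations>
   </primitive>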
This can be achieved from the command line by executing:
.. code-block:: none
# cibadmin --modify --xml-text ''
Once you've done whatever you needed to do, you can then re-enable it with
.. code-block:: none
# cibadmin --modify --xml-text ''
.. [#] See https://github.com/ClusterLabs/OCF-spec/tree/master/ra. The
Pacemaker implementation has been somewhat extended from the OCF specs.
.. [#] The resource-agents source code includes the **ocf-tester** script,
which can be useful in this regard.
.. [#] See http://refspecs.linux-foundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
for the LSB Spec as it relates to init scripts.
.. [#] For example, http://0pointer.de/blog/projects/systemd-for-admins-3.html
.. [#] The project has two independent forks, hosted at
https://www.nagios-plugins.org/ and https://www.monitoring-plugins.org/. Output
from both projects' plugins is similar, so plugins from either project can be
used with pacemaker.
.. [#] Currently, anyway. Automatic monitoring operations may be added in a future
version of Pacemaker.
diff --git a/doc/sphinx/Pacemaker_Remote/options.rst b/doc/sphinx/Pacemaker_Remote/options.rst
index f14b479e78..27baa77a1e 100644
--- a/doc/sphinx/Pacemaker_Remote/options.rst
+++ b/doc/sphinx/Pacemaker_Remote/options.rst
@@ -1,173 +1,171 @@
.. index::
single: configuration
Configuration Explained
-----------------------
The walk-through examples use some of these options, but don't explain exactly
what they mean or do. This section is meant to be the go-to resource for all
the options available for configuring Pacemaker Remote.
.. index::
pair: configuration; guest node
single: guest node; meta-attribute
Resource Meta-Attributes for Guest Nodes
########################################
When configuring a virtual machine as a guest node, the virtual machine is
created using one of the usual resource agents for that purpose (for example,
**ocf:heartbeat:VirtualDomain** or **ocf:heartbeat:Xen**), with additional
meta-attributes.
No restrictions are enforced on what agents may be used to create a guest node,
but obviously the agent must create a distinct environment capable of running
the pacemaker_remote daemon and cluster resources. An additional requirement is
that fencing the host running the guest node resource must be sufficient for
ensuring the guest node is stopped. This means, for example, that not all
hypervisors supported by **VirtualDomain** may be used to create guest nodes;
if the guest can survive the hypervisor being fenced, it may not be used as a
guest node.
Below are the meta-attributes available to enable a resource as a guest node
and define its connection parameters.
.. table:: **Meta-attributes for configuring VM resources as guest nodes**
+------------------------+-----------------+-----------------------------------------------------------+
| Option | Default | Description |
+========================+=================+===========================================================+
| remote-node | none | The node name of the guest node this resource defines. |
| | | This both enables the resource as a guest node and |
| | | defines the unique name used to identify the guest node. |
| | | If no other parameters are set, this value will also be |
| | | assumed as the hostname to use when connecting to |
| | | pacemaker_remote on the VM. This value **must not** |
| | | overlap with any resource or node IDs. |
+------------------------+-----------------+-----------------------------------------------------------+
| remote-port | 3121 | The port on the virtual machine that the cluster will |
| | | use to connect to pacemaker_remote. |
+------------------------+-----------------+-----------------------------------------------------------+
| remote-addr | 'value of' | The IP address or hostname to use when connecting to |
| | ``remote-node`` | pacemaker_remote on the VM. |
+------------------------+-----------------+-----------------------------------------------------------+
| remote-connect-timeout | 60s | How long before a pending guest connection will time out. |
+------------------------+-----------------+-----------------------------------------------------------+
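As an illustrative sketch (the resource name, ``config`` path, and node name
are examples), a **VirtualDomain** resource enabled as a guest node might look
like:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="vm-guest1" class="ocf" provider="heartbeat" type="VirtualDomain">
      <instance_attributes id="vm-guest1-params">
         <nvpair id="vm-guest1-config" name="config" value="/etc/libvirt/qemu/guest1.xml"/>
      </instance_attributes>
      <meta_attributes id="vm-guest1-meta">
         <nvpair id="vm-guest1-remote-node" name="remote-node" value="guest1"/>
      </meta_attributes>
   </primitive>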
.. index::
pair: configuration; remote node
Connection Resources for Remote Nodes
#####################################
A remote node is defined by a connection resource. That connection resource
has instance attributes that define where the remote node is located on the
network and how to communicate with it.
Descriptions of these instance attributes can be retrieved using the following
``pcs`` command:
.. code-block:: none
# pcs resource describe remote
ocf:pacemaker:remote - remote resource agent
Resource options:
- server: Server location to connect to. This can be an ip address or hostname.
- port: tcp port to connect to.
- reconnect_interval: Interval in seconds at which Pacemaker will attempt to reconnect to a
- remote node after an active connection to the remote node has been
- severed. When this value is nonzero, Pacemaker will retry the connection
- indefinitely, at the specified interval. As with any time-based actions,
- this is not guaranteed to be checked more frequently than the value of the
- cluster-recheck-interval cluster option.
+ server: Server location to connect to (IP address or resolvable host name)
+ port: TCP port at which to contact Pacemaker Remote executor
+ reconnect_interval: If this is a positive time interval, the cluster will attempt to
+ reconnect to a remote node after an active connection has been
+ lost at this interval. Otherwise, the cluster will attempt to
+ reconnect immediately (after any fencing needed).
When defining a remote node's connection resource, it is common and recommended
to name the connection resource the same as the remote node's hostname. By
default, if no ``server`` option is provided, the cluster will attempt to contact
the remote node using the resource name as the hostname.
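As an illustrative sketch (the resource name and address are examples), a
remote node connection resource might be defined as:

.. code-block:: xml

   <!-- illustrative values only -->
   <primitive id="remote1" class="ocf" provider="pacemaker" type="remote">
      <instance_attributes id="remote1-params">
         <nvpair id="remote1-server" name="server" value="192.0.2.10"/>
      </instance_attributes>
   </primitive>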
Environment Variables for Daemon Start-up
#########################################
Authentication and encryption of the connection between cluster nodes
and nodes running pacemaker_remote is achieved using
`TLS-PSK `_ encryption/authentication
over TCP (port 3121 by default). This means that both the cluster node and
remote node must share the same private key. By default, this
key is placed at ``/etc/pacemaker/authkey`` on each node.
You can change the default port and/or key location for Pacemaker and
``pacemaker_remoted`` via environment variables. How these variables are set
varies by OS, but usually they are set in the ``/etc/sysconfig/pacemaker`` or
``/etc/default/pacemaker`` file.
.. code-block:: none
#==#==# Pacemaker Remote
# Use the contents of this file as the authorization key to use with Pacemaker
# Remote connections. This file must be readable by Pacemaker daemons (that is,
# it must allow read permissions to either the hacluster user or the haclient
# group), and its contents must be identical on all nodes. The default is
# "/etc/pacemaker/authkey".
# PCMK_authkey_location=/etc/pacemaker/authkey
# If the Pacemaker Remote service is run on the local node, it will listen
# for connections on this address. The value may be a resolvable hostname or an
# IPv4 or IPv6 numeric address. When resolving names or using the default
# wildcard address (i.e. listen on all available addresses), IPv6 will be
# preferred if available. When listening on an IPv6 address, IPv4 clients will
# be supported (via IPv4-mapped IPv6 addresses).
# PCMK_remote_address="192.0.2.1"
# Use this TCP port number when connecting to a Pacemaker Remote node. This
# value must be the same on all nodes. The default is "3121".
# PCMK_remote_port=3121
# Use these GnuTLS cipher priorities for TLS connections. See:
#
# https://gnutls.org/manual/html_node/Priority-Strings.html
#
# Pacemaker will append ":+ANON-DH" for remote CIB access (when enabled) and
# ":+DHE-PSK:+PSK" for Pacemaker Remote connections, as they are required for
# the respective functionality.
# PCMK_tls_priorities="NORMAL"
# Set bounds on the bit length of the prime number generated for Diffie-Hellman
# parameters needed by TLS connections. The default is not to set any bounds.
#
# If these values are specified, the server (Pacemaker Remote daemon, or CIB
# manager configured to accept remote clients) will use these values to provide
# a floor and/or ceiling for the value recommended by the GnuTLS library. The
# library will only accept a limited number of specific values, which vary by
# library version, so setting these is recommended only when required for
# compatibility with specific client versions.
#
# If PCMK_dh_min_bits is specified, the client (connecting cluster node or
# remote CIB command) will require that the server use a prime of at least this
# size. This is only recommended when the value must be lowered in order for
# the client's GnuTLS library to accept a connection to an older server.
# The client side does not use PCMK_dh_max_bits.
#
# PCMK_dh_min_bits=1024
# PCMK_dh_max_bits=2048
Removing Remote Nodes and Guest Nodes
#####################################
If the resource creating a guest node, or the **ocf:pacemaker:remote** resource
creating a connection to a remote node, is removed from the configuration, the
affected node will continue to show up in status output as an offline node.
If you want to get rid of that output, run (replacing ``$NODE_NAME``
appropriately):
.. code-block:: none
# crm_node --force --remove $NODE_NAME
.. WARNING::
Be absolutely sure that there are no references to the node's resource in the
configuration before running the above command.
diff --git a/extra/resources/Dummy b/extra/resources/Dummy
index 125578967a..ae58d77bc5 100755
--- a/extra/resources/Dummy
+++ b/extra/resources/Dummy
@@ -1,293 +1,317 @@
#!/bin/sh
#
# ocf:pacemaker:Dummy resource agent
#
# Original copyright 2004 SUSE LINUX AG, Lars Marowsky-Bre
-# Later changes copyright 2008-2019 the Pacemaker project contributors
+# Later changes copyright 2008-2021 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# (GPLv2) WITHOUT ANY WARRANTY.
#
# The Dummy agent is intended primarily for testing, and has various options to
# make actions intentionally fail or take a long time. It may also be used as a
# template for resource agent writers, in which case:
#
# - Replace all occurrences of "dummy" and "Dummy" with your agent name.
# - Update the meta-data appropriately for your agent, such as the description
# and supported options. Pay particular attention to the timeouts specified in
# the actions section; they should be meaningful for the kind of service the
# agent manages. They should be the minimum advised timeouts, but shouldn't
# try to cover _all_ possible instances. So, try to be neither overly generous
# nor too stingy, but moderate. The minimum timeouts should never be below 10
# seconds.
# - Don't copy the stuff here that is just for testing, such as the
# sigterm_handler() or dump_env().
# - You don't need the state file stuff here if you have a better way of
# determining whether your service is running. It's only useful for agents
# such as health agents that don't actually correspond to a running service.
# - Implement the actions appropriately for your service. Your monitor action
# must differentiate correctly between running, not running, and failed (that
# is THREE states, not just yes/no). The migrate_to, migrate_from, and reload
# actions are optional and not appropriate to all services.
#
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"}
. "${OCF_FUNCTIONS}"
: ${__OCF_ACTION:="$1"}
#######################################################################
meta_data() {
cat <<END
-
-1.0
+
+1.1
This is a dummy OCF resource agent. It does absolutely nothing except keep track
of whether it is running or not, and can be configured so that actions fail or
take a long time. Its purpose is primarily for testing, and to serve as a
template for resource agent writers.
Example stateless resource agent
-
+
Location to store the resource state in.
State file
-
+
Fake password field
Password
-
+
-Fake attribute that can be changed to cause a reload
+Fake attribute that can be changed to cause an agent reload
-Fake attribute that can be changed to cause a reload
+Fake attribute that can be changed to cause an agent reload
-
+
Number of seconds to sleep during operations. This can be used to test how
the cluster reacts to operation timeouts.
Operation sleep duration in seconds.
-
+
-Start actions will return failure if running on the host specified here, but
-the resource will start successfully anyway (future monitor calls will find it
-running). This can be used to test on-fail=ignore.
+Start, migrate_from, and reload-agent actions will return failure if running on
+the host specified here, but the resource will run successfully anyway (future
+monitor calls will find it running). This can be used to test on-fail=ignore.
Report bogus start failure on specified host
-
+
If this is set, the environment will be dumped to this file for every call.
Environment dump file
+
END
}
#######################################################################
# don't exit on TERM, to test that pacemaker-execd makes sure that we do exit
trap sigterm_handler TERM
sigterm_handler() {
ocf_log info "They use TERM to bring us down. No such luck."
# Since we're likely going to get KILLed, clean up any monitor
# serialization in progress, so the next probe doesn't return an error.
rm -f "${VERIFY_SERIALIZED_FILE}"
return
}
dummy_usage() {
    cat <<END
usage: $0 {start|stop|monitor|migrate_to|migrate_from|reload|reload-agent|validate-all|meta-data}
END
}
dump_env() {
    # Append the environment to the configured dump file, if any
    [ -n "${OCF_RESKEY_envfile}" ] && env | sort >> "${OCF_RESKEY_envfile}"
}
dummy_start() {
dummy_monitor
DS_RETVAL=$?
if [ $DS_RETVAL -eq $OCF_SUCCESS ]; then
if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then
DS_RETVAL=$OCF_ERR_GENERIC
fi
return $DS_RETVAL
fi
touch "${OCF_RESKEY_state}"
DS_RETVAL=$?
if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then
DS_RETVAL=$OCF_ERR_GENERIC
fi
return $DS_RETVAL
}
dummy_stop() {
dummy_monitor --force
if [ $? -eq $OCF_SUCCESS ]; then
rm "${OCF_RESKEY_state}"
fi
rm -f "${VERIFY_SERIALIZED_FILE}"
return $OCF_SUCCESS
}
dummy_monitor() {
if [ $OCF_RESKEY_op_sleep -ne 0 ]; then
if [ "$1" = "" ] && [ -f "${VERIFY_SERIALIZED_FILE}" ]; then
# two monitor ops have occurred at the same time.
# This verifies a condition in pacemaker-execd regression tests.
ocf_log err "$VERIFY_SERIALIZED_FILE exists already"
ocf_exit_reason "alternate universe collision"
return $OCF_ERR_GENERIC
fi
touch "${VERIFY_SERIALIZED_FILE}"
sleep ${OCF_RESKEY_op_sleep}
rm "${VERIFY_SERIALIZED_FILE}"
fi
if [ -f "${OCF_RESKEY_state}" ]; then
# Multiple monitor levels are defined to support various tests
case "$OCF_CHECK_LEVEL" in
10)
# monitor level with delay, useful for testing timeouts
sleep 30
;;
20)
# monitor level that fails intermittently
n=$(expr "$(dd if=/dev/urandom bs=1 count=1 2>/dev/null | od | head -1 | cut -f2 -d' ')" % 5)
if [ $n -eq 1 ]; then
ocf_exit_reason "smoke detected near CPU fan"
return $OCF_ERR_GENERIC
fi
;;
30)
# monitor level that always fails
ocf_exit_reason "hyperdrive quota reached"
return $OCF_ERR_GENERIC
;;
40)
# monitor level that returns error code from state file
rc=$(cat ${OCF_RESKEY_state})
[ -n "$rc" ] && ocf_exit_reason "CPU ejected. Observed leaving the Kronosnet galaxy at $rc times the speed of light." && return $rc
;;
*)
;;
esac
return $OCF_SUCCESS
fi
return $OCF_NOT_RUNNING
}
dummy_validate() {
- # Is the state directory writable?
- state_dir=$(dirname "$OCF_RESKEY_state")
- [ -d "$state_dir" ] && [ -w "$state_dir" ] && [ -x "$state_dir" ]
- if [ $? -ne 0 ]; then
- return $OCF_ERR_ARGS
+ # If specified, is op_sleep an integer?
+ case "$OCF_RESKEY_op_sleep" in
+ ""|*[0-9]*) ;;
+ *) return $OCF_ERR_CONFIGURED ;;
+ esac
+
+ # Host-specific checks
+ if [ "$OCF_CHECK_LEVEL" = "10" ]; then
+
+ # Is the state directory writable?
+ state_dir=$(dirname "$OCF_RESKEY_state")
+ [ -d "$state_dir" ] && [ -w "$state_dir" ] && [ -x "$state_dir" ]
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_ARGS
+ fi
+
+ # If specified, is the environment file directory writable?
+ if [ -n "$OCF_RESKEY_envfile" ]; then
+ envfile_dir=$(dirname "$OCF_RESKEY_envfile")
+ [ -d "$envfile_dir" ] && [ -w "$envfile_dir" ] && [ -x "$envfile_dir" ]
+ if [ $? -ne 0 ]; then
+ return $OCF_ERR_ARGS
+ fi
+ fi
+
fi
return $OCF_SUCCESS
}
: ${OCF_RESKEY_fake:="dummy"}
: ${OCF_RESKEY_op_sleep:=0}
: ${OCF_RESKEY_CRM_meta_interval:=0}
: ${OCF_RESKEY_CRM_meta_globally_unique:="false"}
if [ -z "$OCF_RESKEY_state" ]; then
OCF_RESKEY_state="${HA_VARRUN%%/}/Dummy-${OCF_RESOURCE_INSTANCE}.state"
if [ "${OCF_RESKEY_CRM_meta_globally_unique}" = "false" ]; then
# Strip off the trailing clone marker (note + is not portable in sed)
OCF_RESKEY_state=$(echo $OCF_RESKEY_state | sed s/:[0-9][0-9]*\.state/.state/)
fi
fi
VERIFY_SERIALIZED_FILE="${OCF_RESKEY_state}.serialized"
dump_env
case "$__OCF_ACTION" in
meta-data) meta_data
exit $OCF_SUCCESS
;;
start) dummy_start;;
stop) dummy_stop;;
monitor) dummy_monitor;;
migrate_to) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} to ${OCF_RESKEY_CRM_meta_migrate_target}."
dummy_stop
;;
migrate_from) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} from ${OCF_RESKEY_CRM_meta_migrate_source}."
dummy_start
;;
-reload) ocf_log err "Reloading..."
+reload) ocf_log debug "Reloading $OCF_RESOURCE_INSTANCE (service)"
+ exit $OCF_SUCCESS
+ ;;
+reload-agent) ocf_log err "Reloading $OCF_RESOURCE_INSTANCE (agent)"
dummy_start
;;
validate-all) dummy_validate;;
usage|help) dummy_usage
exit $OCF_SUCCESS
;;
*) dummy_usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
rc=$?
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
exit $rc
# vim: set filetype=sh expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/extra/resources/remote b/extra/resources/remote
index e571d820ef..a53262bb69 100755
--- a/extra/resources/remote
+++ b/extra/resources/remote
@@ -1,113 +1,107 @@
#!/bin/sh
#
# ocf:pacemaker:remote OCF resource agent
#
-# Copyright 2013-2019 the Pacemaker project contributors
+# Copyright 2013-2021 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# (GPLv2) WITHOUT ANY WARRANTY.
#
# This script provides metadata for Pacemaker's internal remote agent.
# Outside of acting as a placeholder so the agent can be indexed, and
# providing metadata, this script should never be invoked. The actual
# functionality behind the remote connection lives within Pacemaker's
# controller daemon.
#
-#######################################################################
-# Initialization:
: ${OCF_FUNCTIONS:="${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs"}
. "${OCF_FUNCTIONS}"
: ${__OCF_ACTION:="$1"}
-#######################################################################
-
meta_data() {
cat <
-
-1.0
-remote resource agent
+
+ 1.1
+ Pacemaker Remote connection
-
-
- Server location to connect to. This can be an ip address or hostname.
-
- Server location
-
+
+
+ Server location to connect to (IP address or resolvable host name)
+
+ Remote hostname
+
-
-
- tcp port to connect to.
-
- tcp port
-
+
+
+ TCP port at which to contact Pacemaker Remote executor
+
+ Remote port
+
-
-
- Interval in seconds at which Pacemaker will attempt to reconnect to a
- remote node after an active connection to the remote node has been
- severed. When this value is nonzero, Pacemaker will retry the connection
- indefinitely, at the specified interval. As with any time-based actions,
- this is not guaranteed to be checked more frequently than the value of
- the cluster-recheck-interval cluster option.
-
- reconnect interval
-
+
+
+ If this is a positive time interval, the cluster will attempt to
+ reconnect to a remote node after an active connection has been
+ lost at this interval. Otherwise, the cluster will attempt to
+ reconnect immediately (after any fencing needed).
+
+ reconnect interval
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
END
+ return $OCF_SUCCESS
}
-#######################################################################
-
remote_usage() {
+ EXITSTATUS="$1"
+
cat <<END
-Expects to have a fully populated OCF RA-compliant environment set.
+This conforms to the OCF Resource Agent API version 1.1, and expects
+to have OCF-compliant environment variables provided.
END
+ return $EXITSTATUS
}
remote_unsupported() {
ocf_log info "The ocf:pacemaker:remote agent should not be directly invoked except for meta-data action"
return $OCF_ERR_GENERIC
}
case $__OCF_ACTION in
-meta-data) meta_data
- exit $OCF_SUCCESS
- ;;
-start) remote_unsupported;;
-stop) remote_unsupported;;
-monitor) remote_unsupported;;
-migrate_to) remote_unsupported;;
-migrate_from) remote_unsupported;;
-reload) remote_unsupported;;
-validate-all) remote_unsupported;;
-usage|help) remote_usage
- exit $OCF_SUCCESS
- ;;
-*) remote_usage
- exit $OCF_ERR_UNIMPLEMENTED
- ;;
+ meta-data) meta_data ;;
+ start) remote_unsupported ;;
+ stop) remote_unsupported ;;
+ monitor) remote_unsupported ;;
+ migrate_to) remote_unsupported ;;
+ migrate_from) remote_unsupported ;;
+ reload) remote_unsupported ;;
+ reload-agent) remote_unsupported ;;
+ validate-all) remote_unsupported ;;
+ usage|help) remote_usage $OCF_SUCCESS ;;
+ *) remote_usage $OCF_ERR_UNIMPLEMENTED ;;
esac
+
rc=$?
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
exit $rc
# vim: set filetype=sh expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/include/crm/common/agents.h b/include/crm/common/agents.h
index 46f7e1bb56..ea78e42c20 100644
--- a/include/crm/common/agents.h
+++ b/include/crm/common/agents.h
@@ -1,66 +1,71 @@
/*
* Copyright 2017-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__AGENTS__H
# define PCMK__AGENTS__H
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief API related to resource agents
* \ingroup core
*/
#include <stdint.h>     // uint32_t
#include
/* Special stonith-class agent parameters interpreted directly by Pacemaker
* (not including the pcmk_ACTION_{action,retries,timeout} parameters)
*/
#define PCMK_STONITH_ACTION_LIMIT "pcmk_action_limit"
#define PCMK_STONITH_DELAY_BASE "pcmk_delay_base"
#define PCMK_STONITH_DELAY_MAX "pcmk_delay_max"
#define PCMK_STONITH_HOST_ARGUMENT "pcmk_host_argument"
#define PCMK_STONITH_HOST_CHECK "pcmk_host_check"
#define PCMK_STONITH_HOST_LIST "pcmk_host_list"
#define PCMK_STONITH_HOST_MAP "pcmk_host_map"
#define PCMK_STONITH_PROVIDES "provides"
#define PCMK_STONITH_STONITH_TIMEOUT "stonith-timeout"
+// OCF Resource Agent API standard version that this Pacemaker supports
+#define PCMK_OCF_MAJOR_VERSION "1"
+#define PCMK_OCF_MINOR_VERSION "1"
+#define PCMK_OCF_VERSION PCMK_OCF_MAJOR_VERSION "." PCMK_OCF_MINOR_VERSION
+
// Capabilities supported by a resource agent standard
enum pcmk_ra_caps {
pcmk_ra_cap_none = 0,
pcmk_ra_cap_provider = (1 << 0), // Requires provider
pcmk_ra_cap_status = (1 << 1), // Supports status instead of monitor
pcmk_ra_cap_params = (1 << 2), // Supports parameters
pcmk_ra_cap_unique = (1 << 3), // Supports unique clones
pcmk_ra_cap_promotable = (1 << 4), // Supports promotable clones
pcmk_ra_cap_stdin = (1 << 5), // Reads from standard input
pcmk_ra_cap_fence_params = (1 << 6), // Supports pcmk_monitor_timeout, etc.
};
uint32_t pcmk_get_ra_caps(const char *standard);
char *crm_generate_ra_key(const char *standard, const char *provider,
const char *type);
int crm_parse_agent_spec(const char *spec, char **standard, char **provider,
char **type);
bool pcmk_stonith_param(const char *param);
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include
#endif
#ifdef __cplusplus
}
#endif
#endif // PCMK__AGENTS__H
diff --git a/include/crm/crm.h b/include/crm/crm.h
index dc3e7b7693..fdfc825481 100644
--- a/include/crm/crm.h
+++ b/include/crm/crm.h
@@ -1,238 +1,239 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM__H
# define CRM__H
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief A dumping ground
* \ingroup core
*/
# include
# include
# include
# include
# include
# include
#ifndef PCMK_ALLOW_DEPRECATED
/*!
* \brief Allow use of deprecated Pacemaker APIs
*
* By default, external code using Pacemaker headers is allowed to use
* deprecated Pacemaker APIs. If PCMK_ALLOW_DEPRECATED is defined to 0 before
* including any Pacemaker headers, deprecated APIs will be unusable. It is
* strongly recommended to leave this unchanged for production and release
* builds, to avoid breakage when users upgrade to new Pacemaker releases that
* deprecate more APIs. This should be defined to 0 only for development and
* testing builds when desiring to check for usage of currently deprecated APIs.
*/
#define PCMK_ALLOW_DEPRECATED 1
#endif
/*!
* The CRM feature set assists with compatibility in mixed-version clusters.
* The major version number increases when nodes with different versions
* would not work (rolling upgrades are not allowed). The minor version
* number increases when mixed-version clusters are allowed only during
* rolling upgrades (a node with the oldest feature set will be elected DC). The
* minor-minor version number is ignored, but allows resource agents to detect
* cluster support for various features.
*
* The feature set also affects the processing of old saved CIBs (such as for
* many scheduler regression tests).
*
* Particular feature points currently tested by Pacemaker code:
*
* >2.1: Operation updates include timing data
* >=3.0.5: XML v2 digests are created
* >=3.0.8: Peers do not need acks for cancellations
* >=3.0.9: DC will send its own shutdown request to all peers
* XML v2 patchsets are created by default
* >=3.0.13: Fail counts include operation name and interval
* >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED
*/
-# define CRM_FEATURE_SET "3.9.0"
+# define CRM_FEATURE_SET "3.10.0"
/* Pacemaker's CPG protocols use fixed-width binary fields for the sender and
* recipient of a CPG message. This imposes an arbitrary limit on cluster node
* names.
*/
//! \brief Maximum length of a Corosync cluster node name (in bytes)
#define MAX_NAME 256
# define CRM_META "CRM_meta"
extern char *crm_system_name;
/* *INDENT-OFF* */
// How we represent "infinite" scores
# define CRM_SCORE_INFINITY 1000000
# define CRM_INFINITY_S "INFINITY"
# define CRM_PLUS_INFINITY_S "+" CRM_INFINITY_S
# define CRM_MINUS_INFINITY_S "-" CRM_INFINITY_S
/* @COMPAT API < 2.0.0 Deprecated "infinity" aliases
*
* INFINITY might be defined elsewhere (e.g. math.h), so undefine it first.
* This, of course, complicates any attempt to use the other definition in any
* code that includes this header.
*/
# undef INFINITY
# define INFINITY_S "INFINITY"
# define MINUS_INFINITY_S "-INFINITY"
# define INFINITY 1000000
/* Sub-systems */
# define CRM_SYSTEM_DC "dc"
# define CRM_SYSTEM_DCIB "dcib"
/* The master CIB */
# define CRM_SYSTEM_CIB "cib"
# define CRM_SYSTEM_CRMD "crmd"
# define CRM_SYSTEM_LRMD "lrmd"
# define CRM_SYSTEM_PENGINE "pengine"
# define CRM_SYSTEM_TENGINE "tengine"
# define CRM_SYSTEM_STONITHD "stonithd"
# define CRM_SYSTEM_MCP "pacemakerd"
// Names of internally generated node attributes
# define CRM_ATTR_UNAME "#uname"
# define CRM_ATTR_ID "#id"
# define CRM_ATTR_KIND "#kind"
# define CRM_ATTR_ROLE "#role"
# define CRM_ATTR_IS_DC "#is_dc"
# define CRM_ATTR_CLUSTER_NAME "#cluster-name"
# define CRM_ATTR_SITE_NAME "#site-name"
# define CRM_ATTR_UNFENCED "#node-unfenced"
# define CRM_ATTR_DIGESTS_ALL "#digests-all"
# define CRM_ATTR_DIGESTS_SECURE "#digests-secure"
# define CRM_ATTR_RA_VERSION "#ra-version"
# define CRM_ATTR_PROTOCOL "#attrd-protocol"
/* Valid operations */
# define CRM_OP_NOOP "noop"
# define CRM_OP_JOIN_ANNOUNCE "join_announce"
# define CRM_OP_JOIN_OFFER "join_offer"
# define CRM_OP_JOIN_REQUEST "join_request"
# define CRM_OP_JOIN_ACKNAK "join_ack_nack"
# define CRM_OP_JOIN_CONFIRM "join_confirm"
# define CRM_OP_PING "ping"
# define CRM_OP_NODE_INFO "node-info"
# define CRM_OP_THROTTLE "throttle"
# define CRM_OP_VOTE "vote"
# define CRM_OP_NOVOTE "no-vote"
# define CRM_OP_HELLO "hello"
# define CRM_OP_PECALC "pe_calc"
# define CRM_OP_QUIT "quit"
# define CRM_OP_LOCAL_SHUTDOWN "start_shutdown"
# define CRM_OP_SHUTDOWN_REQ "req_shutdown"
# define CRM_OP_SHUTDOWN "do_shutdown"
# define CRM_OP_FENCE "stonith"
# define CRM_OP_REGISTER "register"
# define CRM_OP_IPC_FWD "ipc_fwd"
# define CRM_OP_INVOKE_LRM "lrm_invoke"
# define CRM_OP_LRM_REFRESH "lrm_refresh" /* Deprecated */
# define CRM_OP_LRM_QUERY "lrm_query"
# define CRM_OP_LRM_DELETE "lrm_delete"
# define CRM_OP_LRM_FAIL "lrm_fail"
# define CRM_OP_PROBED "probe_complete"
# define CRM_OP_REPROBE "probe_again"
# define CRM_OP_CLEAR_FAILCOUNT "clear_failcount"
# define CRM_OP_REMOTE_STATE "remote_state"
# define CRM_OP_RELAXED_SET "one-or-more"
# define CRM_OP_RELAXED_CLONE "clone-one-or-more"
# define CRM_OP_RM_NODE_CACHE "rm_node_cache"
# define CRM_OP_MAINTENANCE_NODES "maintenance_nodes"
/* Possible cluster membership states */
# define CRMD_JOINSTATE_DOWN "down"
# define CRMD_JOINSTATE_PENDING "pending"
# define CRMD_JOINSTATE_MEMBER "member"
# define CRMD_JOINSTATE_NACK "banned"
# define CRMD_ACTION_DELETE "delete"
# define CRMD_ACTION_CANCEL "cancel"
# define CRMD_ACTION_RELOAD "reload"
+# define CRMD_ACTION_RELOAD_AGENT "reload-agent"
# define CRMD_ACTION_MIGRATE "migrate_to"
# define CRMD_ACTION_MIGRATED "migrate_from"
# define CRMD_ACTION_START "start"
# define CRMD_ACTION_STARTED "running"
# define CRMD_ACTION_STOP "stop"
# define CRMD_ACTION_STOPPED "stopped"
# define CRMD_ACTION_PROMOTE "promote"
# define CRMD_ACTION_PROMOTED "promoted"
# define CRMD_ACTION_DEMOTE "demote"
# define CRMD_ACTION_DEMOTED "demoted"
# define CRMD_ACTION_NOTIFY "notify"
# define CRMD_ACTION_NOTIFIED "notified"
# define CRMD_ACTION_STATUS "monitor"
# define CRMD_ACTION_METADATA "meta-data"
# define CRMD_METADATA_CALL_TIMEOUT 30000
/* short names */
# define RSC_DELETE CRMD_ACTION_DELETE
# define RSC_CANCEL CRMD_ACTION_CANCEL
# define RSC_MIGRATE CRMD_ACTION_MIGRATE
# define RSC_MIGRATED CRMD_ACTION_MIGRATED
# define RSC_START CRMD_ACTION_START
# define RSC_STARTED CRMD_ACTION_STARTED
# define RSC_STOP CRMD_ACTION_STOP
# define RSC_STOPPED CRMD_ACTION_STOPPED
# define RSC_PROMOTE CRMD_ACTION_PROMOTE
# define RSC_PROMOTED CRMD_ACTION_PROMOTED
# define RSC_DEMOTE CRMD_ACTION_DEMOTE
# define RSC_DEMOTED CRMD_ACTION_DEMOTED
# define RSC_NOTIFY CRMD_ACTION_NOTIFY
# define RSC_NOTIFIED CRMD_ACTION_NOTIFIED
# define RSC_STATUS CRMD_ACTION_STATUS
# define RSC_METADATA CRMD_ACTION_METADATA
/* *INDENT-ON* */
# include
# include
static inline const char *
crm_action_str(const char *task, guint interval_ms) {
if ((task != NULL) && (interval_ms == 0)
&& (strcasecmp(task, RSC_STATUS) == 0)) {
return "probe";
}
return task;
}
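/* Example (hypothetical helper, for illustration): per crm_action_str() above,
 * a zero-interval monitor is displayed as a probe, while any other task is
 * returned unchanged.
 */
#if 0
static inline void
crm_action_str_example(void)
{
    crm_action_str("monitor", 0);        /* returns "probe" */
    crm_action_str("monitor", 10000);    /* returns "monitor" */
    crm_action_str("start", 0);          /* returns "start" */
}
#endif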
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include
#endif
#ifdef __cplusplus
}
#endif
#endif
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index ab7238f423..c464295aac 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,598 +1,598 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
# include
# include
# include
# include
# include
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args)
# define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "", fmt, ##args)
# define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "", fmt, ##args)
# define pe_err(fmt...) do { \
was_processing_error = TRUE; \
pcmk__config_err(fmt); \
} while (0)
# define pe_warn(fmt...) do { \
was_processing_warning = TRUE; \
pcmk__config_warn(fmt); \
} while (0)
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
#define pe__set_working_set_flags(working_set, flags_to_set) do { \
(working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_working_set_flags(working_set, flags_to_clear) do { \
(working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_resource_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_resource_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_action_flags(action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags(action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
action_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action", action_name, \
(action_flags), \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
action_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", action_name, \
(action_flags), \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_action_flags_as(function, line, action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_order_flags(order_flags, flags_to_set) do { \
order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_order_flags(order_flags, flags_to_clear) do { \
order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_graph_flags(graph_flags, gr_action, flags_to_set) do { \
graph_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Graph", \
(gr_action)->uuid, graph_flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_graph_flags(graph_flags, gr_action, flags_to_clear) do { \
graph_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Graph", \
(gr_action)->uuid, graph_flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = (1 << 0),
pe_wo_restart_type = (1 << 1),
pe_wo_role_after = (1 << 2),
pe_wo_poweroff = (1 << 3),
pe_wo_require_all = (1 << 4),
pe_wo_order_score = (1 << 5),
pe_wo_neg_threshold = (1 << 6),
pe_wo_remove_after = (1 << 7),
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Warn-once", "logging", pe_wo, \
(pe_wo_bit), #pe_wo_bit); \
} \
} while (0);
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
pe_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
GList *node_list_rh; // List of pe_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
enum pe_ordering type;
void *lh_opaque;
pe_resource_t *lh_rsc;
pe_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
pe_resource_t *rh_rsc;
pe_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
typedef struct notify_data_s {
GSList *keys; // Environment variable name/value pairs
const char *action;
pe_action_t *pre;
pe_action_t *post;
pe_action_t *pre_done;
pe_action_t *post_done;
GList *active; /* notify_entry_t* */
GList *inactive; /* notify_entry_t* */
GList *start; /* notify_entry_t* */
GList *stop; /* notify_entry_t* */
GList *demote; /* notify_entry_t* */
GList *promote; /* notify_entry_t* */
GList *promoted; /* notify_entry_t* */
GList *unpromoted; /* notify_entry_t* */
GHashTable *allowed_nodes;
} notify_data_t;
bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node);
int pe__add_scores(int score1, int score2);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set);
pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
void pe_metadata(void);
void verify_pe_options(GHashTable * options);
void common_update_score(pe_resource_t * rsc, const char *id, int score);
void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set);
gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
int flags);
gboolean native_active(pe_resource_t * rsc, gboolean all);
gboolean group_active(pe_resource_t * rsc, gboolean all);
gboolean clone_active(pe_resource_t * rsc, gboolean all);
gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
void native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
gchar * pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
long options, const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...);
char *pe__node_display_name(pe_node_t *node, bool print_detail);
static inline const char *
pe__rsc_bool_str(pe_resource_t *rsc, uint64_t rsc_flag)
{
return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
}
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_html(pcmk__output_t *out, va_list args);
int pe__clone_text(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_html(pcmk__output_t *out, va_list args);
int pe__group_text(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
/* Exported for crm_mon to reference */
int pe__ban_text(pcmk__output_t *out, va_list args);
int pe__cluster_counts_text(pcmk__output_t *out, va_list args);
int pe__cluster_dc_text(pcmk__output_t *out, va_list args);
int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args);
int pe__cluster_options_text(pcmk__output_t *out, va_list args);
int pe__cluster_stack_text(pcmk__output_t *out, va_list args);
int pe__cluster_summary(pcmk__output_t *out, va_list args);
int pe__cluster_times_text(pcmk__output_t *out, va_list args);
int pe__failed_action_text(pcmk__output_t *out, va_list args);
int pe__node_attribute_text(pcmk__output_t *out, va_list args);
int pe__node_list_text(pcmk__output_t *out, va_list args);
int pe__op_history_text(pcmk__output_t *out, va_list args);
int pe__resource_history_text(pcmk__output_t *out, va_list args);
int pe__ticket_text(pcmk__output_t *out, va_list args);
void native_free(pe_resource_t * rsc);
void group_free(pe_resource_t * rsc);
void clone_free(pe_resource_t * rsc);
void pe__free_bundle(pe_resource_t *rsc);
enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
gboolean current);
void pe__count_common(pe_resource_t *rsc);
void pe__count_bundle(pe_resource_t *rsc);
gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent,
pe_working_set_t * data_set);
void common_free(pe_resource_t * rsc);
pe_node_t *pe__copy_node(const pe_node_t *this_node);
extern time_t get_effective_time(pe_working_set_t * data_set);
/* Failure handling utilities (from failcounts.c) */
// bit flags for fail count handling options
enum pe_fc_flags_e {
pe_fc_default = (1 << 0),
pe_fc_effective = (1 << 1), // don't count expired failures
pe_fc_fillers = (1 << 2), // if container, include filler failures in count
};
int pe_get_failcount(pe_node_t *node, pe_resource_t *rsc, time_t *last_failure,
uint32_t flags, xmlNode *xml_op,
pe_working_set_t *data_set);
pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node,
const char *reason,
pe_working_set_t *data_set);
/* Functions for finding/counting a resource's active nodes */
pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
unsigned int *count_all,
unsigned int *count_clean);
pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
unsigned int *count);
static inline pe_node_t *
pe__current_node(const pe_resource_t *rsc)
{
return pe__find_active_on(rsc, NULL, NULL);
}
/* Binary like operators for lists of nodes */
extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
GHashTable *pe__node_list2table(GList *list);
static inline gpointer
pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
{
if (hash) {
return g_hash_table_lookup(hash, key);
}
return NULL;
}
extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
/* Printing functions for debug */
extern void print_str_str(gpointer key, gpointer value, gpointer user_data);
extern void pe__output_node(pe_node_t * node, gboolean details, pcmk__output_t *out);
void pe__show_node_weights_as(const char *file, const char *function,
int line, bool to_log, pe_resource_t *rsc,
const char *comment, GHashTable *nodes,
pe_working_set_t *data_set);
#define pe__show_node_weights(level, rsc, text, nodes, data_set) \
pe__show_node_weights_as(__FILE__, __func__, __LINE__, \
(level), (rsc), (text), (nodes), (data_set))
/* Sorting functions */
extern gint sort_rsc_priority(gconstpointer a, gconstpointer b);
extern gint sort_rsc_index(gconstpointer a, gconstpointer b);
extern xmlNode *find_rsc_op_entry(pe_resource_t * rsc, const char *key);
extern pe_action_t *custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node,
gboolean optional, gboolean foo, pe_working_set_t * data_set);
# define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \
optional, TRUE, data_set);
# define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
# define stopped_action(rsc, node, optional) custom_action( \
rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \
optional, TRUE, data_set);
# define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \
optional, TRUE, data_set);
-# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD, 0)
+# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
# define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
rsc, start_key(rsc), CRMD_ACTION_START, node, \
optional, TRUE, data_set)
# define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
# define started_action(rsc, node, optional) custom_action( \
rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \
optional, TRUE, data_set)
# define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \
optional, TRUE, data_set)
# define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
# define promoted_action(rsc, node, optional) custom_action( \
rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \
optional, TRUE, data_set)
# define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \
optional, TRUE, data_set)
# define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
# define demoted_action(rsc, node, optional) custom_action( \
rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \
optional, TRUE, data_set)
extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
pe_working_set_t *data_set);
extern pe_action_t *find_first_action(GList *input, const char *uuid, const char *task,
pe_node_t * on_node);
extern enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name,
gboolean allow_non_atomic);
extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pe_node_t *on_node);
extern GList *find_recurring_actions(GList *input, pe_node_t * not_on_node);
GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pe_action_t * action);
extern void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
pe_working_set_t * data_set);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
extern gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e *role);
extern pe_resource_t *find_clone_instance(pe_resource_t * rsc, const char *sub_id,
pe_working_set_t * data_set);
extern void destroy_ticket(gpointer data);
extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
// Functions for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(pe_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return FALSE;
}
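/* Example (assuming the usual clone instance naming, e.g. an instance with
 * id "myrsc:2"): pe_base_name_eq() compares only the base name.
 */
#if 0
static void
base_name_eq_example(pe_resource_t *instance)   /* hypothetical, id "myrsc:2" */
{
    pe_base_name_eq(instance, "myrsc");      /* TRUE */
    pe_base_name_eq(instance, "myrsc:2");    /* FALSE: clone suffix is not part of the base name */
}
#endif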
int pe__target_rc_from_xml(xmlNode *xml_op);
gint sort_node_uname(gconstpointer a, gconstpointer b);
bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any);
enum rsc_digest_cmp_val {
/*! Digests are the same */
RSC_DIGEST_MATCH = 0,
/*! Params that require a restart changed */
RSC_DIGEST_RESTART,
/*! Some parameter changed. */
RSC_DIGEST_ALL,
/*! rsc op didn't have a digest associated with it, so
* it is unknown if parameters changed or not. */
RSC_DIGEST_UNKNOWN,
};
typedef struct op_digest_cache_s {
enum rsc_digest_cmp_val rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
char *digest_all_calc;
char *digest_secure_calc;
char *digest_restart_calc;
} op_digest_cache_t;
op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
guint *interval_ms, pe_node_t *node,
xmlNode *xml_op, GHashTable *overrides,
bool calc_secure,
pe_working_set_t *data_set);
void pe__free_digests(gpointer ptr);
op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
pe_working_set_t * data_set);
pe_action_t *pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set);
void trigger_unfencing(
pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set);
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite);
#define pe_action_required(action, reason, text) \
pe_action_set_flag_reason(__func__, __LINE__, action, reason, text, \
pe_action_optional, FALSE)
#define pe_action_implies(action, reason, flag) \
pe_action_set_flag_reason(__func__, __LINE__, action, reason, NULL, \
flag, FALSE)
void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, long options, gboolean print_all);
void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set);
void common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
const pe_node_t *node);
bool pe__bundle_needs_remote_name(pe_resource_t *rsc,
pe_working_set_t *data_set);
const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
pe_working_set_t *data_set,
xmlNode *xml, const char *field);
const char *pe_node_attribute_calculated(const pe_node_t *node,
const char *name,
const pe_resource_t *rsc);
const char *pe_node_attribute_raw(pe_node_t *node, const char *name);
bool pe__is_universal_clone(pe_resource_t *rsc,
pe_working_set_t *data_set);
void pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node,
enum pe_check_parameters, pe_working_set_t *data_set);
void pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*,
enum pe_check_parameters,
pe_working_set_t*));
void pe__free_param_checks(pe_working_set_t *data_set);
bool pe__shutdown_requested(pe_node_t *node);
void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
/*!
* \internal
* \brief Register xml formatting message functions.
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set);
bool pe__resource_is_disabled(pe_resource_t *rsc);
pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set);
GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
bool pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
gboolean pe__bundle_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent);
gboolean pe__clone_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent);
gboolean pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent);
gboolean pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent);
#endif
diff --git a/lib/common/operations.c b/lib/common/operations.c
index 420f0785f5..b7bea4b125 100644
--- a/lib/common/operations.c
+++ b/lib/common/operations.c
@@ -1,522 +1,523 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
static regex_t *notify_migrate_re = NULL;
/*!
* \brief Generate an operation key (RESOURCE_ACTION_INTERVAL)
*
* \param[in] rsc_id ID of resource being operated on
* \param[in] op_type Operation name
* \param[in] interval_ms Operation interval
*
* \return Newly allocated memory containing operation key as string
*
* \note This function asserts on errors, so it will never return NULL.
* The caller is responsible for freeing the result with free().
*/
char *
pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
{
CRM_ASSERT(rsc_id != NULL);
CRM_ASSERT(op_type != NULL);
return crm_strdup_printf(PCMK__OP_FMT, rsc_id, op_type, interval_ms);
}
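/* Example (hypothetical resource and operation names): the generated key
 * follows the RESOURCE_ACTION_INTERVAL pattern described above.
 */
#if 0
static void
op_key_example(void)
{
    char *key = pcmk__op_key("my-rsc", "monitor", 10000);

    /* key is "my-rsc_monitor_10000"; the caller must free() it */
    free(key);
}
#endif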
static inline gboolean
convert_interval(const char *s, guint *interval_ms)
{
unsigned long l;
errno = 0;
l = strtoul(s, NULL, 10);
if (errno != 0) {
return FALSE;
}
*interval_ms = (guint) l;
return TRUE;
}
static gboolean
try_fast_match(const char *key, const char *underbar1, const char *underbar2,
char **rsc_id, char **op_type, guint *interval_ms)
{
if (interval_ms) {
if (!convert_interval(underbar2+1, interval_ms)) {
return FALSE;
}
}
if (rsc_id) {
*rsc_id = strndup(key, underbar1-key);
}
if (op_type) {
*op_type = strndup(underbar1+1, underbar2-underbar1-1);
}
return TRUE;
}
static gboolean
try_basic_match(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
{
char *interval_sep = NULL;
char *type_sep = NULL;
// Parse interval at end of string
interval_sep = strrchr(key, '_');
if (interval_sep == NULL) {
return FALSE;
}
if (interval_ms) {
if (!convert_interval(interval_sep+1, interval_ms)) {
return FALSE;
}
}
type_sep = interval_sep-1;
while (1) {
if (*type_sep == '_') {
break;
} else if (type_sep == key) {
if (interval_ms) {
*interval_ms = 0;
}
return FALSE;
}
type_sep--;
}
if (op_type) {
// Add one here to skip the leading underscore we landed on in the
// while loop.
*op_type = strndup(type_sep+1, interval_sep-type_sep-1);
}
// Everything else is the name of the resource.
if (rsc_id) {
*rsc_id = strndup(key, type_sep-key);
}
return TRUE;
}
static gboolean
try_migrate_notify_match(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
{
int rc = 0;
size_t nmatch = 8;
regmatch_t pmatch[nmatch];
if (notify_migrate_re == NULL) {
// cppcheck-suppress memleak
notify_migrate_re = calloc(1, sizeof(regex_t));
rc = regcomp(notify_migrate_re, "^(.*)_(migrate_(from|to)|(pre|post)_notify_([a-z]+|migrate_(from|to)))_([0-9]+)$",
REG_EXTENDED);
CRM_ASSERT(rc == 0);
}
rc = regexec(notify_migrate_re, key, nmatch, pmatch, 0);
if (rc == REG_NOMATCH) {
return FALSE;
}
if (rsc_id) {
*rsc_id = strndup(key+pmatch[1].rm_so, pmatch[1].rm_eo-pmatch[1].rm_so);
}
if (op_type) {
*op_type = strndup(key+pmatch[2].rm_so, pmatch[2].rm_eo-pmatch[2].rm_so);
}
if (interval_ms) {
if (!convert_interval(key+pmatch[7].rm_so, interval_ms)) {
if (rsc_id) {
free(*rsc_id);
*rsc_id = NULL;
}
if (op_type) {
free(*op_type);
*op_type = NULL;
}
return FALSE;
}
}
return TRUE;
}
gboolean
parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
{
char *underbar1 = NULL;
char *underbar2 = NULL;
char *underbar3 = NULL;
// Initialize output variables in case of early return
if (rsc_id) {
*rsc_id = NULL;
}
if (op_type) {
*op_type = NULL;
}
if (interval_ms) {
*interval_ms = 0;
}
CRM_CHECK(key && *key, return FALSE);
underbar1 = strchr(key, '_');
if (!underbar1) {
return FALSE;
}
underbar2 = strchr(underbar1+1, '_');
if (!underbar2) {
return FALSE;
}
underbar3 = strchr(underbar2+1, '_');
if (!underbar3) {
return try_fast_match(key, underbar1, underbar2,
rsc_id, op_type, interval_ms);
} else if (try_migrate_notify_match(key, rsc_id, op_type, interval_ms)) {
return TRUE;
} else {
return try_basic_match(key, rsc_id, op_type, interval_ms);
}
}
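/* Example (hypothetical key): parse_op_key() splits a key back into its
 * components, handling plain, migration, and notification keys.
 */
#if 0
static void
parse_op_key_example(void)
{
    char *rsc_id = NULL;
    char *op_type = NULL;
    guint interval_ms = 0;

    if (parse_op_key("my-rsc_monitor_10000", &rsc_id, &op_type, &interval_ms)) {
        /* rsc_id "my-rsc", op_type "monitor", interval_ms 10000 */
        free(rsc_id);
        free(op_type);
    }
}
#endif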
char *
pcmk__notify_key(const char *rsc_id, const char *notify_type,
const char *op_type)
{
CRM_CHECK(rsc_id != NULL, return NULL);
CRM_CHECK(op_type != NULL, return NULL);
CRM_CHECK(notify_type != NULL, return NULL);
return crm_strdup_printf("%s_%s_notify_%s_0",
rsc_id, notify_type, op_type);
}
/*!
* \brief Parse a transition magic string into its constituent parts
*
* \param[in] magic Magic string to parse (must be non-NULL)
* \param[out] uuid If non-NULL, where to store copy of parsed UUID
* \param[out] transition_id If non-NULL, where to store parsed transition ID
* \param[out] action_id If non-NULL, where to store parsed action ID
* \param[out] op_status If non-NULL, where to store parsed result status
* \param[out] op_rc If non-NULL, where to store parsed actual rc
* \param[out] target_rc If non-NULL, where to store parsed target rc
*
* \return TRUE if key was valid, FALSE otherwise
* \note If uuid is supplied and this returns TRUE, the caller is responsible
* for freeing the memory for *uuid using free().
*/
gboolean
decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id,
int *op_status, int *op_rc, int *target_rc)
{
int res = 0;
char *key = NULL;
gboolean result = TRUE;
int local_op_status = -1;
int local_op_rc = -1;
CRM_CHECK(magic != NULL, return FALSE);
#ifdef SSCANF_HAS_M
res = sscanf(magic, "%d:%d;%ms", &local_op_status, &local_op_rc, &key);
#else
key = calloc(1, strlen(magic) - 3); // magic must have >=4 other characters
CRM_ASSERT(key);
res = sscanf(magic, "%d:%d;%s", &local_op_status, &local_op_rc, key);
#endif
if (res == EOF) {
crm_err("Could not decode transition information '%s': %s",
magic, pcmk_strerror(errno));
result = FALSE;
} else if (res < 3) {
crm_warn("Transition information '%s' incomplete (%d of 3 expected items)",
magic, res);
result = FALSE;
} else {
if (op_status) {
*op_status = local_op_status;
}
if (op_rc) {
*op_rc = local_op_rc;
}
result = decode_transition_key(key, uuid, transition_id, action_id,
target_rc);
}
free(key);
return result;
}
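/* Example (hypothetical magic string): the format is
 * "<op-status>:<op-rc>;<transition key>".
 */
#if 0
static void
transition_magic_example(void)
{
    int transition_id = 0, action_id = 0, op_status = 0, op_rc = 0, target_rc = 0;
    char *uuid = NULL;

    if (decode_transition_magic("0:7;3:5:0:abcdefab-cdef-abcd-efab-cdefabcdefab",
                                &uuid, &transition_id, &action_id,
                                &op_status, &op_rc, &target_rc)) {
        /* op_status 0, op_rc 7, action_id 3, transition_id 5, target_rc 0 */
        free(uuid);
    }
}
#endif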
char *
pcmk__transition_key(int transition_id, int action_id, int target_rc,
const char *node)
{
CRM_CHECK(node != NULL, return NULL);
return crm_strdup_printf("%d:%d:%d:%-*s",
action_id, transition_id, target_rc, 36, node);
}
/*!
* \brief Parse a transition key into its constituent parts
*
* \param[in] key Transition key to parse (must be non-NULL)
* \param[out] uuid If non-NULL, where to store copy of parsed UUID
* \param[out] transition_id If non-NULL, where to store parsed transition ID
* \param[out] action_id If non-NULL, where to store parsed action ID
* \param[out] target_rc If non-NULL, where to store parsed target rc
*
* \return TRUE if key was valid, FALSE otherwise
* \note If uuid is supplied and this returns TRUE, the caller is responsible
* for freeing the memory for *uuid using free().
*/
gboolean
decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id,
int *target_rc)
{
int local_transition_id = -1;
int local_action_id = -1;
int local_target_rc = -1;
char local_uuid[37] = { '\0' };
// Initialize any supplied output arguments
if (uuid) {
*uuid = NULL;
}
if (transition_id) {
*transition_id = -1;
}
if (action_id) {
*action_id = -1;
}
if (target_rc) {
*target_rc = -1;
}
CRM_CHECK(key != NULL, return FALSE);
if (sscanf(key, "%d:%d:%d:%36s", &local_action_id, &local_transition_id,
&local_target_rc, local_uuid) != 4) {
crm_err("Invalid transition key '%s'", key);
return FALSE;
}
if (strlen(local_uuid) != 36) {
crm_warn("Invalid UUID '%s' in transition key '%s'", local_uuid, key);
}
if (uuid) {
*uuid = strdup(local_uuid);
CRM_ASSERT(*uuid);
}
if (transition_id) {
*transition_id = local_transition_id;
}
if (action_id) {
*action_id = local_action_id;
}
if (target_rc) {
*target_rc = local_target_rc;
}
return TRUE;
}
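/* Example (hypothetical IDs and node UUID): pcmk__transition_key() and
 * decode_transition_key() round-trip the same format.
 */
#if 0
static void
transition_key_example(void)
{
    int transition_id = 0, action_id = 0, target_rc = 0;
    char *uuid = NULL;
    char *key = pcmk__transition_key(5, 3, 0,
                                     "abcdefab-cdef-abcd-efab-cdefabcdefab");

    /* key is "3:5:0:abcdefab-cdef-abcd-efab-cdefabcdefab" */
    if (decode_transition_key(key, &uuid, &transition_id, &action_id,
                              &target_rc)) {
        /* transition_id 5, action_id 3, target_rc 0 */
        free(uuid);
    }
    free(key);
}
#endif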
// Return true if a is an attribute that should be filtered
static bool
should_filter_for_digest(xmlAttrPtr a, void *user_data)
{
if (strncmp((const char *) a->name, CRM_META "_",
sizeof(CRM_META " ") - 1) == 0) {
return true;
}
return pcmk__str_any_of((const char *) a->name,
XML_ATTR_ID,
XML_ATTR_CRM_VERSION,
XML_LRM_ATTR_OP_DIGEST,
XML_LRM_ATTR_TARGET,
XML_LRM_ATTR_TARGET_UUID,
"pcmk_external_ip",
NULL);
}
/*!
* \internal
* \brief Remove XML attributes not needed for operation digest
*
* \param[in,out] param_set XML with operation parameters
*/
void
pcmk__filter_op_for_digest(xmlNode *param_set)
{
char *key = NULL;
char *timeout = NULL;
guint interval_ms = 0;
if (param_set == NULL) {
return;
}
/* Timeout is useful for recurring operation digests, so grab it before
* removing meta-attributes
*/
key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
if (crm_element_value_ms(param_set, key, &interval_ms) != pcmk_ok) {
interval_ms = 0;
}
free(key);
key = NULL;
if (interval_ms != 0) {
key = crm_meta_name(XML_ATTR_TIMEOUT);
timeout = crm_element_value_copy(param_set, key);
}
// Remove all CRM_meta_* attributes and certain other attributes
pcmk__xe_remove_matching_attrs(param_set, should_filter_for_digest, NULL);
// Add timeout back for recurring operation digests
if (timeout != NULL) {
crm_xml_add(param_set, key, timeout);
}
free(timeout);
free(key);
}
int
rsc_op_expected_rc(lrmd_event_data_t * op)
{
int rc = 0;
if (op && op->user_data) {
decode_transition_key(op->user_data, NULL, NULL, NULL, &rc);
}
return rc;
}
gboolean
did_rsc_op_fail(lrmd_event_data_t * op, int target_rc)
{
switch (op->op_status) {
case PCMK_LRM_OP_CANCELLED:
case PCMK_LRM_OP_PENDING:
return FALSE;
case PCMK_LRM_OP_NOTSUPPORTED:
case PCMK_LRM_OP_TIMEOUT:
case PCMK_LRM_OP_ERROR:
case PCMK_LRM_OP_NOT_CONNECTED:
case PCMK_LRM_OP_INVALID:
return TRUE;
default:
if (target_rc != op->rc) {
return TRUE;
}
}
return FALSE;
}
/*!
* \brief Create a CIB XML element for an operation
*
* \param[in] parent If not NULL, make new XML node a child of this one
* \param[in] prefix Generate an ID using this prefix
* \param[in] task Operation task to set
* \param[in] interval_spec Operation interval to set
* \param[in] timeout If not NULL, operation timeout to set
*
* \return New XML object on success, NULL otherwise
*/
xmlNode *
crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task,
const char *interval_spec, const char *timeout)
{
xmlNode *xml_op;
CRM_CHECK(prefix && task && interval_spec, return NULL);
xml_op = create_xml_node(parent, XML_ATTR_OP);
crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval_spec);
crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval_spec);
crm_xml_add(xml_op, "name", task);
if (timeout) {
crm_xml_add(xml_op, XML_ATTR_TIMEOUT, timeout);
}
return xml_op;
}
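/* Example (hypothetical prefix and values): creating a recurring monitor
 * definition; the resulting operation element gets the ID "my-rsc-monitor-10s".
 */
#if 0
static void
create_op_xml_example(xmlNode *parent)
{
    xmlNode *op = crm_create_op_xml(parent, "my-rsc", "monitor", "10s", "20s");

    CRM_CHECK(op != NULL, return);
}
#endif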
/*!
* \brief Check whether an operation requires resource agent meta-data
*
* \param[in] rsc_class Resource agent class (or NULL to skip class check)
* \param[in] op Operation action (or NULL to skip op check)
*
* \return TRUE if operation needs meta-data, FALSE otherwise
* \note At least one of rsc_class and op must be specified.
*/
bool
crm_op_needs_metadata(const char *rsc_class, const char *op)
{
- /* Agent meta-data is used to determine whether a reload is possible, and to
- * evaluate versioned parameters -- so if this op is not relevant to those
- * features, we don't need the meta-data.
+ /* Agent meta-data is used to determine whether an agent reload is possible,
+ * and to evaluate versioned parameters -- so if this op is not relevant to
+ * those features, we don't need the meta-data.
*/
CRM_CHECK((rsc_class != NULL) || (op != NULL), return false);
if ((rsc_class != NULL)
&& !pcmk_is_set(pcmk_get_ra_caps(rsc_class), pcmk_ra_cap_params)) {
/* Meta-data is only needed for resource classes that use parameters */
return false;
}
if (op == NULL) {
return true;
}
/* Meta-data is only needed for these actions */
return pcmk__str_any_of(op, CRMD_ACTION_START, CRMD_ACTION_STATUS,
CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
- CRMD_ACTION_RELOAD, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, CRMD_ACTION_NOTIFY, NULL);
+ CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
+ CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
+ CRMD_ACTION_NOTIFY, NULL);
}
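/* Example (derived from the checks above): with no class given, only the
 * listed actions require meta-data.
 */
#if 0
static void
op_needs_metadata_example(void)
{
    crm_op_needs_metadata(NULL, CRMD_ACTION_START);    /* true */
    crm_op_needs_metadata(NULL, CRMD_ACTION_STOP);     /* false */
}
#endif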
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index 73c23b8fd0..f51955b666 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,3092 +1,3093 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
CRM_TRACE_INIT_DATA(pacemaker);
extern bool pcmk__is_daemon;
void set_alloc_actions(pe_working_set_t * data_set);
extern void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
extern gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
static void apply_remote_node_ordering(pe_working_set_t *data_set);
static enum remote_connection_state get_remote_node_state(pe_node_t *node);
enum remote_connection_state {
remote_state_unknown = 0,
remote_state_alive = 1,
remote_state_resting = 2,
remote_state_failed = 3,
remote_state_stopped = 4
};
static const char *
state2text(enum remote_connection_state state)
{
switch (state) {
case remote_state_unknown:
return "unknown";
case remote_state_alive:
return "alive";
case remote_state_resting:
return "resting";
case remote_state_failed:
return "failed";
case remote_state_stopped:
return "stopped";
}
return "impossible";
}
resource_alloc_functions_t resource_class_alloc_functions[] = {
{
pcmk__native_merge_weights,
pcmk__native_allocate,
native_create_actions,
native_create_probe,
native_internal_constraints,
native_rsc_colocation_lh,
native_rsc_colocation_rh,
native_rsc_location,
native_action_flags,
native_update_actions,
native_expand,
native_append_meta,
},
{
pcmk__group_merge_weights,
pcmk__group_allocate,
group_create_actions,
native_create_probe,
group_internal_constraints,
group_rsc_colocation_lh,
group_rsc_colocation_rh,
group_rsc_location,
group_action_flags,
group_update_actions,
group_expand,
group_append_meta,
},
{
pcmk__native_merge_weights,
pcmk__clone_allocate,
clone_create_actions,
clone_create_probe,
clone_internal_constraints,
clone_rsc_colocation_lh,
clone_rsc_colocation_rh,
clone_rsc_location,
clone_action_flags,
pcmk__multi_update_actions,
clone_expand,
clone_append_meta,
},
{
pcmk__native_merge_weights,
pcmk__bundle_allocate,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
pcmk__bundle_rsc_colocation_lh,
pcmk__bundle_rsc_colocation_rh,
pcmk__bundle_rsc_location,
pcmk__bundle_action_flags,
pcmk__multi_update_actions,
pcmk__bundle_expand,
pcmk__bundle_append_meta,
}
};
gboolean
update_action_flags(pe_action_t * action, enum pe_action_flags flags, const char *source, int line)
{
static unsigned long calls = 0;
gboolean changed = FALSE;
gboolean clear = pcmk_is_set(flags, pe_action_clear);
enum pe_action_flags last = action->flags;
if (clear) {
pe__clear_action_flags_as(source, line, action, flags);
} else {
pe__set_action_flags_as(source, line, action, flags);
}
if (last != action->flags) {
calls++;
changed = TRUE;
/* Useful for tracking down _who_ changed a specific flag */
/* CRM_ASSERT(calls != 534); */
pe__clear_raw_action_flags(flags, "action update", pe_action_clear);
crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
action->uuid, action->node ? action->node->details->uname : "[none]",
clear ? "un-" : "", flags, last, action->flags, calls, source);
}
return changed;
}
static gboolean
check_rsc_parameters(pe_resource_t * rsc, pe_node_t * node, xmlNode * rsc_entry,
gboolean active_here, pe_working_set_t * data_set)
{
int attr_lpc = 0;
gboolean force_restart = FALSE;
gboolean delete_resource = FALSE;
gboolean changed = FALSE;
const char *value = NULL;
const char *old_value = NULL;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
for (; attr_lpc < PCMK__NELEM(attr_list); attr_lpc++) {
value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
if (value == old_value /* i.e. NULL */
|| pcmk__str_eq(value, old_value, pcmk__str_none)) {
continue;
}
changed = TRUE;
trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
if (active_here) {
force_restart = TRUE;
crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
rsc->id, node->details->uname, attr_list[attr_lpc],
crm_str(old_value), crm_str(value));
}
}
if (force_restart) {
/* make sure the restart happens */
stop_action(rsc, node, FALSE);
pe__set_resource_flags(rsc, pe_rsc_start_pending);
delete_resource = TRUE;
} else if (changed) {
delete_resource = TRUE;
}
return delete_resource;
}
static void
CancelXmlOp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * active_node,
const char *reason, pe_working_set_t * data_set)
{
guint interval_ms = 0;
pe_action_t *cancel = NULL;
const char *task = NULL;
const char *call_id = NULL;
CRM_CHECK(xml_op != NULL, return);
CRM_CHECK(active_node != NULL, return);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
crm_info("Action " PCMK__OP_FMT " on %s will be stopped: %s",
rsc->id, task, interval_ms,
active_node->details->uname, (reason? reason : "unknown"));
cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
}
static gboolean
check_action_definition(pe_resource_t * rsc, pe_node_t * active_node, xmlNode * xml_op,
pe_working_set_t * data_set)
{
char *key = NULL;
guint interval_ms = 0;
const op_digest_cache_t *digest_data = NULL;
gboolean did_change = FALSE;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *digest_secure = NULL;
CRM_CHECK(active_node != NULL, return FALSE);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if (interval_ms > 0) {
xmlNode *op_match = NULL;
/* we need to reconstruct the key because of the way we used to construct resource IDs */
key = pcmk__op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Checking parameters for %s", key);
op_match = find_rsc_op_entry(rsc, key);
if ((op_match == NULL)
&& pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)) {
CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
free(key);
return TRUE;
} else if (op_match == NULL) {
pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
free(key);
return TRUE;
}
free(key);
key = NULL;
}
crm_trace("Testing " PCMK__OP_FMT " on %s",
rsc->id, task, interval_ms, active_node->details->uname);
if ((interval_ms == 0) && pcmk__str_eq(task, RSC_STATUS, pcmk__str_casei)) {
/* Reload based on the start action not a probe */
task = RSC_START;
} else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_MIGRATED, pcmk__str_casei)) {
/* Reload based on the start action not a migrate */
task = RSC_START;
} else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_PROMOTE, pcmk__str_casei)) {
/* Reload based on the start action not a promote */
task = RSC_START;
}
digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
if (pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
}
if(digest_data->rc != RSC_DIGEST_MATCH
&& digest_secure
&& digest_data->digest_secure_calc
&& strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
if (!pcmk__is_daemon && data_set->priv != NULL) {
pcmk__output_t *out = data_set->priv;
out->info(out, "Only 'private' parameters to "
PCMK__OP_FMT " on %s changed: %s", rsc->id, task,
interval_ms, active_node->details->uname,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
}
} else if (digest_data->rc == RSC_DIGEST_RESTART) {
/* Changes that force a restart */
pe_action_t *required = NULL;
did_change = TRUE;
key = pcmk__op_key(rsc->id, task, interval_ms);
crm_log_xml_info(digest_data->params_restart, "params:restart");
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
pe_action_set_flag_reason(__func__, __LINE__, required, NULL,
"resource definition change", pe_action_optional, TRUE);
trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
} else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
- /* Changes that can potentially be handled by a reload */
+ // Changes that can potentially be handled by an agent reload
const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
did_change = TRUE;
trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
crm_log_xml_info(digest_data->params_all, "params:reload");
key = pcmk__op_key(rsc->id, task, interval_ms);
if (interval_ms > 0) {
pe_action_t *op = NULL;
#if 0
/* Always reload/restart the entire resource */
ReloadRsc(rsc, active_node, data_set);
#else
/* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_reschedule);
#endif
} else if (digest_restart) {
pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
/* Reload this resource */
ReloadRsc(rsc, active_node, data_set);
free(key);
} else {
pe_action_t *required = NULL;
- pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
+ pe_rsc_trace(rsc, "Resource %s doesn't support agent reloads",
+ rsc->id);
/* Re-send the start/demote/promote op
* Recurring ops will be detected independently
*/
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
pe_action_set_flag_reason(__func__, __LINE__, required, NULL,
"resource definition change", pe_action_optional, TRUE);
}
}
return did_change;
}
/*!
* \internal
* \brief Do deferred action checks after allocation
*
* \param[in] data_set Working set for cluster
*/
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
enum pe_check_parameters check, pe_working_set_t *data_set)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
case pe_check_active:
if (check_action_definition(rsc, node, rsc_op, data_set)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
reason = "action definition changed";
}
break;
case pe_check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
switch (digest_data->rc) {
case RSC_DIGEST_UNKNOWN:
crm_trace("Resource %s history entry %s on %s has no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
case RSC_DIGEST_MATCH:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason) {
pe__clear_failcount(rsc, node, reason, data_set);
}
}
static void
check_actions_for(xmlNode * rsc_entry, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
{
GList *gIter = NULL;
int offset = -1;
int stop_index = 0;
int start_index = 0;
const char *task = NULL;
xmlNode *rsc_op = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
CRM_CHECK(node != NULL, return);
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_resource_t *parent = uber_parent(rsc);
if(parent == NULL
|| pe_rsc_is_clone(parent) == FALSE
|| pcmk_is_set(parent->flags, pe_rsc_unique)) {
pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
DeleteRsc(rsc, node, FALSE, data_set);
} else {
pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
}
return;
} else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
rsc->id, node->details->uname);
return;
}
pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
DeleteRsc(rsc, node, FALSE, data_set);
}
for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
rsc_op = pcmk__xe_next(rsc_op)) {
if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
op_list = g_list_prepend(op_list, rsc_op);
}
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
offset++;
if (start_index < stop_index) {
/* stopped */
continue;
} else if (offset < start_index) {
/* action occurred prior to a start */
continue;
}
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if ((interval_ms > 0) &&
(pcmk_is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
// Maintenance mode cancels recurring operations
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
} else if ((interval_ms > 0) || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
RSC_PROMOTE, RSC_MIGRATED, NULL)) {
/* If a resource operation failed and the operation's definition
* has changed, clear any fail count so the operation can be retried fresh.
*/
if (pe__bundle_needs_remote_name(rsc, data_set)) {
/* We haven't allocated resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
pe__add_param_check(rsc_op, rsc, node, pe_check_active,
data_set);
} else if (check_action_definition(rsc, node, rsc_op, data_set)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
pe__clear_failcount(rsc, node, "action definition changed",
data_set);
}
}
}
g_list_free(sorted_op_list);
}
static GList *
find_rsc_list(GList *result, pe_resource_t * rsc, const char *id, gboolean renamed_clones,
gboolean partial, pe_working_set_t * data_set)
{
GList *gIter = NULL;
gboolean match = FALSE;
if (id == NULL) {
return NULL;
}
if (rsc == NULL) {
if (data_set == NULL) {
return NULL;
}
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial,
NULL);
}
return result;
}
if (partial) {
if (strstr(rsc->id, id)) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
match = TRUE;
}
} else {
if (strcmp(rsc->id, id) == 0) {
match = TRUE;
} else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = TRUE;
}
}
if (match) {
result = g_list_prepend(result, rsc);
}
if (rsc->children) {
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
}
}
return result;
}
static void
check_actions(pe_working_set_t * data_set)
{
const char *id = NULL;
pe_node_t *node = NULL;
xmlNode *lrm_rscs = NULL;
xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
xmlNode *node_state = NULL;
for (node_state = pcmk__xe_first_child(status); node_state != NULL;
node_state = pcmk__xe_next(node_state)) {
if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE,
pcmk__str_none)) {
id = crm_element_value(node_state, XML_ATTR_ID);
lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
node = pe_find_node_id(data_set->nodes, id);
if (node == NULL) {
continue;
/* Still need to check actions for a maintenance node to cancel existing monitor operations */
} else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
crm_trace("Skipping param check for %s: can't run resources",
node->details->uname);
continue;
}
crm_trace("Processing node %s", node->details->uname);
if (node->details->online
|| pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
xmlNode *rsc_entry = NULL;
for (rsc_entry = pcmk__xe_first_child(lrm_rscs);
rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry)) {
if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
if (xml_has_children(rsc_entry)) {
GList *gIter = NULL;
GList *result = NULL;
const char *rsc_id = ID(rsc_entry);
CRM_CHECK(rsc_id != NULL, return);
result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
for (gIter = result; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (rsc->variant != pe_native) {
continue;
}
check_actions_for(rsc_entry, rsc, node, data_set);
}
g_list_free(result);
}
}
}
}
}
}
}
static void
apply_placement_constraints(pe_working_set_t * data_set)
{
for (GList *gIter = data_set->placement_constraints;
gIter != NULL; gIter = gIter->next) {
pe__location_t *cons = gIter->data;
cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
}
}
static gboolean
failcount_clear_action_exists(pe_node_t * node, pe_resource_t * rsc)
{
gboolean rc = FALSE;
GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
if (list) {
rc = TRUE;
}
g_list_free(list);
return rc;
}
/*!
* \internal
* \brief Force resource away if failures hit migration threshold
*
* \param[in,out] rsc Resource to check for failures
* \param[in,out] node Node to check for failures
* \param[in,out] data_set Cluster working set to update
*/
static void
check_migration_threshold(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
int fail_count, countdown;
pe_resource_t *failed;
/* Migration threshold of 0 means never force away */
if (rsc->migration_threshold == 0) {
return;
}
// If we're ignoring failures, also ignore the migration threshold
if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
return;
}
/* If there are no failures, there's no need to force away */
fail_count = pe_get_failcount(node, rsc, NULL,
pe_fc_effective|pe_fc_fillers, NULL,
data_set);
if (fail_count <= 0) {
return;
}
/* How many more times recovery will be tried on this node */
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
/* If failed resource has a parent, we'll force the parent away */
failed = rsc;
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
failed = uber_parent(rsc);
}
if (countdown == 0) {
resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
failed->id, node->details->uname, fail_count,
rsc->migration_threshold);
} else {
crm_info("%s can fail %d more times on %s before being forced off",
failed->id, countdown, node->details->uname);
}
}
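/*!
* \internal
* \brief Apply stickiness and check the migration threshold for a resource
*
* Recurse into any children; for a managed primitive with nonzero stickiness
* that is active on exactly one node, prefer its current node by the
* stickiness amount (when allowed). Also check the migration threshold
* unless a clear-failcount action is already scheduled for the node.
*
* \param[in] rsc Resource to process
* \param[in] node Node being considered
* \param[in] data_set Cluster working set
*/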
static void
common_apply_stickiness(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
{
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
common_apply_stickiness(child_rsc, node, data_set);
}
return;
}
if (pcmk_is_set(rsc->flags, pe_rsc_managed)
&& rsc->stickiness != 0 && pcmk__list_of_1(rsc->running_on)) {
pe_node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
pe_node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (current == NULL) {
} else if ((match != NULL)
|| pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
pe_resource_t *sticky_rsc = rsc;
resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
" (node=%s, weight=%d)", sticky_rsc->id,
node->details->uname, rsc->stickiness);
} else {
GHashTableIter iter;
pe_node_t *nIter = NULL;
pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
" and node %s is not explicitly allowed", rsc->id, node->details->uname);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
}
}
}
/* Check the migration threshold only if a failcount clear action
* has not already been placed for this resource on the node.
* There is no sense in potentially forcing the resource from this
* node if the failcount is being reset anyway.
*
* @TODO A clear_failcount operation can be scheduled in stage4() via
* check_actions_for(), or in stage5() via check_params(). This runs in
* stage2(), so it cannot detect those, meaning we might check the migration
* threshold when we shouldn't -- worst case, we stop or move the resource,
* then move it back next transition.
*/
if (failcount_clear_action_exists(node, rsc) == FALSE) {
check_migration_threshold(rsc, node, data_set);
}
}
void
complex_set_cmds(pe_resource_t * rsc)
{
GList *gIter = rsc->children;
rsc->cmds = &resource_class_alloc_functions[rsc->variant];
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
complex_set_cmds(child_rsc);
}
}
void
set_alloc_actions(pe_working_set_t * data_set)
{
GList *gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
complex_set_cmds(rsc);
}
}
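/*!
* \internal
* \brief Add a "#health" node attribute's score to a running total
*
* Intended for use with g_hash_table_foreach() over a node's attributes:
* if the attribute name starts with "#health", convert its value to a
* score and add it to the int pointed to by \p user_data.
*
* \param[in] gKey Attribute name
* \param[in] gValue Attribute value
* \param[in,out] user_data Pointer to the running total (int)
*/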
static void
calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
{
const char *key = (const char *)gKey;
const char *value = (const char *)gValue;
int *system_health = (int *)user_data;
if (!gKey || !gValue || !user_data) {
return;
}
if (pcmk__starts_with(key, "#health")) {
int score;
/* Convert the value into an integer */
score = char2score(value);
/* Add it to the running total */
*system_health = pe__add_scores(score, *system_health);
}
}
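/*!
* \internal
* \brief Apply the configured node health strategy
*
* Translate node health attributes into scores according to the
* node-health-strategy cluster option and, except for the "none" and
* "custom" strategies, create location preferences for every resource on
* each node whose combined health score is nonzero.
*
* \param[in] data_set Cluster working set
*
* \return FALSE if the configured strategy is unknown, otherwise TRUE
*/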
static gboolean
apply_system_health(pe_working_set_t * data_set)
{
GList *gIter = NULL;
const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
int base_health = 0;
if (pcmk__str_eq(health_strategy, "none", pcmk__str_null_matches | pcmk__str_casei)) {
/* Prevent any accidental health -> score translation */
pcmk__score_red = 0;
pcmk__score_yellow = 0;
pcmk__score_green = 0;
return TRUE;
} else if (pcmk__str_eq(health_strategy, "migrate-on-red", pcmk__str_casei)) {
/* Resources on nodes which have health values of red are
* weighted away from that node.
*/
pcmk__score_red = -INFINITY;
pcmk__score_yellow = 0;
pcmk__score_green = 0;
} else if (pcmk__str_eq(health_strategy, "only-green", pcmk__str_casei)) {
/* Resources on nodes which have health values of red or yellow
* are forced away from that node.
*/
pcmk__score_red = -INFINITY;
pcmk__score_yellow = -INFINITY;
pcmk__score_green = 0;
} else if (pcmk__str_eq(health_strategy, "progressive", pcmk__str_casei)) {
/* Same as the above, but use the r/y/g scores provided by the user.
* Defaults are provided by the pe_prefs table.
* Also, a custom health "base score" can be used.
*/
base_health = char2score(pe_pref(data_set->config_hash,
"node-health-base"));
} else if (pcmk__str_eq(health_strategy, "custom", pcmk__str_casei)) {
/* Requires the admin to configure the rsc_location constraints for
* processing the stored health scores
*/
/* TODO: Check for the existence of appropriate node health constraints */
return TRUE;
} else {
crm_err("Unknown node health strategy: %s", health_strategy);
return FALSE;
}
crm_info("Applying automated node health strategy: %s", health_strategy);
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
int system_health = base_health;
pe_node_t *node = (pe_node_t *) gIter->data;
/* Search through the node hash table for system health entries. */
g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
crm_info(" Node %s has an combined system health of %d",
node->details->uname, system_health);
/* If the health is non-zero, then create a new rsc2node so that the
* weight will be added later on.
*/
if (system_health != 0) {
GList *gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
}
}
}
return TRUE;
}
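/*!
* \internal
* \brief Stage 0 of scheduling: unpack the CIB and prepare the working set
*
* Calculate cluster status if not already done, assign the allocation
* methods for each resource, apply the node health strategy, and unpack
* constraints.
*
* \param[in] data_set Cluster working set
*
* \return FALSE if the input CIB is NULL, otherwise TRUE
*/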
gboolean
stage0(pe_working_set_t * data_set)
{
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
if (data_set->input == NULL) {
return FALSE;
}
if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
crm_trace("Calculating status");
cluster_status(data_set);
}
set_alloc_actions(data_set);
apply_system_health(data_set);
unpack_constraints(cib_constraints, data_set);
return TRUE;
}
/*
* Check nodes for resources started outside of the LRM
*/
gboolean
probe_resources(pe_working_set_t * data_set)
{
pe_action_t *probe_node_complete = NULL;
for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
if (node->details->online == FALSE) {
if (pe__is_remote_node(node) && node->details->remote_rsc
&& (get_remote_node_state(node) == remote_state_failed)) {
pe_fence_node(data_set, node, "the connection is unrecoverable", FALSE);
}
continue;
} else if (node->details->unclean) {
continue;
} else if (node->details->rsc_discovery_enabled == FALSE) {
/* resource discovery is disabled for this node */
continue;
}
if (probed != NULL && crm_is_true(probed) == FALSE) {
pe_action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
continue;
}
for (GList *gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
}
}
return TRUE;
}
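/*!
* \internal
* \brief Ban a resource from nodes excluded by exclusive discovery
*
* If the resource or its top-level parent uses exclusive resource discovery,
* recurse into any children, then ban the resource from the given node
* unless its discovery mode there is exclusive.
*
* \param[in] rsc Resource to filter
* \param[in] node Node being considered
*/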
static void
rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node)
{
GList *gIter = rsc->children;
pe_resource_t *top = uber_parent(rsc);
pe_node_t *match;
if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
return;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
rsc_discover_filter(child_rsc, node);
}
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match && match->rsc_discover_mode != pe_discover_exclusive) {
match->weight = -INFINITY;
}
}
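/*!
* \internal
* \brief Get the time at which a node began shutting down
*
* \param[in] node Node to check
* \param[in] data_set Cluster working set
*
* \return Value of the node's shutdown attribute, or the effective current
* time if the attribute is missing or invalid
*/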
static time_t
shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
time_t result = 0;
if (shutdown) {
long long result_ll;
if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
result = (time_t) result_ll;
}
}
return result? result : get_effective_time(data_set);
}
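/*!
* \internal
* \brief Lock a resource to its current node if that node is shutting down
*
* Implements shutdown locks for primitives and members of (uncloned) groups:
* a resource active on exactly one cleanly shutting-down node is locked to
* that node and banned from all other nodes. Fence devices and remote
* connections are never locked.
*
* \param[in] rsc Resource to check
* \param[in] data_set Cluster working set
*/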
static void
apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
{
const char *class;
// Only primitives and (uncloned) groups may be locked
if (rsc->variant == pe_group) {
for (GList *item = rsc->children; item != NULL;
item = item->next) {
apply_shutdown_lock((pe_resource_t *) item->data, data_set);
}
} else if (rsc->variant != pe_native) {
return;
}
// Fence devices and remote connections can't be locked
class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
|| pe__resource_is_remote_conn(rsc, data_set)) {
return;
}
if (rsc->lock_node != NULL) {
// The lock was obtained from resource history
if (rsc->running_on != NULL) {
/* The resource was started elsewhere even though it is now
* considered locked. This shouldn't be possible, but as a
* failsafe, we don't want to disturb the resource now.
*/
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
pe__clear_resource_history(rsc, rsc->lock_node, data_set);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
pe_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
rsc->id, node->details->uname);
} else {
rsc->lock_node = node;
rsc->lock_time = shutdown_time(node, data_set);
}
}
}
if (rsc->lock_node == NULL) {
// No lock needed
return;
}
if (data_set->shutdown_lock > 0) {
time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, rsc->lock_node->details->uname,
(long long) lock_expiration);
pe__update_recheck_time(++lock_expiration, data_set);
} else {
pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, rsc->lock_node->details->uname);
}
// If resource is locked to one node, ban it from all other nodes
for (GList *item = data_set->nodes; item != NULL; item = item->next) {
pe_node_t *node = item->data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
resource_location(rsc, node, -CRM_SCORE_INFINITY,
XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
}
}
}
/*!
* \internal
* \brief Stage 2 of scheduling: apply node-specific criteria
*
* Apply shutdown locks, count valid nodes, and apply location constraints,
* stickiness, and exclusive resource discovery.
*/
gboolean
stage2(pe_working_set_t * data_set)
{
GList *gIter = NULL;
if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
apply_shutdown_lock((pe_resource_t *) gIter->data, data_set);
}
}
if (!pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
// @COMPAT API backward compatibility
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (node && (node->weight >= 0) && node->details->online
&& (node->details->type != node_ping)) {
data_set->max_valid_nodes++;
}
}
}
apply_placement_constraints(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
GList *gIter2 = NULL;
pe_node_t *node = (pe_node_t *) gIter->data;
gIter2 = data_set->resources;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
common_apply_stickiness(rsc, node, data_set);
rsc_discover_filter(rsc, node);
}
}
return TRUE;
}
/*
* Create internal resource constraints before allocation
*/
gboolean
stage3(pe_working_set_t * data_set)
{
GList *gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->cmds->internal_constraints(rsc, data_set);
}
return TRUE;
}
/*
* Check for orphaned or redefined actions
*/
gboolean
stage4(pe_working_set_t * data_set)
{
check_actions(data_set);
return TRUE;
}
static void *
convert_const_pointer(const void *ptr)
{
/* Worst function ever */
return (void *)ptr;
}
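/*!
* \internal
* \brief Compare two resources to determine their allocation order
*
* Intended for use with g_list_sort_with_data(): compare by priority first,
* then by the merged node weight of each resource's current node, then by
* merged weights on the sorted node list passed as \p data.
*
* \param[in] a First resource to compare
* \param[in] b Second resource to compare
* \param[in] data GList of nodes sorted by weight
*
* \return -1 if \p a sorts first, 1 if \p b sorts first, otherwise 0
*/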
static gint
sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
{
int rc = 0;
int r1_weight = -INFINITY;
int r2_weight = -INFINITY;
const char *reason = "existence";
GList *nodes = (GList *) data;
const pe_resource_t *resource1 = a;
const pe_resource_t *resource2 = b;
pe_node_t *r1_node = NULL;
pe_node_t *r2_node = NULL;
GList *gIter = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
reason = "priority";
r1_weight = resource1->priority;
r2_weight = resource2->priority;
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "no node list";
if (nodes == NULL) {
goto done;
}
r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1),
resource1->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
resource1->cluster);
r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2),
resource2->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
resource2->cluster);
/* Current location score */
reason = "current location";
r1_weight = -INFINITY;
r2_weight = -INFINITY;
if (resource1->running_on) {
r1_node = pe__current_node(resource1);
r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
if (r1_node != NULL) {
r1_weight = r1_node->weight;
}
}
if (resource2->running_on) {
r2_node = pe__current_node(resource2);
r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
if (r2_node != NULL) {
r2_weight = r2_node->weight;
}
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
reason = "score";
for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
r1_node = NULL;
r2_node = NULL;
r1_weight = -INFINITY;
if (r1_nodes) {
r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
}
if (r1_node) {
r1_weight = r1_node->weight;
}
r2_weight = -INFINITY;
if (r2_nodes) {
r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
}
if (r2_node) {
r2_weight = r2_node->weight;
}
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
rc < 0 ? '>' : rc > 0 ? '<' : '=',
resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
if (r1_nodes) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
static void
allocate_resources(pe_working_set_t * data_set)
{
GList *gIter = NULL;
if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
/* Allocate remote connection resources first (which will also allocate
* any colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (rsc->is_remote_node == FALSE) {
continue;
}
pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
rsc->id);
rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
}
}
/* now do the rest of the resources */
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (rsc->is_remote_node == TRUE) {
continue;
}
pe_rsc_trace(rsc, "Allocating %s resource '%s'",
crm_element_name(rsc->xml), rsc->id);
rsc->cmds->allocate(rsc, NULL, data_set);
}
}
/* We always use pe_order_preserve with these convenience functions to exempt
* internally generated constraints from the prohibition of user constraints
* involving remote connection resources.
*
* The start ordering additionally uses pe_order_runnable_left so that the
* specified action is not runnable if the start is not runnable.
*/
static inline void
order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action,
enum pe_ordering extra, pe_working_set_t *data_set)
{
if (lh_rsc && rh_action && data_set) {
custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
rh_action->rsc, NULL, rh_action,
pe_order_preserve | pe_order_runnable_left | extra,
data_set);
}
}
static inline void
order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc,
enum pe_ordering extra, pe_working_set_t *data_set)
{
if (lh_action && rh_rsc && data_set) {
custom_action_order(lh_action->rsc, NULL, lh_action,
rh_rsc, stop_key(rh_rsc), NULL,
pe_order_preserve | extra, data_set);
}
}
// Clear fail counts for orphaned rsc on all online nodes
static void
cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GList *gIter = NULL;
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (node->details->online
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
pe_action_t *clear_op = NULL;
clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
data_set);
/* We can't use order_action_then_stop() here because its
* pe_order_preserve breaks things
*/
custom_action_order(clear_op->rsc, NULL, clear_op,
rsc, stop_key(rsc), NULL,
pe_order_optional, data_set);
}
}
}
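/*!
* \internal
* \brief Stage 5 of scheduling: allocate resources and create their actions
*
* Sort resources according to the placement strategy if needed, allocate
* them to nodes, run deferred parameter checks, schedule any needed probes,
* clean up orphaned resources, and create each resource's actions.
*
* \param[in] data_set Cluster working set
*
* \return Always TRUE
*/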
gboolean
stage5(pe_working_set_t * data_set)
{
pcmk__output_t *out = data_set->priv;
GList *gIter = NULL;
if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
GList *nodes = g_list_copy(data_set->nodes);
nodes = sort_nodes_by_weight(nodes, NULL, data_set);
data_set->resources =
g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
g_list_free(nodes);
}
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
out->message(out, "node-capacity", node, "Original");
}
}
crm_trace("Allocating services");
/* Take (next) highest resource, assign it and create its actions */
allocate_resources(data_set);
gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
out->message(out, "node-capacity", node, "Remaining");
}
}
// Process deferred action checks
pe__foreach_param_check(data_set, check_params);
pe__free_param_checks(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
crm_trace("Calculating needed probes");
/* This code probably needs optimization
* ptest -x with 100 nodes, 100 clones and clone-max=100:
With probes:
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
36s
ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
Without probes:
ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
*/
probe_resources(data_set);
}
crm_trace("Handle orphans");
if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
/* There's no need to recurse into rsc->children because those
* should just be unallocated clone instances.
*/
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
cleanup_orphans(rsc, data_set);
}
}
}
crm_trace("Creating actions");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->cmds->create_actions(rsc, data_set);
}
crm_trace("Creating done");
return TRUE;
}
static gboolean
is_managed(const pe_resource_t * rsc)
{
GList *gIter = rsc->children;
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
return TRUE;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (is_managed(child_rsc)) {
return TRUE;
}
}
return FALSE;
}
static gboolean
any_managed_resources(pe_working_set_t * data_set)
{
GList *gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (is_managed(rsc)) {
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Create pseudo-op for guest node fence, and order relative to it
*
* \param[in] node Guest node to fence
* \param[in] data_set Working set of CIB state
*/
static void
fence_guest(pe_node_t *node, pe_working_set_t *data_set)
{
pe_resource_t *container = node->details->remote_rsc->container;
pe_action_t *stop = NULL;
pe_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than letting it default
* to the cluster's default action, because we are not _initiating_ fencing -- we
* are creating a pseudo-event to describe fencing that is already occurring
* by other means (container recovery).
*/
const char *fence_action = "off";
/* Check whether guest's container resource has any explicit stop or
* start (the stop may be implied by fencing of the guest's host).
*/
if (container) {
stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
fence_action = "reboot";
}
}
/* Create a fence pseudo-event, so we have an event to order actions
* against, and the controller can always detect it.
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, data_set);
update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
__func__, __LINE__);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) {
pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, data_set);
crm_info("Implying guest node %s is down (action %d) after %s fencing",
node->details->uname, stonith_op->id, stop->node->details->uname);
order_actions(parent_stonith_op, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
} else if (stop) {
order_actions(stop, stonith_op,
pe_order_runnable_left|pe_order_implies_then);
crm_info("Implying guest node %s is down (action %d) "
"after container %s is stopped (action %d)",
node->details->uname, stonith_op->id,
container->id, stop->id);
} else {
/* If we're fencing the guest node but there's no stop for the guest
* resource, we must think the guest is already stopped. However, we may
* think so because its resource history was just cleaned. To avoid
* unnecessarily considering the guest node down if it's really up,
* order the pseudo-fencing after any stop of the connection resource,
* which will be ordered after any container (re-)probe.
*/
stop = find_first_action(node->details->remote_rsc->actions, NULL,
RSC_STOP, NULL);
if (stop) {
order_actions(stop, stonith_op, pe_order_optional);
crm_info("Implying guest node %s is down (action %d) "
"after connection is stopped (action %d)",
node->details->uname, stonith_op->id, stop->id);
} else {
/* Not sure why we're fencing, but everything must already be
* cleanly stopped.
*/
crm_info("Implying guest node %s is down (action %d) ",
node->details->uname, stonith_op->id);
}
}
/* Order/imply other actions relative to pseudo-fence as with real fence */
pcmk__order_vs_fence(stonith_op, data_set);
}
/*
* Create dependencies for stonith and shutdown operations
*/
gboolean
stage6(pe_working_set_t * data_set)
{
pe_action_t *dc_down = NULL;
pe_action_t *stonith_op = NULL;
gboolean integrity_lost = FALSE;
gboolean need_stonith = TRUE;
GList *gIter;
GList *stonith_ops = NULL;
GList *shutdown_ops = NULL;
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we will mark the node as dirty.
*
* A nice side effect of doing them early is that apply_*_ordering() can be
* simpler because pe_fence_node() has already done some of the work.
*/
crm_trace("Creating remote ordering constraints");
apply_remote_node_ordering(data_set);
crm_trace("Processing fencing and shutdown cases");
if (any_managed_resources(data_set) == FALSE) {
crm_notice("Delaying fencing operations until there are resources to manage");
need_stonith = FALSE;
}
/* Check each node for stonith/shutdown */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && need_stonith
&& pe_can_fence(data_set, node)) {
fence_guest(node, data_set);
}
continue;
}
stonith_op = NULL;
if (node->details->unclean
&& need_stonith && pe_can_fence(data_set, node)) {
stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
pe_warn("Scheduling Node %s for STONITH", node->details->uname);
pcmk__order_vs_fence(stonith_op, data_set);
if (node->details->is_dc) {
// Remember if the DC is being fenced
dc_down = stonith_op;
} else {
if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
&& (stonith_ops != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pe_action_t *) stonith_ops->data,
stonith_op, pe_order_optional);
}
// Remember all non-DC fencing actions in a separate list
stonith_ops = g_list_prepend(stonith_ops, stonith_op);
}
} else if (node->details->online && node->details->shutdown &&
/* TODO define what a shutdown op means for a remote node.
* For now we do not send shutdown operations for remote nodes, but
* if we can come up with a good use for this in the future, we will. */
pe__is_guest_or_remote_node(node) == FALSE) {
pe_action_t *down_op = sched_shutdown_op(node, data_set);
if (node->details->is_dc) {
// Remember if the DC is being shut down
dc_down = down_op;
} else {
// Remember non-DC shutdowns for later ordering
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if (node->details->unclean && stonith_op == NULL) {
integrity_lost = TRUE;
pe_warn("Node %s is unclean!", node->details->uname);
}
}
if (integrity_lost) {
if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
} else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
crm_notice("Cannot fence unclean nodes until quorum is"
" attained (or no-quorum-policy is set to ignore)");
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
pe_action_t *node_stop = (pe_action_t *) gIter->data;
crm_debug("Ordering shutdown on %s before %s on DC %s",
node_stop->node->details->uname,
dc_down->task, dc_down->node->details->uname);
order_actions(node_stop, dc_down, pe_order_optional);
}
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
order_actions((pe_action_t *) gIter->data, dc_down,
pe_order_optional);
}
} else if (stonith_ops) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pe_action_t *) stonith_ops->data, dc_down,
pe_order_optional);
}
}
g_list_free(stonith_ops);
g_list_free(shutdown_ops);
return TRUE;
}
/*
* Determine the sets of independent actions and the correct order for the
* actions in each set.
*
* Mark dependencies of un-runnable actions un-runnable
*
*/
static GList *
find_actions_by_task(GList *actions, pe_resource_t * rsc, const char *original_key)
{
GList *list = NULL;
list = find_actions(actions, original_key, NULL);
if (list == NULL) {
/* we're potentially searching a child of the original resource */
char *key = NULL;
char *task = NULL;
guint interval_ms = 0;
if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
key = pcmk__op_key(rsc->id, task, interval_ms);
list = find_actions(actions, key, NULL);
} else {
crm_err("search key: %s", original_key);
}
free(key);
free(task);
}
return list;
}
static void
rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
pe__ordering_t *order)
{
GList *gIter = NULL;
GList *rh_actions = NULL;
pe_action_t *rh_action = NULL;
enum pe_ordering type;
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(order != NULL, return);
type = order->type;
rh_action = order->rh_action;
crm_trace("Processing RH of ordering constraint %d", order->id);
if (rh_action != NULL) {
rh_actions = g_list_prepend(NULL, rh_action);
} else if (rsc != NULL) {
rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
}
if (rh_actions == NULL) {
pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
" ignoring", rsc->id, order->rh_action_task);
if (lh_action) {
pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
}
return;
}
if ((lh_action != NULL) && (lh_action->rsc == rsc)
&& pcmk_is_set(lh_action->flags, pe_action_dangle)) {
pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
order->rh_action_task);
pe__clear_order_flags(type, pe_order_implies_then);
}
gIter = rh_actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *rh_action_iter = (pe_action_t *) gIter->data;
if (lh_action) {
order_actions(lh_action, rh_action_iter, type);
} else if (type & pe_order_implies_then) {
update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
} else {
crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
}
}
g_list_free(rh_actions);
}
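/*!
* \internal
* \brief Apply an ordering constraint starting from its "first" resource
*
* Find the "first" resource's actions matching the constraint (creating the
* action if appropriate), then order each of them relative to the "then"
* side via rsc_order_then() or order_actions().
*
* \param[in] lh_rsc Resource on the "first" side of the ordering
* \param[in] order Ordering constraint being applied
* \param[in] data_set Cluster working set
*/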
static void
rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
GList *lh_actions = NULL;
pe_action_t *lh_action = order->lh_action;
pe_resource_t *rh_rsc = order->rh_rsc;
crm_trace("Processing LH of ordering constraint %d", order->id);
CRM_ASSERT(lh_rsc != NULL);
if (lh_action != NULL) {
lh_actions = g_list_prepend(NULL, lh_action);
} else {
lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
}
if (lh_actions == NULL && lh_rsc != rh_rsc) {
char *key = NULL;
char *op_type = NULL;
guint interval_ms = 0;
parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
key = pcmk__op_key(lh_rsc->id, op_type, interval_ms);
if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && pcmk__str_eq(op_type, RSC_STOP, pcmk__str_casei)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else if ((lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_UNPROMOTED)
&& pcmk__str_eq(op_type, RSC_DEMOTE, pcmk__str_casei)) {
free(key);
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
} else {
pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
lh_actions = g_list_prepend(NULL, lh_action);
}
free(op_type);
}
gIter = lh_actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *lh_action_iter = (pe_action_t *) gIter->data;
if (rh_rsc == NULL && order->rh_action) {
rh_rsc = order->rh_action->rsc;
}
if (rh_rsc) {
rsc_order_then(lh_action_iter, rh_rsc, order);
} else if (order->rh_action) {
order_actions(lh_action_iter, order->rh_action, order->type);
}
}
g_list_free(lh_actions);
}
extern void update_colo_start_chain(pe_action_t *action,
pe_working_set_t *data_set);
static int
is_recurring_action(pe_action_t *action)
{
guint interval_ms;
if (pcmk__guint_from_hash(action->meta,
XML_LRM_ATTR_INTERVAL_MS, 0,
&interval_ms) != pcmk_rc_ok) {
return 0;
}
return (interval_ms > 0);
}
static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
/* VMs are also classified as containers for these purposes... in
* that they both involve a 'thing' running on a real or remote
* cluster node.
*
* This allows us to be smarter about the type and extent of
* recovery actions required in various scenarios
*/
pe_resource_t *remote_rsc = NULL;
pe_resource_t *container = NULL;
enum action_tasks task = text2task(action->task);
CRM_ASSERT(action->rsc);
CRM_ASSERT(action->node);
CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
container = remote_rsc->container;
CRM_ASSERT(container);
if (pcmk_is_set(container->flags, pe_rsc_failed)) {
pe_fence_node(data_set, action->node, "container failed", FALSE);
}
crm_trace("Order %s action %s relative to %s%s for %s%s",
action->task, action->uuid,
pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
remote_rsc->id,
pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
container->id);
if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
/* Migration ops map to "no_action", but we need to apply the same
* ordering as for stop or demote (see get_router_node()).
*/
task = stop_rsc;
}
switch (task) {
case start_rsc:
case action_promote:
/* Force resource recovery if the container is recovered */
order_start_then_action(container, action, pe_order_implies_then,
data_set);
/* Wait for the connection resource to be up too */
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
break;
case stop_rsc:
case action_demote:
if (pcmk_is_set(container->flags, pe_rsc_failed)) {
/* When the container representing a guest node fails, any stop
* or demote actions for resources running on the guest node
* are implied by the container stopping. This is similar to
* how fencing operations work for cluster nodes and remote
* nodes.
*/
} else {
/* Ensure the operation happens before the connection is brought
* down.
*
* If we really wanted to, we could order these after the
* connection start, IFF the container's current role was
* stopped (otherwise we re-introduce an ordering loop when the
* connection is restarting).
*/
order_action_then_stop(action, remote_rsc, pe_order_none,
data_set);
}
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
if(task != no_action) {
order_start_then_action(remote_rsc, action,
pe_order_implies_then, data_set);
}
} else {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
}
break;
}
}
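/*!
* \internal
* \brief Determine the state of a remote node's connection resource
*
* \param[in] node Pacemaker Remote node to check
*
* \return remote_state_failed, remote_state_stopped, remote_state_unknown,
* remote_state_resting, or remote_state_alive as appropriate
*/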
static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
pe_resource_t *remote_rsc = NULL;
pe_node_t *cluster_node = NULL;
CRM_ASSERT(node);
remote_rsc = node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
cluster_node = pe__current_node(remote_rsc);
/* If the cluster node the remote connection resource resides on
* is unclean or went offline, we can't process any operations
* on that remote node until after it starts elsewhere.
*/
if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
/* The connection resource is not going to run anywhere */
if (cluster_node && cluster_node->details->unclean) {
/* The remote connection is failed because its resource is on a
* failed node and can't be recovered elsewhere, so we must fence.
*/
return remote_state_failed;
}
if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
/* Connection resource is cleanly stopped */
return remote_state_stopped;
}
/* Connection resource is failed */
if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
&& remote_rsc->remote_reconnect_ms
&& node->details->remote_was_fenced
&& !pe__shutdown_requested(node)) {
/* We won't know whether the connection is recoverable until the
* reconnect interval expires and we reattempt connection.
*/
return remote_state_unknown;
}
/* The remote connection is in a failed state. If there are any
* resources known to be active on it (stop) or in an unknown state
* (probe), we must assume the worst and fence it.
*/
return remote_state_failed;
} else if (cluster_node == NULL) {
/* Connection is recoverable but not currently running anywhere, see if we can recover it first */
return remote_state_unknown;
} else if(cluster_node->details->unclean == TRUE
|| cluster_node->details->online == FALSE) {
/* Connection is running on a dead node, see if we can recover it first */
return remote_state_resting;
} else if (pcmk__list_of_multiple(remote_rsc->running_on)
&& remote_rsc->partial_migration_source
&& remote_rsc->partial_migration_target) {
/* We're in the middle of migrating a connection resource,
* wait until after the resource migrates before performing
* any actions.
*/
return remote_state_resting;
}
return remote_state_alive;
}
/*!
* \internal
* \brief Order actions on remote node relative to actions for the connection
*/
static void
apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
pe_resource_t *remote_rsc = NULL;
enum action_tasks task = text2task(action->task);
enum remote_connection_state state = get_remote_node_state(action->node);
enum pe_ordering order_opts = pe_order_none;
if (action->rsc == NULL) {
return;
}
CRM_ASSERT(action->node);
CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
remote_rsc = action->node->details->remote_rsc;
CRM_ASSERT(remote_rsc);
crm_trace("Order %s action %s relative to %s%s (state: %s)",
action->task, action->uuid,
pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
remote_rsc->id, state2text(state));
if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
/* Migration ops map to "no_action", but we need to apply the same
* ordering as for stop or demote (see get_router_node()).
*/
task = stop_rsc;
}
switch (task) {
case start_rsc:
case action_promote:
order_opts = pe_order_none;
if (state == remote_state_failed) {
/* Force recovery, by making this action required */
pe__set_order_flags(order_opts, pe_order_implies_then);
}
/* Ensure connection is up before running this action */
order_start_then_action(remote_rsc, action, order_opts, data_set);
break;
case stop_rsc:
if(state == remote_state_alive) {
order_action_then_stop(action, remote_rsc,
pe_order_implies_first, data_set);
} else if(state == remote_state_failed) {
/* The resource is active on the node, but since we don't have a
* valid connection, the only way to stop the resource is by
* fencing the node. There is no need to order the stop relative
* to the remote connection, since the stop will become implied
* by the fencing.
*/
pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable", FALSE);
} else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
/* State must be remote_state_unknown or remote_state_stopped.
* Since the connection is not coming back up in this
* transition, stop this resource first.
*/
order_action_then_stop(action, remote_rsc,
pe_order_implies_first, data_set);
} else {
/* The connection is going to be started somewhere else, so
* stop this resource after that completes.
*/
order_start_then_action(remote_rsc, action, pe_order_none, data_set);
}
break;
case action_demote:
/* Only order this demote relative to the connection start if the
* connection isn't being torn down. Otherwise, the demote would be
* blocked because the connection start would not be allowed.
*/
if(state == remote_state_resting || state == remote_state_unknown) {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
} /* Otherwise we can rely on the stop ordering */
break;
default:
/* Wait for the connection resource to be up */
if (is_recurring_action(action)) {
/* In case we ever get the recovery logic wrong, force
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
order_start_then_action(remote_rsc, action,
pe_order_implies_then, data_set);
} else {
pe_node_t *cluster_node = pe__current_node(remote_rsc);
if(task == monitor_rsc && state == remote_state_failed) {
/* We would only be here if we do not know the
* state of the resource on the remote node.
* Since we have no way to find out, it is
* necessary to fence the node.
*/
pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable", FALSE);
}
if(cluster_node && state == remote_state_stopped) {
/* The connection is currently up, but is going
* down permanently.
*
* Make sure we check services are actually
* stopped _before_ we let the connection get
* closed
*/
order_action_then_stop(action, remote_rsc,
pe_order_runnable_left, data_set);
} else {
order_start_then_action(remote_rsc, action, pe_order_none,
data_set);
}
}
break;
}
}
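/*!
* \internal
* \brief Order actions on Pacemaker Remote nodes relative to their connections
*
* For each real resource action assigned to a guest or remote node, create
* orderings (via apply_container_ordering() or apply_remote_ordering()) that
* guarantee the action occurs while the connection is active.
*
* \param[in] data_set Cluster working set
*/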
static void
apply_remote_node_ordering(pe_working_set_t *data_set)
{
if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
return;
}
for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
pe_resource_t *remote = NULL;
// We are only interested in resource actions
if (action->rsc == NULL) {
continue;
}
/* Special case: If we are clearing the failcount of an actual
* remote connection resource, then make sure this happens before
* any start of the resource in this transition.
*/
if (action->rsc->is_remote_node &&
pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
custom_action_order(action->rsc,
NULL,
action,
action->rsc,
pcmk__op_key(action->rsc->id, RSC_START, 0),
NULL,
pe_order_optional,
data_set);
continue;
}
// We are only interested in actions allocated to a node
if (action->node == NULL) {
continue;
}
if (!pe__is_guest_or_remote_node(action->node)) {
continue;
}
/* We are only interested in real actions.
*
* @TODO This is probably wrong; pseudo-actions might be converted to
* real actions and vice versa later in update_actions() at the end of
* stage7().
*/
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
continue;
}
remote = action->node->details->remote_rsc;
if (remote == NULL) {
// Orphaned
continue;
}
/* Another special case: if a resource is moving to a Pacemaker Remote
* node, order the stop on the original node after any start of the
* remote connection. This ensures that if the connection fails to
* start, we leave the resource running on the original node.
*/
if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
for (GList *item = action->rsc->actions; item != NULL;
item = item->next) {
pe_action_t *rsc_action = item->data;
if ((rsc_action->node->details != action->node->details)
&& pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
custom_action_order(remote, start_key(remote), NULL,
action->rsc, NULL, rsc_action,
pe_order_optional, data_set);
}
}
}
/* The action occurs across a remote connection, so create
* ordering constraints that guarantee the action occurs while the node
* is active (after start, before stop ... things like that).
*
* This is somewhat brittle in that we need to make sure the results of
* this ordering are compatible with the result of get_router_node().
* It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
* of this logic rather than action2xml().
*/
if (remote->container) {
crm_trace("Container ordering for %s", action->uuid);
apply_container_ordering(action, data_set);
} else {
crm_trace("Remote ordering for %s", action->uuid);
apply_remote_ordering(action, data_set);
}
}
}
static gboolean
order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
{
/* There is no need to probe the resource on the node that is being
* unfenced. Otherwise it might introduce a transition loop, since the
* probe would be performed after the node is unfenced.
*/
if (pcmk__str_eq(rh_action->task, CRM_OP_FENCE, pcmk__str_casei)
&& probe->node && rh_action->node
&& probe->node->details == rh_action->node->details) {
const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action");
if (pcmk__str_eq(op, "on", pcmk__str_casei)) {
return TRUE;
}
}
// Shutdown waits for probe to complete only if it's on the same node
if ((pcmk__str_eq(rh_action->task, CRM_OP_SHUTDOWN, pcmk__str_casei))
&& probe->node && rh_action->node
&& probe->node->details != rh_action->node->details) {
return TRUE;
}
return FALSE;
}
static void
order_first_probes_imply_stops(pe_working_set_t * data_set)
{
GList *gIter = NULL;
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
enum pe_ordering order_type = pe_order_optional;
pe_resource_t *lh_rsc = order->lh_rsc;
pe_resource_t *rh_rsc = order->rh_rsc;
pe_action_t *lh_action = order->lh_action;
pe_action_t *rh_action = order->rh_action;
const char *lh_action_task = order->lh_action_task;
const char *rh_action_task = order->rh_action_task;
GList *probes = NULL;
GList *rh_actions = NULL;
GList *pIter = NULL;
if (lh_rsc == NULL) {
continue;
} else if (rh_rsc && lh_rsc == rh_rsc) {
continue;
}
if (lh_action == NULL && lh_action_task == NULL) {
continue;
}
if (rh_action == NULL && rh_action_task == NULL) {
continue;
}
/* Technically, a probe is expected to return "not running", which could
* serve as an alternative to a stop action when the resource's status is
* not yet known.
*/
if (lh_action && !pcmk__str_eq(lh_action->task, RSC_STOP, pcmk__str_casei)) {
continue;
} else if (lh_action == NULL
&& lh_action_task
&& !pcmk__ends_with(lh_action_task, "_" RSC_STOP "_0")) {
continue;
}
/* Do not probe the resource inside a stopping container. Otherwise it
* might introduce a transition loop, since the probe would be performed
* after the container starts again.
*/
if (rh_rsc && lh_rsc->container == rh_rsc) {
if (rh_action && pcmk__str_eq(rh_action->task, RSC_STOP, pcmk__str_casei)) {
continue;
} else if (rh_action == NULL && rh_action_task
&& pcmk__ends_with(rh_action_task,"_" RSC_STOP "_0")) {
continue;
}
}
if (order->type == pe_order_none) {
continue;
}
// Preserve the order options for future filtering
if (pcmk_is_set(order->type, pe_order_apply_first_non_migratable)) {
pe__set_order_flags(order_type,
pe_order_apply_first_non_migratable);
}
if (pcmk_is_set(order->type, pe_order_same_node)) {
pe__set_order_flags(order_type, pe_order_same_node);
}
// Keep the order types for future filtering
if (order->type == pe_order_anti_colocation
|| order->type == pe_order_load) {
order_type = order->type;
}
probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE);
if (probes == NULL) {
continue;
}
if (rh_action) {
rh_actions = g_list_prepend(rh_actions, rh_action);
} else if (rh_rsc && rh_action_task) {
rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL);
}
if (rh_actions == NULL) {
g_list_free(probes);
continue;
}
crm_trace("Processing for LH probe based on ordering constraint %s -> %s"
" (id=%d, type=%.6x)",
lh_action ? lh_action->uuid : lh_action_task,
rh_action ? rh_action->uuid : rh_action_task,
order->id, order->type);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
GList *rIter = NULL;
for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) {
pe_action_t *rh_action_iter = (pe_action_t *) rIter->data;
if (order_first_probe_unneeded(probe, rh_action_iter)) {
continue;
}
order_actions(probe, rh_action_iter, order_type);
}
}
g_list_free(rh_actions);
g_list_free(probes);
}
}
static void
order_first_probe_then_restart_repromote(pe_action_t * probe,
pe_action_t * after,
pe_working_set_t * data_set)
{
GList *gIter = NULL;
bool interleave = FALSE;
pe_resource_t *compatible_rsc = NULL;
if (probe == NULL
|| probe->rsc == NULL
|| probe->rsc->variant != pe_native) {
return;
}
if (after == NULL
// Avoid running into any possible loop
|| pcmk_is_set(after->flags, pe_action_tracking)) {
return;
}
if (!pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) {
return;
}
pe__set_action_flags(after, pe_action_tracking);
crm_trace("Processing based on %s %s -> %s %s",
probe->uuid,
probe->node ? probe->node->details->uname: "",
after->uuid,
after->node ? after->node->details->uname : "");
if (after->rsc
/* Better not build a dependency directly with a clone/group.
* We are going to proceed through the ordering chain and build
* dependencies with its children.
*/
&& after->rsc->variant == pe_native
&& probe->rsc != after->rsc) {
GList *then_actions = NULL;
enum pe_ordering probe_order_type = pe_order_optional;
if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) {
then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE);
} else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) {
then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE);
}
for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *then = (pe_action_t *) gIter->data;
// Skip any pseudo action which for example is implied by fencing
if (pcmk_is_set(then->flags, pe_action_pseudo)) {
continue;
}
order_actions(probe, then, probe_order_type);
}
g_list_free(then_actions);
}
if (after->rsc
&& after->rsc->variant > pe_group) {
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
XML_RSC_ATTR_INTERLEAVE);
interleave = crm_is_true(interleave_s);
if (interleave) {
/* For an interleaved clone, we should build a dependency only
* with the relevant clone child.
*/
compatible_rsc = find_compatible_child(probe->rsc,
after->rsc,
RSC_ROLE_UNKNOWN,
FALSE, data_set);
}
}
for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data;
/* pe_order_implies_then is the reason why a required A.start
* implies/enforces B.start to be required too, which is the cause of
* B.restart/re-promote.
*
* Not sure about pe_order_implies_then_on_node though. It's currently
* only used for the unfencing case, which tends to introduce transition
* loops...
*/
if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
/* The order type between a group/clone and its child such as
* B.start-> B_child.start is:
* pe_order_implies_first_printed | pe_order_runnable_left
*
* Proceed through the ordering chain and build dependencies with
* its children.
*/
if (after->rsc == NULL
|| after->rsc->variant < pe_group
|| probe->rsc->parent == after->rsc
|| after_wrapper->action->rsc == NULL
|| after_wrapper->action->rsc->variant > pe_group
|| after->rsc != after_wrapper->action->rsc->parent) {
continue;
}
/* Proceed to the children of a group or a non-interleaved clone.
* For an interleaved clone, proceed only to the relevant child.
*/
if (after->rsc->variant > pe_group
&& interleave == TRUE
&& (compatible_rsc == NULL
|| compatible_rsc != after_wrapper->action->rsc)) {
continue;
}
}
crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
after->uuid,
after->node ? after->node->details->uname: "",
after_wrapper->action->uuid,
after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
after_wrapper->type);
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
}
}
static void clear_actions_tracking_flag(pe_working_set_t * data_set)
{
GList *gIter = NULL;
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (pcmk_is_set(action->flags, pe_action_tracking)) {
pe__clear_action_flags(action, pe_action_tracking);
}
}
}
static void
order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GList *gIter = NULL;
GList *probes = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t * child = (pe_resource_t *) gIter->data;
order_first_rsc_probes(child, data_set);
}
if (rsc->variant != pe_native) {
return;
}
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
for (gIter = probes; gIter != NULL; gIter= gIter->next) {
pe_action_t *probe = (pe_action_t *) gIter->data;
GList *aIter = NULL;
for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data;
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
clear_actions_tracking_flag(data_set);
}
}
g_list_free(probes);
}
static void
order_first_probes(pe_working_set_t * data_set)
{
GList *gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
order_first_rsc_probes(rsc, data_set);
}
order_first_probes_imply_stops(data_set);
}
static void
order_then_probes(pe_working_set_t * data_set)
{
#if 0
GList *gIter = NULL;
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
/* Given "A then B", we would prefer to wait for A to be
* started before probing B.
*
* If A was a filesystem on which the binaries and data for B
* lived, it would have been useful if the author of B's agent
* could assume that A is running before B.monitor will be
* called.
*
* However we can't _only_ probe once A is running, otherwise
* we'd not detect the state of B if A could not be started
* for some reason.
*
* In practice however, we cannot even do an opportunistic
* version of this because B may be moving:
*
* B.probe -> B.start
* B.probe -> B.stop
* B.stop -> B.start
* A.stop -> A.start
* A.start -> B.probe
*
* So far so good, but if we add the result of this code:
*
* B.stop -> A.stop
*
* Then we get a loop:
*
* B.probe -> B.stop -> A.stop -> A.start -> B.probe
*
* We could kill the 'B.probe -> B.stop' dependency, but that
* could mean stopping B "too" soon, because B.start must wait
* for the probes to complete.
*
* Another option is to allow it only if A is a non-unique
* clone with clone-max == node-max (since we'll never be
* moving it). However, we could still be stopping one
* instance at the same time as starting another.
* The complexity of checking for allowed conditions, combined
* with the ever-narrowing use case, suggests that this code
* should remain disabled until someone gets smarter.
*/
pe_action_t *start = NULL;
GList *actions = NULL;
GList *probes = NULL;
actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
if (actions) {
start = actions->data;
g_list_free(actions);
}
if(start == NULL) {
crm_err("No start action for %s", rsc->id);
continue;
}
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
for (actions = start->actions_before; actions != NULL; actions = actions->next) {
pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
GList *pIter = NULL;
pe_action_t *first = before->action;
pe_resource_t *first_rsc = first->rsc;
if(first->required_runnable_before) {
GList *clone_actions = NULL;
for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
before = (pe_action_wrapper_t *) clone_actions->data;
crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
CRM_ASSERT(before->action->rsc);
first_rsc = before->action->rsc;
break;
}
} else if(!pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
if(first_rsc == NULL) {
continue;
} else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
continue;
} else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
continue;
}
crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
order_actions(first, probe, pe_order_optional);
}
}
}
#endif
}
static void
order_probes(pe_working_set_t * data_set)
{
order_first_probes(data_set);
order_then_probes(data_set);
}
gboolean
stage7(pe_working_set_t * data_set)
{
pcmk__output_t *prev_out = data_set->priv;
pcmk__output_t *out = NULL;
GList *gIter = NULL;
crm_trace("Applying ordering constraints");
/* Don't ask me why, but apparently the ordering constraints need to be
* processed in the order they were created... go figure.
*
* Also, g_list_append() has horrendous performance characteristics, so we
* build the list with g_list_prepend() and reverse it here (see the minimal
* sketch of that idiom after this function).
*/
data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
pe_resource_t *rsc = order->lh_rsc;
crm_trace("Applying ordering constraint: %d", order->id);
if (rsc != NULL) {
crm_trace("rsc_action-to-*");
rsc_order_first(rsc, order, data_set);
continue;
}
rsc = order->rh_rsc;
if (rsc != NULL) {
crm_trace("action-to-rsc_action");
rsc_order_then(order->lh_action, rsc, order);
} else {
crm_trace("action-to-action");
order_actions(order->lh_action, order->rh_action, order->type);
}
}
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
update_colo_start_chain(action, data_set);
}
crm_trace("Ordering probes");
order_probes(data_set);
crm_trace("Updating %d actions", g_list_length(data_set->actions));
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
update_action(action, data_set);
}
// Check for invalid orderings
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
pe_action_wrapper_t *input = NULL;
for (GList *input_iter = action->actions_before;
input_iter != NULL; input_iter = input_iter->next) {
input = (pe_action_wrapper_t *) input_iter->data;
if (pcmk__ordering_is_invalid(action, input)) {
input->type = pe_order_none;
}
}
}
/* stage7 only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
out = pcmk__new_logger();
if (out == NULL) {
return FALSE;
}
pcmk__output_set_log_level(out, LOG_NOTICE);
data_set->priv = out;
out->begin_list(out, NULL, NULL, "Actions");
LogNodeActions(data_set);
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
LogActions(rsc, data_set);
}
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
data_set->priv = prev_out;
return TRUE;
}
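/* Illustrative sketch (not part of the patch): the prepend-then-reverse idiom
 * referenced in the stage7() comment above. g_list_append() must walk to the
 * tail on every call (O(n) per append, O(n^2) overall), while g_list_prepend()
 * is O(1); a single g_list_reverse() at the end restores creation order. The
 * function and variable names here are hypothetical.
 */
static GList *
build_list_in_creation_order(gpointer *items, guint n_items)
{
    GList *list = NULL;

    for (guint i = 0; i < n_items; i++) {
        list = g_list_prepend(list, items[i]);  // O(1) insertion at the head
    }
    return g_list_reverse(list);                // one O(n) pass to fix order
}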
static int transition_id = -1;
/*!
* \internal
* \brief Log a message after calculating a transition
*
* \param[in] filename Where transition input is stored
*/
void
pcmk__log_transition_summary(const char *filename)
{
if (was_processing_error) {
crm_err("Calculated transition %d (with errors)%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
} else if (was_processing_warning) {
crm_warn("Calculated transition %d (with warnings)%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
} else {
crm_notice("Calculated transition %d%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
}
if (crm_config_error) {
crm_notice("Configuration errors found during scheduler processing,"
" please run \"crm_verify -L\" to identify issues");
}
}
/*
* Create a dependency graph to send to the transitioner (via the controller)
*/
gboolean
stage8(pe_working_set_t * data_set)
{
GList *gIter = NULL;
const char *value = NULL;
long long limit = 0LL;
transition_id++;
crm_trace("Creating transition graph %d.", transition_id);
data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
value = pe_pref(data_set->config_hash, "cluster-delay");
crm_xml_add(data_set->graph, "cluster-delay", value);
value = pe_pref(data_set->config_hash, "stonith-timeout");
crm_xml_add(data_set->graph, "stonith-timeout", value);
crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
} else {
crm_xml_add(data_set->graph, "failed-start-offset", "1");
}
value = pe_pref(data_set->config_hash, "batch-limit");
crm_xml_add(data_set->graph, "batch-limit", value);
crm_xml_add_int(data_set->graph, "transition_id", transition_id);
value = pe_pref(data_set->config_hash, "migration-limit");
if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
crm_xml_add(data_set->graph, "migration-limit", value);
}
if (data_set->recheck_by > 0) {
char *recheck_epoch = NULL;
recheck_epoch = crm_strdup_printf("%llu",
(long long) data_set->recheck_by);
crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
free(recheck_epoch);
}
/* errors...
slist_iter(action, pe_action_t, action_list, lpc,
if(action->optional == FALSE && action->runnable == FALSE) {
print_action("Ignoring", action, TRUE);
}
);
*/
/* The following code will de-duplicate action inputs, so nothing past this
* should rely on the action input type flags retaining their original
* values.
*/
gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
rsc->cmds->expand(rsc, data_set);
}
crm_log_xml_trace(data_set->graph, "created resource-driven action list");
/* pseudo action to distribute list of nodes with maintenance state update */
add_maintenance_update(data_set);
/* catch any non-resource specific actions */
crm_trace("processing non-resource actions");
gIter = data_set->actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->rsc
&& action->node
&& action->node->details->shutdown
&& !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
&& !pcmk_any_flags_set(action->flags,
pe_action_optional|pe_action_runnable)
&& pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)
) {
/* Eventually we should just ignore the 'fence' case,
* but for now it's the best way to detect (in CTS) when
* CIB resource updates are being lost
*/
if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
|| data_set->no_quorum_policy == no_quorum_ignore) {
crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
action->node->details->unclean ? "fence" : "shut down",
action->node->details->uname, action->rsc->id,
pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
action->uuid);
}
}
graph_element_from_action(action, data_set);
}
crm_log_xml_trace(data_set->graph, "created generic action list");
crm_trace("Created transition graph %d.", transition_id);
return TRUE;
}
void
LogNodeActions(pe_working_set_t * data_set)
{
pcmk__output_t *out = data_set->priv;
GList *gIter = NULL;
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
char *node_name = NULL;
char *task = NULL;
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->rsc != NULL) {
continue;
} else if (pcmk_is_set(action->flags, pe_action_optional)) {
continue;
}
if (pe__is_guest_node(action->node)) {
node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
} else if(action->node) {
node_name = crm_strdup_printf("%s", action->node->details->uname);
}
if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
task = strdup("Shutdown");
} else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
task = crm_strdup_printf("Fence (%s)", op);
}
out->message(out, "node-action", task, node_name, action->reason);
free(node_name);
free(task);
}
}
diff --git a/lib/pacemaker/pcmk_sched_graph.c b/lib/pacemaker/pcmk_sched_graph.c
index 2d749b21cd..76d5365041 100644
--- a/lib/pacemaker/pcmk_sched_graph.c
+++ b/lib/pacemaker/pcmk_sched_graph.c
@@ -1,1902 +1,1902 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
void update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set);
gboolean rsc_update_action(pe_action_t * first, pe_action_t * then, enum pe_ordering type);
static enum pe_action_flags
get_action_flags(pe_action_t * action, pe_node_t * node)
{
enum pe_action_flags flags = action->flags;
if (action->rsc) {
flags = action->rsc->cmds->action_flags(action, NULL);
if (pe_rsc_is_clone(action->rsc) && node) {
/* We only care about activity on $node */
enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node);
/* Go to great lengths to ensure the correct value for pe_action_runnable...
*
* If we are a clone, then for _ordering_ constraints, it's only relevant
* if we are runnable _anywhere_.
*
* This only applies to _runnable_ though, and only for ordering constraints.
* If this function is ever used during colocation, then we'll need additional logic
*
* Not very satisfying, but it's logical and appears to work well.
*/
if (!pcmk_is_set(clone_flags, pe_action_runnable)
&& pcmk_is_set(flags, pe_action_runnable)) {
pe__set_raw_action_flags(clone_flags, action->rsc->id,
pe_action_runnable);
}
flags = clone_flags;
}
}
return flags;
}
static char *
convert_non_atomic_uuid(char *old_uuid, pe_resource_t * rsc, gboolean allow_notify,
gboolean free_original)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *raw_task = NULL;
int task = no_action;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Processing %s", old_uuid);
if (old_uuid == NULL) {
return NULL;
} else if (strstr(old_uuid, "notify") != NULL) {
goto done; /* no conversion */
} else if (rsc->variant < pe_group) {
goto done; /* no conversion */
}
CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval_ms));
if (interval_ms > 0) {
goto done; /* no conversion */
}
task = text2task(raw_task);
switch (task) {
case stop_rsc:
case start_rsc:
case action_notify:
case action_promote:
case action_demote:
break;
case stopped_rsc:
case started_rsc:
case action_notified:
case action_promoted:
case action_demoted:
task--;
break;
case monitor_rsc:
case shutdown_crm:
case stonith_node:
task = no_action;
break;
default:
crm_err("Unknown action: %s", raw_task);
task = no_action;
break;
}
if (task != no_action) {
if (pcmk_is_set(rsc->flags, pe_rsc_notify) && allow_notify) {
uuid = pcmk__notify_key(rid, "confirmed-post", task2text(task + 1));
} else {
uuid = pcmk__op_key(rid, task2text(task + 1), 0);
}
pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid);
}
done:
if (uuid == NULL) {
uuid = strdup(old_uuid);
}
if (free_original) {
free(old_uuid);
}
free(raw_task);
free(rid);
return uuid;
}
static pe_action_t *
rsc_expand_action(pe_action_t * action)
{
gboolean notify = FALSE;
pe_action_t *result = action;
pe_resource_t *rsc = action->rsc;
if (rsc == NULL) {
return action;
}
if ((rsc->parent == NULL)
|| (pe_rsc_is_clone(rsc) && (rsc->parent->variant == pe_container))) {
/* Only outermost resources have notification actions.
* The exception is those in bundles.
*/
notify = pcmk_is_set(rsc->flags, pe_rsc_notify);
}
if (rsc->variant >= pe_group) {
/* Expand 'start' -> 'started' */
char *uuid = NULL;
uuid = convert_non_atomic_uuid(action->uuid, rsc, notify, FALSE);
if (uuid) {
pe_rsc_trace(rsc, "Converting %s to %s %d", action->uuid, uuid,
pcmk_is_set(rsc->flags, pe_rsc_notify));
result = find_first_action(rsc->actions, uuid, NULL, NULL);
if (result == NULL) {
crm_err("Couldn't expand %s to %s in %s", action->uuid, uuid, rsc->id);
result = action;
}
free(uuid);
}
}
return result;
}
static enum pe_graph_flags
graph_update_action(pe_action_t * first, pe_action_t * then, pe_node_t * node,
enum pe_action_flags first_flags, enum pe_action_flags then_flags,
pe_action_wrapper_t *order, pe_working_set_t *data_set)
{
enum pe_graph_flags changed = pe_graph_none;
enum pe_ordering type = order->type;
gboolean processed = FALSE;
/* TODO: Do as many of these in parallel as possible */
if (pcmk_is_set(type, pe_order_implies_then_on_node)) {
/* Normally we want the _whole_ 'then' clone to
* restart if 'first' is restarted, so then->node is
* needed.
*
* However, for unfencing, we want to limit this to
* instances on the same node as 'first' (the
* unfencing operation), so first->node is supplied.
*
* Swap the node; from then on we can treat it
* like any other 'pe_order_implies_then'
*/
pe__clear_order_flags(type, pe_order_implies_then_on_node);
pe__set_order_flags(type, pe_order_implies_then);
node = first->node;
}
pe__clear_raw_action_flags(first_flags, "first action update",
pe_action_pseudo);
if (type & pe_order_implies_then) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags & pe_action_optional, pe_action_optional,
pe_order_implies_then, data_set);
} else if (!pcmk_is_set(first_flags, pe_action_optional)) {
if (update_action_flags(then, pe_action_optional | pe_action_clear, __func__, __LINE__)) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
}
}
if (changed) {
pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("implies right: %s then %s %p", first->uuid, then->uuid, then->rsc);
}
}
if ((type & pe_order_restart) && then->rsc) {
enum pe_action_flags restart = (pe_action_optional | pe_action_runnable);
processed = TRUE;
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, restart,
pe_order_restart, data_set);
if (changed) {
pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("restart: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_implies_first) {
processed = TRUE;
if (first->rsc) {
changed |= first->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_optional, pe_order_implies_first,
data_set);
} else if (!pcmk_is_set(first_flags, pe_action_optional)) {
pe_rsc_trace(first->rsc, "first unrunnable: %s (%d) then %s (%d)",
first->uuid, pcmk_is_set(first_flags, pe_action_optional),
then->uuid, pcmk_is_set(then_flags, pe_action_optional));
if (update_action_flags(first, pe_action_runnable | pe_action_clear, __func__, __LINE__)) {
pe__set_graph_flags(changed, first, pe_graph_updated_first);
}
}
if (changed) {
pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("implies left: %s (%d) then %s (%d)",
first->uuid, pcmk_is_set(first_flags, pe_action_optional),
then->uuid, pcmk_is_set(then_flags, pe_action_optional));
}
}
if (type & pe_order_promoted_implies_first) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags & pe_action_optional, pe_action_optional,
pe_order_promoted_implies_first, data_set);
}
if (changed) {
pe_rsc_trace(then->rsc,
"implies left when right resource is promoted: "
"%s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("implies left when right resource is promoted: "
"%s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_one_or_more) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_runnable, pe_order_one_or_more,
data_set);
} else if (pcmk_is_set(first_flags, pe_action_runnable)) {
/* alright. a "first" action is considered runnable, incremente
* the 'runnable_before' counter */
then->runnable_before++;
/* if the runnable "before" count for 'then' has reached the required number
* of runnable "before" actions, mark 'then' as runnable */
if (then->runnable_before >= then->required_runnable_before) {
if (update_action_flags(then, pe_action_runnable, __func__, __LINE__)) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
}
}
}
if (changed) {
pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid,
then->uuid);
} else {
crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid);
}
}
if (then->rsc && pcmk_is_set(type, pe_order_probe)) {
processed = TRUE;
if (!pcmk_is_set(first_flags, pe_action_runnable)
&& (first->rsc->running_on != NULL)) {
pe_rsc_trace(then->rsc, "Ignoring %s then %s - %s is about to be stopped",
first->uuid, then->uuid, first->rsc->id);
type = pe_order_none;
order->type = pe_order_none;
} else {
pe_rsc_trace(then->rsc, "Enforcing %s then %s", first->uuid, then->uuid);
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_runnable, pe_order_runnable_left,
data_set);
}
if (changed) {
pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("runnable: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_runnable_left) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_runnable, pe_order_runnable_left,
data_set);
} else if (!pcmk_is_set(first_flags, pe_action_runnable)) {
pe_rsc_trace(then->rsc, "then unrunnable: %s then %s", first->uuid, then->uuid);
if (update_action_flags(then, pe_action_runnable | pe_action_clear, __func__, __LINE__)) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
}
}
if (changed) {
pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("runnable: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_implies_first_migratable) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_optional,
pe_order_implies_first_migratable, data_set);
}
if (changed) {
pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("optional: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_pseudo_left) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_optional, pe_order_pseudo_left,
data_set);
}
if (changed) {
pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("optional: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_optional) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_runnable, pe_order_optional, data_set);
}
if (changed) {
pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("optional: %s then %s", first->uuid, then->uuid);
}
}
if (type & pe_order_asymmetrical) {
processed = TRUE;
if (then->rsc) {
changed |= then->rsc->cmds->update_actions(first, then, node,
first_flags, pe_action_runnable, pe_order_asymmetrical,
data_set);
}
if (changed) {
pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid);
}
}
if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed)
&& (first_flags & pe_action_optional) == 0) {
processed = TRUE;
crm_trace("%s implies %s printed", first->uuid, then->uuid);
update_action_flags(then, pe_action_print_always, __func__, __LINE__); /* don't care about changed */
}
if (pcmk_is_set(type, pe_order_implies_first_printed)
&& !pcmk_is_set(then_flags, pe_action_optional)) {
processed = TRUE;
crm_trace("%s implies %s printed", then->uuid, first->uuid);
update_action_flags(first, pe_action_print_always, __func__, __LINE__); /* don't care about changed */
}
if ((type & pe_order_implies_then
|| type & pe_order_implies_first
|| type & pe_order_restart)
&& first->rsc
&& pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)
&& !pcmk_is_set(first->rsc->flags, pe_rsc_managed)
&& pcmk_is_set(first->rsc->flags, pe_rsc_block)
&& !pcmk_is_set(first->flags, pe_action_runnable)) {
if (update_action_flags(then, pe_action_runnable | pe_action_clear, __func__, __LINE__)) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
}
if (changed) {
pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid);
} else {
crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid);
}
}
if (processed == FALSE) {
crm_trace("Constraint 0x%.6x not applicable", type);
}
return changed;
}
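/* Illustrative sketch (not part of the patch): graph_update_action() above
 * mixes three ways of testing the ordering-type word -- raw '&' tests,
 * pcmk_is_set() (all of the given bits set), and pcmk_any_flags_set() (at
 * least one of the given bits set). The demo enum values below are
 * hypothetical stand-ins, not Pacemaker's real pe_order_* constants.
 */
enum demo_order {
    demo_order_implies_then  = 0x1,
    demo_order_runnable_left = 0x2,
    demo_order_restart       = 0x4,
};

static void
demo_order_flag_tests(void)
{
    unsigned int type = demo_order_implies_then | demo_order_restart;

    if (type & demo_order_implies_then) {
        // taken: the implies-then bit is set
    }
    if ((type & (demo_order_implies_then | demo_order_restart))
        == (demo_order_implies_then | demo_order_restart)) {
        // taken: what pcmk_is_set() checks -- all of the listed bits are set
    }
    if ((type & (demo_order_runnable_left | demo_order_restart)) != 0) {
        // taken: what pcmk_any_flags_set() checks -- at least one bit is set
    }
}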
static void
mark_start_blocked(pe_resource_t *rsc, pe_resource_t *reason,
pe_working_set_t *data_set)
{
GList *gIter = rsc->actions;
char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (!pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
continue;
}
if (pcmk_is_set(action->flags, pe_action_runnable)) {
pe_action_set_flag_reason(__func__, __LINE__, action, NULL,
reason_text, pe_action_runnable, FALSE);
update_colo_start_chain(action, data_set);
update_action(action, data_set);
}
}
free(reason_text);
}
void
update_colo_start_chain(pe_action_t *action, pe_working_set_t *data_set)
{
GList *gIter = NULL;
pe_resource_t *rsc = NULL;
if (!pcmk_is_set(action->flags, pe_action_runnable)
&& pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
rsc = uber_parent(action->rsc);
if (rsc->parent) {
/* For bundles, uber_parent() returns the clone, not the bundle, so
* the existence of rsc->parent implies this is a bundle.
* In this case, we need the bundle resource, so that we can check
* if all containers are stopped/stopping.
*/
rsc = rsc->parent;
}
}
if (rsc == NULL || rsc->rsc_cons_lhs == NULL) {
return;
}
/* if rsc has children, all the children need to have start set to
* unrunnable before we follow the colo chain for the parent. */
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *)gIter->data;
pe_action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL);
if ((start == NULL) || pcmk_is_set(start->flags, pe_action_runnable)) {
return;
}
}
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *colocate_with = (pcmk__colocation_t *) gIter->data;
if (colocate_with->score == INFINITY) {
mark_start_blocked(colocate_with->rsc_lh, action->rsc, data_set);
}
}
}
gboolean
update_action(pe_action_t *then, pe_working_set_t *data_set)
{
GList *lpc = NULL;
enum pe_graph_flags changed = pe_graph_none;
int last_flags = then->flags;
crm_trace("Processing %s (%s %s %s)",
then->uuid,
pcmk_is_set(then->flags, pe_action_optional)? "optional" : "required",
pcmk_is_set(then->flags, pe_action_runnable)? "runnable" : "unrunnable",
pcmk_is_set(then->flags, pe_action_pseudo)? "pseudo"
: (then->node? then->node->details->uname : ""));
if (pcmk_is_set(then->flags, pe_action_requires_any)) {
/* Initialize the count of known runnable 'before' actions to 0.
* From here, as graph_update_action() is called for each of
* 'then's before actions, this number will increment as
* runnable 'first' actions are encountered. */
then->runnable_before = 0;
/* for backwards compatibility with previous options that use
* the 'requires_any' flag, initialize required to 1 if it is
* not set. */
if (then->required_runnable_before == 0) {
then->required_runnable_before = 1;
}
pe__clear_action_flags(then, pe_action_runnable);
/* We are relying on the pe_order_one_or_more clause of
* graph_update_action(), called as part of the:
*
* 'if (first == other->action)'
*
* block below, to set this back if appropriate
*/
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
pe_action_t *first = other->action;
pe_node_t *then_node = then->node;
pe_node_t *first_node = first->node;
enum pe_action_flags then_flags = 0;
enum pe_action_flags first_flags = 0;
if (first->rsc && first->rsc->variant == pe_group && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
if (first_node) {
crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid);
}
}
if (then->rsc && then->rsc->variant == pe_group && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
if (then_node) {
crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid);
}
}
/* Disable constraint if it only applies when on same node, but isn't */
if (pcmk_is_set(other->type, pe_order_same_node)
&& (first_node != NULL) && (then_node != NULL)
&& (first_node->details != then_node->details)) {
crm_trace("Disabled constraint %s on %s -> %s on %s",
other->action->uuid, first_node->details->uname,
then->uuid, then_node->details->uname);
other->type = pe_order_none;
continue;
}
pe__clear_graph_flags(changed, then, pe_graph_updated_first);
if (first->rsc && pcmk_is_set(other->type, pe_order_then_cancels_first)
&& !pcmk_is_set(then->flags, pe_action_optional)) {
/* 'then' is required, so we must abandon 'first'
- * (e.g. a required stop cancels any reload).
+ * (e.g. a required stop cancels any agent reload).
*/
pe__set_action_flags(other->action, pe_action_optional);
- if (!strcmp(first->task, CRMD_ACTION_RELOAD)) {
+ if (!strcmp(first->task, CRMD_ACTION_RELOAD_AGENT)) {
pe__clear_resource_flags(first->rsc, pe_rsc_reload);
}
}
if (first->rsc && then->rsc && (first->rsc != then->rsc)
&& (is_parent(then->rsc, first->rsc) == FALSE)) {
first = rsc_expand_action(first);
}
if (first != other->action) {
crm_trace("Ordering %s after %s instead of %s", then->uuid, first->uuid,
other->action->uuid);
}
first_flags = get_action_flags(first, then_node);
then_flags = get_action_flags(then, first_node);
crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x",
then->uuid,
pcmk_is_set(then_flags, pe_action_optional)? "optional" : "required",
pcmk_is_set(then_flags, pe_action_runnable)? "runnable" : "unrunnable",
pcmk_is_set(then_flags, pe_action_pseudo)? "pseudo"
: (then->node? then->node->details->uname : ""),
first->uuid,
pcmk_is_set(first_flags, pe_action_optional)? "optional" : "required",
pcmk_is_set(first_flags, pe_action_runnable)? "runnable" : "unrunnable",
pcmk_is_set(first_flags, pe_action_pseudo)? "pseudo"
: (first->node? first->node->details->uname : ""),
first_flags, other->type);
if (first == other->action) {
/*
* 'first' was not expanded (e.g. from 'start' to 'running'), which could mean it:
* - has no associated resource,
* - was a primitive,
* - was pre-expanded (e.g. 'running' instead of 'start')
*
* The third argument here to graph_update_action() is a node which is used under two conditions:
* - Interleaving, in which case first->node and
* then->node are equal (and NULL)
* - If 'then' is a clone, to limit the scope of the
* constraint to instances on the supplied node
*
*/
pe_node_t *node = then->node;
changed |= graph_update_action(first, then, node, first_flags,
then_flags, other, data_set);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
*/
} else if (order_actions(first, then, other->type)) {
/* This was the first time 'first' and 'then' were associated,
* start again to get the new actions_before list
*/
pe__set_graph_flags(changed, then,
pe_graph_updated_then|pe_graph_disable);
}
if (changed & pe_graph_disable) {
crm_trace("Disabled constraint %s -> %s in favor of %s -> %s",
other->action->uuid, then->uuid, first->uuid, then->uuid);
pe__clear_graph_flags(changed, then, pe_graph_disable);
other->type = pe_order_none;
}
if (changed & pe_graph_updated_first) {
GList *lpc2 = NULL;
crm_trace("Updated %s (first %s %s %s), processing dependents ",
first->uuid,
pcmk_is_set(first->flags, pe_action_optional)? "optional" : "required",
pcmk_is_set(first->flags, pe_action_runnable)? "runnable" : "unrunnable",
pcmk_is_set(first->flags, pe_action_pseudo)? "pseudo"
: (first->node? first->node->details->uname : ""));
for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
update_action(other->action, data_set);
}
update_action(first, data_set);
}
}
if (pcmk_is_set(then->flags, pe_action_requires_any)) {
if (last_flags != then->flags) {
pe__set_graph_flags(changed, then, pe_graph_updated_then);
} else {
pe__clear_graph_flags(changed, then, pe_graph_updated_then);
}
}
if (changed & pe_graph_updated_then) {
crm_trace("Updated %s (then %s %s %s), processing dependents ",
then->uuid,
pcmk_is_set(then->flags, pe_action_optional)? "optional" : "required",
pcmk_is_set(then->flags, pe_action_runnable)? "runnable" : "unrunnable",
pcmk_is_set(then->flags, pe_action_pseudo)? "pseudo"
: (then->node? then->node->details->uname : ""));
if (pcmk_is_set(last_flags, pe_action_runnable)
&& !pcmk_is_set(then->flags, pe_action_runnable)) {
update_colo_start_chain(then, data_set);
}
update_action(then, data_set);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
update_action(other->action, data_set);
}
}
return FALSE;
}
gboolean
shutdown_constraints(pe_node_t * node, pe_action_t * shutdown_op, pe_working_set_t * data_set)
{
/* add the stop to the before lists so it counts as a pre-req
* for the shutdown
*/
GList *lpc = NULL;
for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) {
pe_action_t *action = (pe_action_t *) lpc->data;
if (action->rsc == NULL || action->node == NULL) {
continue;
} else if (action->node->details != node->details) {
continue;
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)) {
pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid);
continue;
} else if (node->details->maintenance) {
pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode",
action->uuid, node->details->uname);
continue;
} else if (!pcmk__str_eq(action->task, RSC_STOP, pcmk__str_casei)) {
continue;
} else if (!pcmk_any_flags_set(action->rsc->flags,
pe_rsc_managed|pe_rsc_block)) {
/*
* If another action depends on this one, we may still end up blocking
*/
pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid);
continue;
}
pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid,
node->details->uname);
pe__clear_action_flags(action, pe_action_optional);
custom_action_order(action->rsc, NULL, action,
NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op,
pe_order_optional | pe_order_runnable_left, data_set);
}
return TRUE;
}
/*!
* \internal
* \brief Order all actions appropriately relative to a fencing operation
*
* Ensure start operations of affected resources are ordered after fencing,
* imply stop and demote operations of affected resources by marking them as
* pseudo-actions, etc.
*
* \param[in] stonith_op Fencing operation
* \param[in,out] data_set Working set of cluster
*/
void
pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
{
CRM_CHECK(stonith_op && data_set, return);
for (GList *r = data_set->resources; r != NULL; r = r->next) {
rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op, data_set);
}
}
static pe_node_t *
get_router_node(pe_action_t *action)
{
pe_node_t *began_on = NULL;
pe_node_t *ended_on = NULL;
bool partial_migration = FALSE;
const char *task = action->task;
if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
|| !pe__is_guest_or_remote_node(action->node)) {
return NULL;
}
CRM_ASSERT(action->node->details->remote_rsc != NULL);
began_on = pe__current_node(action->node->details->remote_rsc);
ended_on = action->node->details->remote_rsc->allocated_to;
if (action->node->details->remote_rsc
&& (action->node->details->remote_rsc->container == NULL)
&& action->node->details->remote_rsc->partial_migration_target) {
partial_migration = TRUE;
}
if (began_on == NULL) {
crm_trace("Routing %s for %s through remote connection's "
"next node %s (starting)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
(ended_on? ended_on->details->uname : "none"),
partial_migration? " (partial migration)" : "");
return ended_on;
}
if (ended_on == NULL) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (stopping)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
(began_on? began_on->details->uname : "none"),
partial_migration? " (partial migration)" : "");
return began_on;
}
if (began_on->details == ended_on->details) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (not moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
(began_on? began_on->details->uname : "none"),
partial_migration? " (partial migration)" : "");
return began_on;
}
/* If we get here, the remote connection is moving during this transition.
* This means some actions for resources behind the connection will get
* routed through the cluster node the connection resource is currently on,
* and others are routed through the cluster node the connection will end up
* on.
*/
if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
task = g_hash_table_lookup(action->meta, "notify_operation");
}
/*
* Stop, demote, and migration actions must occur before the connection can
* move (these actions are required before the remote resource can stop). In
* this case, we know these actions have to be routed through the initial
* cluster node the connection resource lived on before the move takes
* place.
*
* The exception is a partial migration of a (non-guest) remote connection
* resource; in that case, all actions (even these) will be ordered after
* the connection's pseudo-start on the migration target, so the target is
* the router node.
*/
if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
"migrate_to", NULL) && !partial_migration) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
(began_on? began_on->details->uname : "none"),
partial_migration? " (partial migration)" : "");
return began_on;
}
/* Everything else (start, promote, monitor, probe, refresh,
* clear failcount, delete, ...) must occur after the connection starts on
* the node it is moving to.
*/
crm_trace("Routing %s for %s through remote connection's "
"next node %s (moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
(ended_on? ended_on->details->uname : "none"),
partial_migration? " (partial migration)" : "");
return ended_on;
}
/*!
* \internal
* \brief Add an XML node tag for a specified ID
*
* \param[in] id Node UUID to add
* \param[in,out] xml Parent XML tag to add to
*/
static xmlNode*
add_node_to_xml_by_id(const char *id, xmlNode *xml)
{
xmlNode *node_xml;
node_xml = create_xml_node(xml, XML_CIB_TAG_NODE);
crm_xml_add(node_xml, XML_ATTR_UUID, id);
return node_xml;
}
/*!
* \internal
* \brief Add an XML node tag for a specified node
*
* \param[in] node Node to add
* \param[in,out] xml XML to add node to
*/
static void
add_node_to_xml(const pe_node_t *node, void *xml)
{
add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
}
/*!
* \internal
* \brief Add XML with nodes that need an update of their maintenance state
*
* \param[in,out] xml Parent XML tag to add to
* \param[in] data_set Working set for cluster
*/
static int
add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
{
GList *gIter = NULL;
xmlNode *maintenance =
xml?create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE):NULL;
int count = 0;
for (gIter = data_set->nodes; gIter != NULL;
gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
struct pe_node_shared_s *details = node->details;
if (!pe__is_guest_or_remote_node(node)) {
continue; /* just remote nodes need to know atm */
}
if (details->maintenance != details->remote_maintenance) {
if (maintenance) {
crm_xml_add(
add_node_to_xml_by_id(node->details->id, maintenance),
XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0");
}
count++;
}
}
crm_trace("%s %d nodes to adjust maintenance-mode "
"to transition", maintenance?"Added":"Counted", count);
return count;
}
/*!
* \internal
* \brief Add pseudo action with nodes needing maintenance state update
*
* \param[in,out] data_set Working set for cluster
*/
void
add_maintenance_update(pe_working_set_t *data_set)
{
pe_action_t *action = NULL;
if (add_maintenance_nodes(NULL, data_set)) {
crm_trace("adding maintenance state update pseudo action");
action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);
pe__set_action_flags(action, pe_action_print_always);
}
}
/*!
* \internal
* \brief Add XML with nodes that an action is expected to bring down
*
* If a specified action is expected to bring any nodes down, add an XML block
* with their UUIDs. When a node is lost, this allows the controller to
* determine whether it was expected.
*
* \param[in,out] xml Parent XML tag to add to
* \param[in] action Action to check for downed nodes
* \param[in] data_set Working set for cluster
*/
static void
add_downed_nodes(xmlNode *xml, const pe_action_t *action,
const pe_working_set_t *data_set)
{
CRM_CHECK(xml && action && action->node && data_set, return);
if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
/* Shutdown makes the action's node down */
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
} else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
/* Fencing makes the action's node and any hosted guest nodes down */
const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
if (pcmk__strcase_any_of(fence, "off", "reboot", NULL)) {
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
}
} else if (action->rsc && action->rsc->is_remote_node
&& pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
/* Stopping a remote connection resource makes connected node down,
* unless it's part of a migration
*/
GList *iter;
pe_action_t *input;
gboolean migrating = FALSE;
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
input = ((pe_action_wrapper_t *) iter->data)->action;
if (input->rsc && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_casei)
&& pcmk__str_eq(input->task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
migrating = TRUE;
break;
}
}
if (!migrating) {
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->rsc->id, downed);
}
}
}
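/* Illustrative note (not part of the patch): assuming the usual expansions of
 * XML_GRAPH_TAG_DOWNED and XML_CIB_TAG_NODE, the block produced above for a
 * clean shutdown looks roughly like:
 *
 *   <downed>
 *     <node id="node-uuid"/>
 *   </downed>
 *
 * which lets the controller distinguish an expected node loss from an
 * unexpected one.
 */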
static bool
should_lock_action(pe_action_t *action)
{
// Only actions taking place on resource's lock node are locked
if ((action->rsc->lock_node == NULL) || (action->node == NULL)
|| (action->node->details != action->rsc->lock_node->details)) {
return false;
}
/* During shutdown, only stops are locked (otherwise, another action such as
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && action->task
&& strcmp(action->task, RSC_STOP)) {
return false;
}
return true;
}
static xmlNode *
action2xml(pe_action_t * action, gboolean as_input, pe_working_set_t *data_set)
{
gboolean needs_node_info = TRUE;
gboolean needs_maintenance_info = FALSE;
xmlNode *action_xml = NULL;
xmlNode *args_xml = NULL;
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *rsc_details = NULL;
#endif
if (action == NULL) {
return NULL;
}
if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
/* All fences need node info; guest node fences are pseudo-events */
action_xml = create_xml_node(NULL,
pcmk_is_set(action->flags, pe_action_pseudo)?
XML_GRAPH_TAG_PSEUDO_EVENT :
XML_GRAPH_TAG_CRM_EVENT);
} else if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
} else if (pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
} else if (pcmk__str_eq(action->task, CRM_OP_LRM_REFRESH, pcmk__str_casei)) {
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
} else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
// CIB-only clean-up for shutdown locks
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB);
/* } else if(pcmk__str_eq(action->task, RSC_PROBED, pcmk__str_casei)) { */
/* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */
} else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
if (pcmk__str_eq(action->task, CRM_OP_MAINTENANCE_NODES, pcmk__str_casei)) {
needs_maintenance_info = TRUE;
}
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT);
needs_node_info = FALSE;
} else {
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
#endif
}
crm_xml_add_int(action_xml, XML_ATTR_ID, action->id);
crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task);
if (action->rsc != NULL && action->rsc->clone_name != NULL) {
char *clone_key = NULL;
guint interval_ms;
if (pcmk__guint_from_hash(action->meta,
XML_LRM_ATTR_INTERVAL_MS, 0,
&interval_ms) != pcmk_rc_ok) {
interval_ms = 0;
}
if (pcmk__str_eq(action->task, RSC_NOTIFY, pcmk__str_casei)) {
const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");
CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid));
CRM_CHECK(n_task != NULL,
crm_err("No notify operation value found for %s", action->uuid));
clone_key = pcmk__notify_key(action->rsc->clone_name,
n_type, n_task);
} else if(action->cancel_task) {
clone_key = pcmk__op_key(action->rsc->clone_name,
action->cancel_task, interval_ms);
} else {
clone_key = pcmk__op_key(action->rsc->clone_name,
action->task, interval_ms);
}
CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid));
crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
free(clone_key);
} else {
crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
}
if (needs_node_info && action->node != NULL) {
pe_node_t *router_node = get_router_node(action);
crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
if (router_node) {
crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname);
}
g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET), strdup(action->node->details->uname));
g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET_UUID), strdup(action->node->details->id));
}
/* No details if this action is only being listed in the inputs section */
if (as_input) {
return action_xml;
}
if (action->rsc && !pcmk_is_set(action->flags, pe_action_pseudo)) {
int lpc = 0;
xmlNode *rsc_xml = NULL;
const char *attr_list[] = {
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER,
XML_ATTR_TYPE
};
/* If a resource is locked to a node via shutdown-lock, mark its actions
* so the controller can preserve the lock when the action completes.
*/
if (should_lock_action(action)) {
crm_xml_add_ll(action_xml, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
(long long) action->rsc->lock_time);
}
// List affected resource
rsc_xml = create_xml_node(action_xml,
crm_element_name(action->rsc->xml));
if (pcmk_is_set(action->rsc->flags, pe_rsc_orphan)
&& action->rsc->clone_name) {
/* Do not use the 'instance free' name here as that
* might interfere with the instance we plan to keep.
* Ie. if there are more than two named /anonymous/
* instances on a given node, we need to make sure the
* command goes to the right one.
*
* Keep this block, even when everyone is using
* 'instance free' anonymous clone names - it means
* we'll do the right thing if anyone toggles the
* unique flag to 'off'
*/
crm_debug("Using orphan clone name %s instead of %s", action->rsc->id,
action->rsc->clone_name);
crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
} else if (!pcmk_is_set(action->rsc->flags, pe_rsc_unique)) {
const char *xml_id = ID(action->rsc->xml);
crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id,
action->rsc->clone_name);
/* ID is what we'd like client to use
* ID_LONG is what they might know it as instead
*
* ID_LONG is only strictly needed /here/ during the
* transition period until all nodes in the cluster
* are running the new software /and/ have rebooted
* once (meaning that they've only ever spoken to a DC
* supporting this feature).
*
* If anyone toggles the unique flag to 'on', the
* 'instance free' name will correspond to an orphan
* and fall into the clause above instead
*/
crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id);
if (action->rsc->clone_name && !pcmk__str_eq(xml_id, action->rsc->clone_name, pcmk__str_casei)) {
crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name);
} else {
crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
}
} else {
CRM_ASSERT(action->rsc->clone_name == NULL);
crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id);
}
for (lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) {
crm_xml_add(rsc_xml, attr_list[lpc],
g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
}
}
/* List any attributes in effect */
args_xml = create_xml_node(NULL, XML_TAG_ATTRS);
crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
g_hash_table_foreach(action->extra, hash2field, args_xml);
if (action->rsc != NULL && action->node) {
// Get the resource instance attributes, evaluated properly for node
GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
/* REMOTE_CONTAINER_HACK: If this is a remote connection resource with
* addr="#uname", pull the actual value from the parameters evaluated
* without a node (which was put there earlier in stage8() when the
* bundle's expand() method was called).
*/
const char *remote_addr = g_hash_table_lookup(params,
XML_RSC_ATTR_REMOTE_RA_ADDR);
if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
GHashTable *base = pe_rsc_params(action->rsc, NULL, data_set);
remote_addr = g_hash_table_lookup(base,
XML_RSC_ATTR_REMOTE_RA_ADDR);
if (remote_addr != NULL) {
g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
strdup(remote_addr));
}
}
g_hash_table_foreach(params, hash2smartfield, args_xml);
#if ENABLE_VERSIONED_ATTRS
{
xmlNode *versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
pe_get_versioned_attributes(versioned_parameters, action->rsc,
action->node, data_set);
if (xml_has_children(versioned_parameters)) {
add_node_copy(action_xml, versioned_parameters);
}
free_xml(versioned_parameters);
}
#endif
} else if(action->rsc && action->rsc->variant <= pe_native) {
GHashTable *params = pe_rsc_params(action->rsc, NULL, data_set);
g_hash_table_foreach(params, hash2smartfield, args_xml);
#if ENABLE_VERSIONED_ATTRS
if (xml_has_children(action->rsc->versioned_parameters)) {
add_node_copy(action_xml, action->rsc->versioned_parameters);
}
#endif
}
#if ENABLE_VERSIONED_ATTRS
if (rsc_details) {
if (xml_has_children(rsc_details->versioned_parameters)) {
add_node_copy(action_xml, rsc_details->versioned_parameters);
}
if (xml_has_children(rsc_details->versioned_meta)) {
add_node_copy(action_xml, rsc_details->versioned_meta);
}
}
#endif
g_hash_table_foreach(action->meta, hash2metafield, args_xml);
if (action->rsc != NULL) {
const char *value = g_hash_table_lookup(action->rsc->meta, "external-ip");
pe_resource_t *parent = action->rsc;
while (parent != NULL) {
parent->cmds->append_meta(parent, args_xml);
parent = parent->parent;
}
if(value) {
hash2smartfield((gpointer)"pcmk_external_ip", (gpointer)value, (gpointer)args_xml);
}
if (action->node && /* make clang analyzer happy */
pe__is_guest_node(action->node)) {
pe_node_t *host = NULL;
enum action_tasks task = text2task(action->task);
if(task == action_notify || task == action_notified) {
const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");
task = text2task(n_task);
}
// Differentiate between up and down actions
switch (task) {
case stop_rsc:
case stopped_rsc:
case action_demote:
case action_demoted:
host = pe__current_node(action->node->details->remote_rsc->container);
break;
case start_rsc:
case started_rsc:
case monitor_rsc:
case action_promote:
case action_promoted:
host = action->node->details->remote_rsc->container->allocated_to;
break;
default:
break;
}
if(host) {
hash2metafield((gpointer)XML_RSC_ATTR_TARGET,
(gpointer)g_hash_table_lookup(action->rsc->meta, XML_RSC_ATTR_TARGET), (gpointer)args_xml);
hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
(gpointer)host->details->uname,
(gpointer)args_xml);
}
}
} else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei) && action->node) {
/* Pass the node's attributes as meta-attributes.
*
* @TODO: Determine whether it is still necessary to do this. It was
* added in 33d99707, probably for the libfence-based implementation in
* c9a90bd, which is no longer used.
*/
g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
}
sorted_xml(args_xml, action_xml, FALSE);
free_xml(args_xml);
/* List any nodes this action is expected to make down */
if (needs_node_info && (action->node != NULL)) {
add_downed_nodes(action_xml, action, data_set);
}
if (needs_maintenance_info) {
add_maintenance_nodes(action_xml, data_set);
}
crm_log_xml_trace(action_xml, "dumped action");
return action_xml;
}
static bool
should_dump_action(pe_action_t *action)
{
CRM_CHECK(action != NULL, return false);
if (pcmk_is_set(action->flags, pe_action_dumped)) {
crm_trace("Action %s (%d) already dumped", action->uuid, action->id);
return false;
} else if (pcmk_is_set(action->flags, pe_action_pseudo)
&& pcmk__str_eq(action->task, CRM_OP_PROBED, pcmk__str_casei)) {
GList *lpc = NULL;
/* This is a horrible but convenient hack
*
* It minimizes the number of actions with unsatisfied inputs
* (i.e. not included in the graph)
*
* This, in turn, means we can be more concise when printing
* aborted/incomplete graphs.
*
* It also makes it obvious which node is preventing
* probe_complete from running (presumably because it is only
* partially up)
*
* For these reasons we tolerate such perversions
*/
for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *wrapper = (pe_action_wrapper_t *) lpc->data;
if (!pcmk_is_set(wrapper->action->flags, pe_action_runnable)) {
/* Only interested in runnable operations */
} else if (!pcmk__str_eq(wrapper->action->task, RSC_START, pcmk__str_casei)) {
/* Only interested in start operations */
} else if (pcmk_is_set(wrapper->action->flags, pe_action_dumped)
|| should_dump_action(wrapper->action)) {
crm_trace("Action %s (%d) should be dumped: "
"dependency of %s (%d)",
action->uuid, action->id,
wrapper->action->uuid, wrapper->action->id);
return true;
}
}
}
if (!pcmk_is_set(action->flags, pe_action_runnable)) {
crm_trace("Ignoring action %s (%d): unrunnable",
action->uuid, action->id);
return false;
} else if (pcmk_is_set(action->flags, pe_action_optional)
&& !pcmk_is_set(action->flags, pe_action_print_always)) {
crm_trace("Ignoring action %s (%d): optional",
action->uuid, action->id);
return false;
// Monitors should be dumped even for unmanaged resources
} else if (action->rsc && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
&& !pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_casei)) {
const char *interval_ms_s = g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS);
// Cancellation of recurring monitors should still be dumped
if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
action->uuid, action->id, action->rsc->id);
return false;
}
}
if (pcmk_is_set(action->flags, pe_action_pseudo) ||
pcmk__strcase_any_of(action->task, CRM_OP_FENCE, CRM_OP_SHUTDOWN, NULL)) {
/* skip the next checks */
return true;
}
if (action->node == NULL) {
pe_err("Skipping action %s (%d) "
"because it was not allocated to a node (bug?)",
action->uuid, action->id);
log_action(LOG_DEBUG, "Unallocated action", action, false);
return false;
} else if (pcmk_is_set(action->flags, pe_action_dc)) {
crm_trace("Action %s (%d) should be dumped: "
"can run on DC instead of %s",
action->uuid, action->id, action->node->details->uname);
} else if (pe__is_guest_node(action->node)
&& !action->node->details->remote_requires_reset) {
crm_trace("Action %s (%d) should be dumped: "
"assuming will be runnable on guest node %s",
action->uuid, action->id, action->node->details->uname);
} else if (action->node->details->online == false) {
pe_err("Skipping action %s (%d) "
"because it was scheduled for offline node (bug?)",
action->uuid, action->id);
log_action(LOG_DEBUG, "Action for offline node", action, FALSE);
return false;
#if 0
/* but this would also affect resources that can be safely
* migrated before a fencing op
*/
} else if (action->node->details->unclean == false) {
pe_err("Skipping action %s (%d) "
"because it was scheduled for unclean node (bug?)",
action->uuid, action->id);
log_action(LOG_DEBUG, "Action for unclean node", action, false);
return false;
#endif
}
return true;
}
/* lowest to highest */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (action_wrapper1->action->id > action_wrapper2->action->id) {
return -1;
}
if (action_wrapper1->action->id < action_wrapper2->action->id) {
return 1;
}
return 0;
}
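/* Illustrative sketch (not part of the patch): sort_action_id() has the GLib
 * GCompareFunc signature, so a caller can order a wrapper list by action ID
 * with g_list_sort() before walking it (for example, prior to de-duplicating
 * an action's inputs). The helper name and 'inputs' parameter are
 * hypothetical.
 */
static GList *
demo_sort_inputs_by_action_id(GList *inputs)
{
    return g_list_sort(inputs, sort_action_id);
}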
/*!
* \internal
* \brief Check whether an action input should be in the transition graph
*
* \param[in] action Action to check
* \param[in,out] input Action input to check
*
* \return true if input should be in graph, false otherwise
* \note This function may not only check an input, but also disable it under certain
* circumstances (load or anti-colocation orderings that are not needed).
*/
static bool
check_dump_input(pe_action_t *action, pe_action_wrapper_t *input)
{
int type = input->type;
if (input->state == pe_link_dumped) {
return true;
}
pe__clear_order_flags(type, pe_order_implies_first_printed
|pe_order_implies_then_printed
|pe_order_optional);
if (input->type == pe_order_none) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering disabled",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
&& (type == pe_order_none)
&& !pcmk__str_eq(input->action->uuid, CRM_OP_PROBED, pcmk__str_casei)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional and input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
&& pcmk_is_set(input->type, pe_order_one_or_more)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"one-or-more and input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(action->flags, pe_action_pseudo)
&& pcmk_is_set(input->type, pe_order_stonith_stop)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"stonith stop but action is pseudo",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(input->type, pe_order_implies_first_migratable)
&& !pcmk_is_set(input->action->flags, pe_action_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"implies input migratable but input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(input->type, pe_order_apply_first_non_migratable)
&& pcmk_is_set(input->action->flags, pe_action_migrate_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"only if input unmigratable but input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if ((input->type == pe_order_optional)
&& pcmk_is_set(input->action->flags, pe_action_migrate_runnable)
&& pcmk__ends_with(input->action->uuid, "_stop_0")) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional but stop in migration",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (input->type == pe_order_load) {
pe_node_t *input_node = input->action->node;
// load orderings are relevant only if actions are for same node
if (action->rsc && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)) {
pe_node_t *allocated = action->rsc->allocated_to;
/* For load_stopped -> migrate_to orderings, we care about where it
* has been allocated to, not where it will be executed.
*/
if ((input_node == NULL) || (allocated == NULL)
|| (input_node->details != allocated->details)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"load ordering node mismatch %s vs %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
(allocated? allocated->details->uname : ""),
(input_node? input_node->details->uname : ""));
input->type = pe_order_none;
return false;
}
} else if ((input_node == NULL) || (action->node == NULL)
|| (input_node->details != action->node->details)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"load ordering node mismatch %s vs %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
(action->node? action->node->details->uname : ""),
(input_node? input_node->details->uname : ""));
input->type = pe_order_none;
return false;
} else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"load ordering input optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
input->type = pe_order_none;
return false;
}
} else if (input->type == pe_order_anti_colocation) {
if (input->action->node && action->node
&& (input->action->node->details != action->node->details)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"anti-colocation node mismatch %s vs %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
action->node->details->uname,
input->action->node->details->uname);
input->type = pe_order_none;
return false;
} else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"anti-colocation input optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
input->type = pe_order_none;
return false;
}
} else if (input->action->rsc
&& input->action->rsc != action->rsc
&& pcmk_is_set(input->action->rsc->flags, pe_rsc_failed)
&& !pcmk_is_set(input->action->rsc->flags, pe_rsc_managed)
&& pcmk__ends_with(input->action->uuid, "_stop_0")
&& action->rsc && pe_rsc_is_clone(action->rsc)) {
crm_warn("Ignoring requirement that %s complete before %s:"
" unmanaged failed resources cannot prevent clone shutdown",
input->action->uuid, action->uuid);
return false;
} else if (pcmk_is_set(input->action->flags, pe_action_optional)
&& !pcmk_any_flags_set(input->action->flags,
pe_action_print_always|pe_action_dumped)
&& !should_dump_action(input->action)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
}
crm_trace("%s (%d) input %s (%d) @ %s should be dumped: %s, %s, %s, 0x%.6x",
action->uuid, action->id,
input->action->uuid, input->action->id,
input->action->node? input->action->node->details->uname : "no node",
pcmk_is_set(input->action->flags, pe_action_pseudo)? "pseudo" : "real",
pcmk_is_set(input->action->flags, pe_action_runnable)? "runnable" : "unrunnable",
pcmk_is_set(input->action->flags, pe_action_optional)? "optional" : "required",
input->type);
return true;
}
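/*!
 * \internal
 * \brief Check whether an ordering's inputs loop back to a given action
 *
 * Recursively follow the inputs of \p input to determine whether any path
 * leads back to \p init_action, using the pe_action_tracking flag to avoid
 * revisiting actions already on the current path.
 *
 * \param[in]     init_action  Action that started the loop check
 * \param[in]     action       Action whose input is being checked
 * \param[in,out] input        Input of \p action to check
 *
 * \return true if a loop back to \p init_action was found, false otherwise
 */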
static bool
graph_has_loop(pe_action_t *init_action, pe_action_t *action,
pe_action_wrapper_t *input)
{
bool has_loop = false;
if (pcmk_is_set(input->action->flags, pe_action_tracking)) {
crm_trace("Breaking tracking loop: %s@%s -> %s@%s (0x%.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
action->uuid,
action->node? action->node->details->uname : "",
input->type);
return false;
}
// Don't need to check inputs that won't be used
if (!check_dump_input(action, input)) {
return false;
}
if (input->action == init_action) {
crm_debug("Input loop found in %s@%s ->...-> %s@%s",
action->uuid,
action->node? action->node->details->uname : "",
init_action->uuid,
init_action->node? init_action->node->details->uname : "");
return true;
}
pe__set_action_flags(input->action, pe_action_tracking);
crm_trace("Checking inputs of action %s@%s input %s@%s (0x%.6x)"
"for graph loop with %s@%s ",
action->uuid,
action->node? action->node->details->uname : "",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
input->type,
init_action->uuid,
init_action->node? init_action->node->details->uname : "");
// Recursively check input itself for loops
for (GList *iter = input->action->actions_before;
iter != NULL; iter = iter->next) {
if (graph_has_loop(init_action, input->action,
(pe_action_wrapper_t *) iter->data)) {
// Recursive call already logged a debug message
has_loop = true;
goto done;
}
}
done:
pe__clear_action_flags(input->action, pe_action_tracking);
if (!has_loop) {
crm_trace("No input loop found in %s@%s -> %s@%s (0x%.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
action->uuid,
action->node? action->node->details->uname : "",
input->type);
}
return has_loop;
}
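/*!
 * \internal
 * \brief Check whether an ordering constraint should be ignored as invalid
 *
 * An ordering is treated as invalid if it is a user-defined constraint between
 * a resource running in a guest node and the resource that defines that guest
 * node, or if it is a load ordering into a migrate_to action that would create
 * a loop in the transition graph (as can happen when two resources migrate in
 * opposite directions).
 *
 * \param[in] action  'Then' action of the ordering
 * \param[in] input   'First' action wrapper of the ordering
 *
 * \return true if the ordering should be ignored, false otherwise
 */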
bool
pcmk__ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
{
/* Prevent user-defined ordering constraints between resources
* running in a guest node and the resource that defines that node.
*/
if (!pcmk_is_set(input->type, pe_order_preserve)
&& action->rsc && action->rsc->fillers
&& input->action->rsc && input->action->node
&& input->action->node->details->remote_rsc
&& (input->action->node->details->remote_rsc->container == action->rsc)) {
crm_warn("Invalid ordering constraint between %s and %s",
input->action->rsc->id, action->rsc->id);
return true;
}
/* If there's an order like
* "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1"
*
* then rscA is being migrated from node1 to node2, while rscB is being
* migrated from node2 to node1. If there would be a graph loop,
* break the order "load_stopped_node2" -> "rscA_migrate_to node1".
*/
if ((input->type == pe_order_load) && action->rsc
&& pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)
&& graph_has_loop(action, action, input)) {
return true;
}
return false;
}
// Remove duplicate inputs (regardless of flags)
static void
deduplicate_inputs(pe_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
pe_action_wrapper_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
next = item->next;
if (last_input && (input->action->id == last_input->action->id)) {
crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
input->action->uuid, input->action->id,
action->uuid, action->id);
/* For the purposes of scheduling, the ordering flags no longer
* matter, but crm_simulate looks at certain ones when creating a
* dot graph. Combining the flags is sufficient for that purpose.
*/
last_input->type |= input->type;
if (input->state == pe_link_dumped) {
last_input->state = pe_link_dumped;
}
free(item->data);
action->actions_before = g_list_delete_link(action->actions_before,
item);
} else {
last_input = input;
input->state = pe_link_not_dumped;
}
}
}
/*!
* \internal
* \brief Add an action to the transition graph XML if appropriate
*
* \param[in] action Action to possibly add
* \param[in] data_set Cluster working set
*
* \note This will de-duplicate the action inputs, meaning that the
* pe_action_wrapper_t:type flags can no longer be relied on to retain
* their original settings. That means this MUST be called after stage7()
* is complete, and nothing after this should rely on those type flags.
* (For example, some code looks for type equal to some flag rather than
* whether the flag is set, and some code looks for particular
* combinations of flags -- such code must be done before stage8().)
*/
void
graph_element_from_action(pe_action_t *action, pe_working_set_t *data_set)
{
GList *lpc = NULL;
int synapse_priority = 0;
xmlNode *syn = NULL;
xmlNode *set = NULL;
xmlNode *in = NULL;
xmlNode *xml_action = NULL;
pe_action_wrapper_t *input = NULL;
/* If we haven't already, de-duplicate inputs -- even if we won't be dumping
* the action, so that crm_simulate dot graphs don't have duplicates.
*/
if (!pcmk_is_set(action->flags, pe_action_dedup)) {
deduplicate_inputs(action);
pe__set_action_flags(action, pe_action_dedup);
}
if (should_dump_action(action) == FALSE) {
return;
}
pe__set_action_flags(action, pe_action_dumped);
syn = create_xml_node(data_set->graph, "synapse");
set = create_xml_node(syn, "action_set");
in = create_xml_node(syn, "inputs");
crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
data_set->num_synapse++;
if (action->rsc != NULL) {
synapse_priority = action->rsc->priority;
}
if (action->priority > synapse_priority) {
synapse_priority = action->priority;
}
if (synapse_priority > 0) {
crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority);
}
xml_action = action2xml(action, FALSE, data_set);
add_node_nocopy(set, crm_element_name(xml_action), xml_action);
for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
input = (pe_action_wrapper_t *) lpc->data;
if (check_dump_input(action, input)) {
xmlNode *input_xml = create_xml_node(in, "trigger");
input->state = pe_link_dumped;
xml_action = action2xml(input->action, TRUE, data_set);
add_node_nocopy(input_xml, crm_element_name(xml_action), xml_action);
}
}
}
diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
index 9c7fa6b935..88a4a0374b 100644
--- a/lib/pacemaker/pcmk_sched_native.c
+++ b/lib/pacemaker/pcmk_sched_native.c
@@ -1,3221 +1,3223 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1
#define INFINITY_HACK (INFINITY * -100)
#define VARIANT_NATIVE 1
#include
extern bool pcmk__is_daemon;
static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
pe_working_set_t *data_set);
static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
pe_working_set_t *data_set);
static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
pe_working_set_t * data_set);
gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
/* This array says what the *next* role should be when transitioning from one
* role to another. For example going from Stopped to Promoted, the next role is
* RSC_ROLE_UNPROMOTED, because the resource must be started before being promoted.
* The current role then becomes Unpromoted, which is fed into this array again,
* giving a next role of RSC_ROLE_PROMOTED.
*/
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state*/
/* Unknown Stopped Started Unpromoted Promoted */
/* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED },
/* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED },
/* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
/* Unpromoted */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
/* Promoted */ { RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
};
typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
gboolean optional,
pe_working_set_t *data_set);
// This array picks the function needed to transition from one role to another
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state */
/* Unknown Stopped Started Unpromoted Promoted */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Unpromoted */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Promoted */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
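/* Illustrative sketch (not part of this file): the scheduler walks these two
 * matrices one step at a time until the target role is reached. For example,
 * taking a resource from Stopped to Promoted schedules StartRsc() and then
 * PromoteRsc(), using the same variable names as native_create_actions()
 * below:
 *
 *     enum rsc_role_e role = RSC_ROLE_STOPPED;
 *
 *     while (role != RSC_ROLE_PROMOTED) {
 *         enum rsc_role_e next_role = rsc_state_matrix[role][RSC_ROLE_PROMOTED];
 *
 *         rsc_action_matrix[role][next_role](rsc, chosen, FALSE, data_set);
 *         role = next_role;
 *     }
 */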
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do { \
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Node weight", (nw_rsc)->id, (flags), \
(flags_to_clear), #flags_to_clear); \
} while (0)
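/*!
 * \internal
 * \brief Choose a node for a resource and assign the resource to it
 *
 * Sort the resource's allowed nodes by weight, favor a preferred node if it is
 * at least as suitable as the best candidate, and when scores are tied, prefer
 * the node the resource is already running on (except for unique clone
 * instances).
 *
 * \param[in,out] rsc       Resource to assign
 * \param[in]     prefer    Node to prefer, if any
 * \param[in]     data_set  Cluster working set
 *
 * \return TRUE if \p rsc was assigned to a node, FALSE otherwise
 */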
static gboolean
native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
{
GList *nodes = NULL;
pe_node_t *chosen = NULL;
pe_node_t *best = NULL;
int multiple = 1;
int length = 0;
gboolean result = FALSE;
process_utilization(rsc, &prefer, data_set);
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to ? TRUE : FALSE;
}
// Sort allowed nodes by weight
if (rsc->allowed_nodes) {
length = g_hash_table_size(rsc->allowed_nodes);
}
if (length > 0) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
// First node in sorted list has the best score
best = g_list_nth_data(nodes, 0);
}
if (prefer && nodes) {
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
prefer->details->uname, rsc->id);
/* Favor the preferred node as long as its weight is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's weight is less than INFINITY.
*/
} else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else if (!can_run_resources(chosen)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc,
"Chose preferred node %s for %s (ignoring %d candidates)",
chosen->details->uname, rsc->id, length);
}
}
if ((chosen == NULL) && nodes) {
/* Either there is no preferred node, or the preferred node is not
* available, but there are other nodes allowed to run the resource.
*/
chosen = best;
pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
chosen ? chosen->details->uname : "", rsc->id, length);
if (!pe_rsc_is_unique_clone(rsc->parent)
&& chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* distribute_children() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unallocated instances to prefer a node that's already
* running another instance.
*/
pe_node_t *running = pe__current_node(rsc);
if (running && (can_run_resources(running) == FALSE)) {
pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
rsc->id, running->details->uname);
} else if (running) {
for (GList *iter = nodes->next; iter; iter = iter->next) {
pe_node_t *tmp = (pe_node_t *) iter->data;
if (tmp->weight != chosen->weight) {
// The nodes are sorted by weight, so no more are equal
break;
}
if (tmp->details == running->details) {
// Scores are equal, so prefer the current node
chosen = tmp;
}
multiple++;
}
}
}
}
if (multiple > 1) {
static char score[33];
int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
score2char_stack(chosen->weight, score, sizeof(score));
do_crm_log(log_level,
"Chose node %s for %s from %d nodes with score %s",
chosen->details->uname, rsc->id, multiple, score);
}
result = native_assign_node(rsc, chosen, FALSE);
g_list_free(nodes);
return result;
}
/*!
* \internal
* \brief Find score of highest-scored node that matches colocation attribute
*
* \param[in] rsc Resource whose allowed nodes should be searched
* \param[in] attr Colocation attribute name (must not be NULL)
* \param[in] value Colocation attribute value to require
*
* \return Highest weight of any allowed node that matches \p attr = \p value
*         and can run resources, or -INFINITY if none
*/
static int
best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
pe_node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
// Find best allowed node with matching attribute
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight > best_score) && can_run_resources(node)
&& pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
} else {
crm_info("Allowed node %s for %s had best score (%d) "
"of those matching node attribute %s=%s",
best_node, rsc->id, best_score, attr, value);
}
}
return best_score;
}
/*!
* \internal
* \brief Add resource's colocation matches to current node allocation scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
* \param[in,out] nodes Hash table of nodes with allocation scores so far
* \param[in] rsc Resource whose allowed nodes should be compared
* \param[in] attr Colocation attribute that must match (NULL for default)
* \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
const char *attr, float factor,
bool only_positive)
{
GHashTableIter iter;
pe_node_t *node = NULL;
if (attr == NULL) {
attr = CRM_ATTR_UNAME;
}
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
float weight_f = 0;
int weight = 0;
int score = 0;
int new_score = 0;
score = best_node_score_matching_attr(rsc, attr,
pe_node_attribute_raw(node, attr));
if ((factor < 0) && (score < 0)) {
/* Negative preference for a node with a negative score
* should not become a positive preference.
*
* @TODO Consider filtering only if weight is -INFINITY
*/
crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
node->details->uname, node->weight, factor, score);
continue;
}
if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
node->details->uname, node->weight, factor, score);
continue;
}
weight_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
if ((weight == 0) && (score != 0)) {
if (factor > 0.0) {
weight = 1;
} else if (factor < 0.0) {
weight = -1;
}
}
new_score = pe__add_scores(weight, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
"(negative disallowed, marking node unusable)",
node->details->uname, node->weight, factor, score,
new_score);
node->weight = INFINITY_HACK;
continue;
}
if (only_positive && (new_score < 0) && (node->weight == 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
node->details->uname, node->weight, factor, score,
new_score);
continue;
}
crm_trace("%s: %d + %f * %d = %d", node->details->uname,
node->weight, factor, score, new_score);
node->weight = new_score;
}
}
static inline bool
is_nonempty_group(pe_resource_t *rsc)
{
return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
}
/*!
* \internal
* \brief Incorporate colocation constraint scores into node weights
*
* \param[in,out] rsc Resource being placed
* \param[in] rhs ID of 'with' resource
* \param[in,out] nodes Nodes, with scores as of this point
* \param[in] attr Colocation attribute (ID by default)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pe_weights values
*
* \return Nodes, with scores modified by this constraint
* \note This function assumes ownership of the nodes argument. The caller
* should free the returned copy rather than the original.
*/
GHashTable *
pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs,
GHashTable *nodes, const char *attr, float factor,
uint32_t flags)
{
GHashTable *work = NULL;
// Avoid infinite recursion
if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
return nodes;
}
pe__set_resource_flags(rsc, pe_rsc_merging);
if (pcmk_is_set(flags, pe_weights_init)) {
if (is_nonempty_group(rsc)) {
GList *last = g_list_last(rsc->children);
pe_resource_t *last_rsc = last->data;
pe_rsc_trace(rsc, "%s: Merging scores from group %s "
"using last member %s (at %.6f)",
rhs, rsc->id, last_rsc->id, factor);
work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
flags);
} else {
work = pcmk__copy_node_table(rsc->allowed_nodes);
}
clear_node_weights_flags(flags, rsc, pe_weights_init);
} else if (is_nonempty_group(rsc)) {
/* The first member of the group will recursively incorporate any
* constraints involving other members (including the group internal
* colocation).
*
* @TODO The indirect colocations from the dependent group's other
* members will be incorporated at full strength rather than by
* factor, so the group's combined stickiness will be treated as
* (factor + (#members - 1)) * stickiness. It is questionable what
* the right approach should be.
*/
pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
"(at %.6f)", rhs, rsc->id, factor);
work = pcmk__copy_node_table(nodes);
work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
factor, flags);
} else {
pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
rhs, rsc->id, factor);
work = pcmk__copy_node_table(nodes);
add_node_scores_matching_attr(work, rsc, attr, factor,
pcmk_is_set(flags, pe_weights_positive));
}
if (can_run_any(work)) {
GList *gIter = NULL;
int multiplier = (factor < 0)? -1 : 1;
if (pcmk_is_set(flags, pe_weights_forward)) {
gIter = rsc->rsc_cons;
pe_rsc_trace(rsc,
"Checking additional %d optional '%s with' constraints",
g_list_length(gIter), rsc->id);
} else if (is_nonempty_group(rsc)) {
pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
gIter = last_rsc->rsc_cons_lhs;
pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
"constraints using last member %s",
g_list_length(gIter), rsc->id, last_rsc->id);
} else {
gIter = rsc->rsc_cons_lhs;
pe_rsc_trace(rsc,
"Checking additional %d optional 'with %s' constraints",
g_list_length(gIter), rsc->id);
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *other = NULL;
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (pcmk_is_set(flags, pe_weights_forward)) {
other = constraint->rsc_rh;
} else if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
} else {
other = constraint->rsc_lh;
}
pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
constraint->id, constraint->rsc_lh->id,
constraint->rsc_rh->id);
work = pcmk__native_merge_weights(other, rhs, work,
constraint->node_attribute,
multiplier * constraint->score / (float) INFINITY,
flags|pe_weights_rollback);
pe__show_node_weights(true, NULL, rhs, work, rsc->cluster);
}
} else if (pcmk_is_set(flags, pe_weights_rollback)) {
pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
rhs, rsc->id);
g_hash_table_destroy(work);
pe__clear_resource_flags(rsc, pe_rsc_merging);
return nodes;
}
if (pcmk_is_set(flags, pe_weights_positive)) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (nodes) {
g_hash_table_destroy(nodes);
}
pe__clear_resource_flags(rsc, pe_rsc_merging);
return work;
}
static inline bool
node_has_been_unfenced(pe_node_t *node)
{
const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
}
static inline bool
is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
{
return pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing);
}
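/*!
 * \internal
 * \brief Allocate a primitive (native) resource to a node
 *
 * Apply colocation constraints to the resource's allowed node weights, then
 * choose a node and assign the resource to it (or to no node, if it cannot or
 * should not run anywhere).
 *
 * \param[in,out] rsc       Resource to allocate
 * \param[in]     prefer    Node to prefer, if any
 * \param[in]     data_set  Cluster working set
 *
 * \return Node that \p rsc was allocated to, or NULL if none
 */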
pe_node_t *
pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
/* never allocate children on their own */
pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
rsc->parent->id);
rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to;
}
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
GHashTable *archive = NULL;
pe_resource_t *rsc_rh = constraint->rsc_rh;
if ((constraint->role_lh >= RSC_ROLE_PROMOTED)
|| (constraint->score < 0 && constraint->score > -INFINITY)) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
pe_rsc_trace(rsc,
"%s: Allocating %s first (constraint=%s score=%d role=%s)",
rsc->id, rsc_rh->id, constraint->id,
constraint->score, role2text(constraint->role_lh));
rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive) {
g_hash_table_destroy(archive);
}
}
pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
}
pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
constraint->id, constraint->rsc_lh->id,
constraint->rsc_rh->id);
rsc->allowed_nodes =
constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
constraint->node_attribute,
(float)constraint->score / INFINITY,
pe_weights_rollback);
}
if (rsc->next_role == RSC_ROLE_STOPPED) {
pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
/* make sure it doesn't come up again */
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
} else if(rsc->next_role > rsc->role
&& !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
&& data_set->no_quorum_policy == no_quorum_freeze) {
crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
}
pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, data_set);
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
&& !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
const char *reason = NULL;
pe_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == RSC_ROLE_PROMOTED) {
reason = "promoted";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
native_assign_node(rsc, assign_to, TRUE);
} else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
native_assign_node(rsc, NULL, TRUE);
} else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
&& native_choose_node(rsc, prefer, data_set)) {
pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
} else if (rsc->allocated_to == NULL) {
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if (rsc->running_on != NULL) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
} else {
pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
}
pe__clear_resource_flags(rsc, pe_rsc_allocating);
if (rsc->is_remote_node) {
pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
CRM_ASSERT(remote_node != NULL);
if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
crm_trace("Setting Pacemaker Remote node %s to ONLINE",
remote_node->details->id);
remote_node->details->online = TRUE;
/* We shouldn't consider an unseen remote node unclean if we are going
* to try to connect to it; otherwise we get an unnecessary fence. */
if (remote_node->details->unseen == TRUE) {
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
remote_node->details->id, role2text(rsc->next_role),
(rsc->allocated_to? "" : "un"));
remote_node->details->shutdown = TRUE;
}
}
return rsc->allocated_to;
}
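/*!
 * \internal
 * \brief Check whether a resource's configuration duplicates an operation
 *
 * Scan a resource's operation definitions for another operation with the same
 * name and interval, logging a configuration error if a duplicate is found.
 *
 * \param[in] rsc          Resource whose operations should be checked
 * \param[in] name         Operation name
 * \param[in] interval_ms  Operation interval in milliseconds
 *
 * \return TRUE if a duplicate was found, FALSE otherwise
 */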
static gboolean
is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
{
gboolean dup = FALSE;
const char *id = NULL;
const char *value = NULL;
xmlNode *operation = NULL;
guint interval2_ms = 0;
CRM_ASSERT(rsc);
for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
value = crm_element_value(operation, "name");
if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
continue;
}
value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval2_ms = crm_parse_interval_spec(value);
if (interval_ms != interval2_ms) {
continue;
}
if (id == NULL) {
id = ID(operation);
} else {
pcmk__config_err("Operation %s is duplicate of %s (do not use "
"same name and interval combination more "
"than once per resource)", ID(operation), id);
dup = TRUE;
}
}
}
return dup;
}
static bool
op_cannot_recur(const char *name)
{
return pcmk__strcase_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL);
}
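/*!
 * \internal
 * \brief Create a recurring action for a resource's active role
 *
 * For one operation definition that does not have role="Stopped", create a
 * recurring monitor action on the node where the resource will be active
 * (or cancel an existing one that no longer matches the resource's next role),
 * ordering it after the resource's start and reload actions.
 *
 * \param[in,out] rsc        Resource the operation applies to
 * \param[in]     start      Resource's start action, if any
 * \param[in]     node       Node the resource will be active on, if any
 * \param[in]     operation  Operation definition XML from the resource configuration
 * \param[in]     data_set   Cluster working set
 */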
static void
RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
pe_action_t *mon = NULL;
gboolean is_optional = TRUE;
GList *possible_matches = NULL;
CRM_ASSERT(rsc);
/* Only process operations that do not have role="Stopped" */
role = crm_element_value(operation, "role");
if (role && text2role(role) == RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = pcmk__op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
if (start != NULL) {
pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
start->uuid);
is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
} else {
pe_rsc_trace(rsc, "Marking %s optional", key);
is_optional = TRUE;
}
/* start a monitor for an already active resource */
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches == NULL) {
is_optional = FALSE;
pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
} else {
GList *gIter = NULL;
for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
pe_action_t *op = (pe_action_t *) gIter->data;
if (pcmk_is_set(op->flags, pe_action_reschedule)) {
is_optional = FALSE;
break;
}
}
g_list_free(possible_matches);
}
if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
|| (role != NULL && text2role(role) != rsc->next_role)) {
int log_level = LOG_TRACE;
const char *result = "Ignoring";
if (is_optional) {
char *after_key = NULL;
pe_action_t *cancel_op = NULL;
// It's running, so cancel it
log_level = LOG_INFO;
result = "Cancelling";
cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
switch (rsc->role) {
case RSC_ROLE_UNPROMOTED:
case RSC_ROLE_STARTED:
if (rsc->next_role == RSC_ROLE_PROMOTED) {
after_key = promote_key(rsc);
} else if (rsc->next_role == RSC_ROLE_STOPPED) {
after_key = stop_key(rsc);
}
break;
case RSC_ROLE_PROMOTED:
after_key = demote_key(rsc);
break;
default:
break;
}
if (after_key) {
custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
pe_order_runnable_left, data_set);
}
}
do_crm_log(log_level, "%s action %s (%s vs. %s)",
result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
role2text(rsc->next_role));
free(key);
return;
}
mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
key = mon->uuid;
if (is_optional) {
pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
}
if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
node_uname, mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
} else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
node_uname, mon->uuid);
update_action_flags(mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
} else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
mon->task, interval_ms / 1000, rsc->id, node_uname);
}
if (rsc->next_role == RSC_ROLE_PROMOTED) {
char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
free(running_promoted);
}
if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, start_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
custom_action_order(rsc, reload_key(rsc), NULL,
NULL, strdup(key), mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
if (rsc->next_role == RSC_ROLE_PROMOTED) {
custom_action_order(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
} else if (rsc->role == RSC_ROLE_PROMOTED) {
custom_action_order(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
pe_order_optional | pe_order_runnable_left, data_set);
}
}
}
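/* Create recurring actions for a resource's active roles, unless the resource
 * or its node is in maintenance mode
 */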
static void
Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
RecurringOp(rsc, start, node, operation, data_set);
}
}
}
}
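/*!
 * \internal
 * \brief Create recurring monitors for a resource's Stopped role
 *
 * For one operation definition with role="Stopped", create a recurring monitor
 * on every node where the resource must not be running, and cancel any such
 * monitor on the node where it will run.
 *
 * \param[in,out] rsc        Resource the operation applies to
 * \param[in]     start      Resource's start action, if any
 * \param[in]     node       Node the resource will be active on, if any
 * \param[in]     operation  Operation definition XML from the resource configuration
 * \param[in]     data_set   Cluster working set
 */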
static void
RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
GList *possible_matches = NULL;
GList *gIter = NULL;
/* Only process operations that have role="Stopped" */
role = crm_element_value(operation, "role");
if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = pcmk__op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
// @TODO add support
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
crm_notice("Ignoring %s (recurring monitors for Stopped role are "
"not supported for anonymous clones)",
ID(operation));
return;
}
pe_rsc_trace(rsc,
"Creating recurring action %s for %s in role %s on nodes where it should not be running",
ID(operation), rsc->id, role2text(rsc->next_role));
/* if the monitor exists on the node where the resource will be running, cancel it */
if (node != NULL) {
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches) {
pe_action_t *cancel_op = NULL;
g_list_free(possible_matches);
cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
if ((rsc->next_role == RSC_ROLE_STARTED)
|| (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
/* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
/* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
pe_order_runnable_left, data_set);
}
pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
key, role, role2text(rsc->next_role), node_uname);
}
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *stop_node = (pe_node_t *) gIter->data;
const char *stop_node_uname = stop_node->details->uname;
gboolean is_optional = TRUE;
gboolean probe_is_optional = TRUE;
gboolean stop_is_optional = TRUE;
pe_action_t *stopped_mon = NULL;
char *rc_inactive = NULL;
GList *probe_complete_ops = NULL;
GList *stop_ops = NULL;
GList *local_gIter = NULL;
if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
continue;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
ID(operation), rsc->id, crm_str(stop_node_uname));
/* start a monitor for an already stopped resource */
possible_matches = find_actions_exact(rsc->actions, key, stop_node);
if (possible_matches == NULL) {
pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
crm_str(stop_node_uname));
is_optional = FALSE;
} else {
pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
crm_str(stop_node_uname));
is_optional = TRUE;
g_list_free(possible_matches);
}
stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
free(rc_inactive);
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
FALSE);
GList *pIter = NULL;
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
order_actions(probe, stopped_mon, pe_order_runnable_left);
crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
}
g_list_free(probes);
}
if (probe_complete_ops) {
g_list_free(probe_complete_ops);
}
stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
pe_action_t *stop = (pe_action_t *) local_gIter->data;
if (!pcmk_is_set(stop->flags, pe_action_optional)) {
stop_is_optional = FALSE;
}
if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
crm_debug("%s\t %s (cancelled : stop un-runnable)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
custom_action_order(rsc, stop_key(rsc), stop,
NULL, strdup(key), stopped_mon,
pe_order_implies_then | pe_order_runnable_left, data_set);
}
}
if (stop_ops) {
g_list_free(stop_ops);
}
if (is_optional == FALSE && probe_is_optional && stop_is_optional
&& !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
key, crm_str(stop_node_uname));
update_action_flags(stopped_mon, pe_action_optional, __func__,
__LINE__);
}
if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
}
if (stop_node->details->online == FALSE || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
crm_str(stop_node_uname), stopped_mon->uuid);
update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
&& !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
}
}
free(key);
}
static void
Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
RecurringOp_Stopped(rsc, start, node, operation, data_set);
}
}
}
}
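/*!
 * \internal
 * \brief Create actions needed to live-migrate a resource
 *
 * Create migrate_to and migrate_from actions (only migrate_from when
 * continuing a partial migration) and order them relative to the resource's
 * probe, stop, and start actions.
 *
 * \param[in,out] rsc       Resource being migrated
 * \param[in]     current   Node the resource is moving from
 * \param[in]     chosen    Node the resource is moving to
 * \param[in]     data_set  Cluster working set
 */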
static void
handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
{
pe_action_t *migrate_to = NULL;
pe_action_t *migrate_from = NULL;
pe_action_t *start = NULL;
pe_action_t *stop = NULL;
gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
start = start_action(rsc, chosen, TRUE);
stop = stop_action(rsc, current, TRUE);
if (partial == FALSE) {
migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
RSC_MIGRATE, current, TRUE, TRUE, data_set);
}
migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
if ((migrate_to && migrate_from) || (migrate_from && partial)) {
pe__set_action_flags(start, pe_action_migrate_runnable);
pe__set_action_flags(stop, pe_action_migrate_runnable);
// This is easier than trying to delete it from the graph
update_action_flags(start, pe_action_pseudo, __func__, __LINE__);
/* order probes before migrations */
if (partial) {
pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
migrate_from->needs = start->needs;
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
NULL, pe_order_optional, data_set);
} else {
pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
migrate_to->needs = start->needs;
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, pe_order_optional, data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, rsc,
pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable,
data_set);
}
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable,
data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
data_set);
}
if (migrate_to) {
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
/* Pacemaker Remote connections don't require pending to be recorded in
* the CIB. We can reduce CIB writes by not setting PENDING for them.
*/
if (rsc->is_remote_node == FALSE) {
/* migrate_to takes place on the source node, but can
* have an effect on the target node depending on how
* the agent is written. Because of this, we have to maintain
* a record that the migrate_to occurred, in case the source node
* loses membership while the migrate_to action is still in-flight.
*/
add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
}
}
if (migrate_from) {
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
}
}
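/*!
 * \internal
 * \brief Create all actions needed for a primitive resource's next transition
 *
 * Create the stop, start, demote, promote, migration, and recurring monitor
 * actions required to take the resource from its current role and node(s) to
 * its next role on its chosen node.
 *
 * \param[in,out] rsc       Resource to create actions for
 * \param[in]     data_set  Cluster working set
 */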
void
native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *start = NULL;
pe_node_t *chosen = NULL;
pe_node_t *current = NULL;
gboolean need_stop = FALSE;
bool need_promote = FALSE;
gboolean is_moving = FALSE;
gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
GList *gIter = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
bool multiply_active = FALSE;
enum rsc_role_e role = RSC_ROLE_UNKNOWN;
enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
CRM_ASSERT(rsc);
chosen = rsc->allocated_to;
next_role = rsc->next_role;
if (next_role == RSC_ROLE_UNKNOWN) {
pe__set_next_role(rsc,
(chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
"allocation");
}
pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
rsc->id, role2text(rsc->role), role2text(rsc->next_role),
((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
((chosen == NULL)? "no node" : chosen->details->uname));
current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
pe_node_t *dangling_source = (pe_node_t *) gIter->data;
pe_action_t *stop = NULL;
pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
rsc->id, dangling_source->details->uname);
stop = stop_action(rsc, dangling_source, FALSE);
pe__set_action_flags(stop, pe_action_dangle);
if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, dangling_source, FALSE, data_set);
}
}
if ((num_all_active == 2) && (num_clean_active == 2) && chosen
&& rsc->partial_migration_source && rsc->partial_migration_target
&& (current->details == rsc->partial_migration_source->details)
&& (chosen->details == rsc->partial_migration_target->details)) {
/* The chosen node is still the migration target from a partial
* migration. Attempt to continue the migration instead of recovering
* by stopping the resource everywhere and starting it on a single node.
*/
pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
"to target %s from %s",
rsc->partial_migration_target->details->id,
rsc->partial_migration_source->details->id);
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
/* If a resource has "requires" set to nothing or quorum, don't consider
* it active on unclean nodes (similar to how all resources behave when
* stonith-enabled is false). We can start such resources elsewhere
* before fencing completes, and if we considered the resource active on
* the failed node, we would attempt recovery for being active on
* multiple nodes.
*/
multiply_active = (num_clean_active > 1);
} else {
multiply_active = (num_all_active > 1);
}
if (multiply_active) {
if (rsc->partial_migration_target && rsc->partial_migration_source) {
// Migration was in progress, but we've chosen a different target
crm_notice("Resource %s can no longer migrate from %s to %s "
"(will stop on both nodes)",
rsc->id, rsc->partial_migration_source->details->uname,
rsc->partial_migration_target->details->uname);
} else {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Resource was incorrectly multiply active
pe_proc_err("%s resource %s is active on %u nodes (%s)",
crm_str(class), rsc->id, num_all_active,
recovery2text(rsc->recovery_type));
crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
}
if (rsc->recovery_type == recovery_stop_start) {
need_stop = TRUE;
}
/* If a partial migration was in progress but the migration target is no
* longer the chosen node, clear all partial migration data.
*/
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = FALSE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, chosen, TRUE);
pe__set_action_flags(start, pe_action_print_always);
}
if (current && chosen && current->details != chosen->details) {
pe_rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, crm_str(current->details->uname),
crm_str(chosen->details->uname));
is_moving = TRUE;
need_stop = TRUE;
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
need_stop = TRUE;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == RSC_ROLE_PROMOTED) {
need_promote = TRUE;
}
}
} else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = TRUE;
} else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, chosen, TRUE);
if (!pcmk_is_set(start->flags, pe_action_optional)) {
// Recovery of a promoted resource
pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = TRUE;
}
}
/* Create any additional actions required when bringing resource down and
* back up to same level.
*/
role = rsc->role;
while (role != RSC_ROLE_STOPPED) {
next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pe_rsc_block)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
required = true;
}
pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
(required? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
data_set) == FALSE) {
break;
}
role = next_role;
}
role = rsc->role;
/* Required steps from this role to the next */
while (role != rsc->next_role) {
next_role = rsc_state_matrix[role][rsc->next_role];
pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
rsc->id, role2text(role), role2text(next_role),
role2text(rsc->next_role));
if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
break;
}
role = next_role;
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
rsc->id);
} else if ((rsc->next_role != RSC_ROLE_STOPPED)
|| !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
rsc->id);
start = start_action(rsc, chosen, TRUE);
Recurring(rsc, start, chosen, data_set);
Recurring_Stopped(rsc, start, chosen, data_set);
} else {
pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
rsc->id);
Recurring_Stopped(rsc, NULL, NULL, data_set);
}
/* If we are stuck in a partial migration whose target no longer matches
* the chosen node, a full stop/start is required.
*/
if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
rsc->id);
allow_migrate = FALSE;
} else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
|| pcmk_any_flags_set(rsc->flags,
pe_rsc_failed|pe_rsc_start_pending)
|| (current && current->details->unclean)
|| rsc->next_role < RSC_ROLE_STARTED) {
allow_migrate = FALSE;
}
if (allow_migrate) {
handle_migration_actions(rsc, current, chosen, data_set);
}
}
static void
rsc_avoids_remote_nodes(pe_resource_t *rsc)
{
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->details->remote_rsc) {
node->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
* \param[in] data_set Cluster working set
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
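    // Sort only outside the daemons, as described in the function comment above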
if (!pcmk__is_daemon) {
allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
}
return allowed_nodes;
}
void
native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
/* This function is on the critical path and worth optimizing as much as possible */
pe_resource_t *top = NULL;
GList *allowed_nodes = NULL;
bool check_unfencing = FALSE;
bool check_utilization = FALSE;
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping native constraints for unmanaged resource: %s",
rsc->id);
return;
}
top = uber_parent(rsc);
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)
&& pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(data_set->placement_strategy,
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_then|pe_order_restart,
data_set);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(top->flags, pe_rsc_promotable)
|| (rsc->role > RSC_ROLE_UNPROMOTED)) {
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_promoted_implies_first, data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
pe_order_runnable_left, data_set);
}
// Don't clear resource history if probing on same node
custom_action_order(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
NULL, pe_order_same_node|pe_order_then_cancels_first,
data_set);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || rsc->container) {
allowed_nodes = allowed_nodes_as_list(rsc, data_set);
}
if (check_unfencing) {
/* Check if the node needs to be unfenced first */
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
/*
* It would be more efficient to order clone resources once,
* rather than order each instance, but ordering the instance
* allows us to avoid unnecessary dependencies that might conflict
* with user constraints.
*
* @TODO: This constraint can still produce a transition loop if the
* resource has a stop scheduled on the node being unfenced, and
* there is a user ordering constraint to start some other resource
* (which will be ordered after the unfence) before stopping this
* resource. An example is "start some slow-starting cloned service
* before stopping an associated virtual IP that may be moving to
* it":
* stop this -> unfencing -> start that -> stop this
*/
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
pe_order_optional|pe_order_same_node, data_set);
custom_action_order(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
pe_order_implies_then_on_node|pe_order_same_node,
data_set);
}
}
if (check_utilization) {
GList *gIter = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, data_set->placement_strategy);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
current->details->uname);
pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
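            /* The first time this load_stopped pseudo-op is seen, pin it to
             * the current node and make it required.
             */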
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(current);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
__func__, __LINE__);
}
custom_action_order(rsc, stop_key(rsc), NULL,
NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
}
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *next = item->data;
char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
next->details->uname);
pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(next);
update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
__func__, __LINE__);
}
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, start_key(rsc), NULL, pe_order_load, data_set);
custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, pe_order_load, data_set);
free(load_stopped_task);
}
}
if (rsc->container) {
pe_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Do not allow a guest resource to live on a Pacemaker Remote node,
* to avoid nesting remotes. However, allow bundles to run on remote
* nodes.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP,
pe_order_optional, data_set);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(data_set,
rsc->container);
}
if (remote_rsc) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
custom_action_order(rsc->container,
pcmk__op_key(rsc->container->id, RSC_START, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
NULL,
pe_order_implies_then|pe_order_runnable_left,
data_set);
custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc->container,
pcmk__op_key(rsc->container->id, RSC_STOP, 0),
NULL, pe_order_implies_first, data_set);
if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
pcmk__new_colocation("resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL, true, data_set);
}
}
if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* Don't allow remote nodes to run stonith devices or remote
         * connection resources.
         */
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
void
native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", constraint->id);
return;
} else if (constraint->rsc_rh == NULL) {
pe_err("rsc_rh was NULL for %s", constraint->id);
return;
}
pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
rsc_rh->id);
rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
}
enum filter_colocation_res
filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh,
pcmk__colocation_t *constraint, gboolean preview)
{
/* rh side must be allocated before we can process constraint */
if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) {
return influence_nothing;
}
if ((constraint->role_lh >= RSC_ROLE_UNPROMOTED) &&
rsc_lh->parent && pcmk_is_set(rsc_lh->parent->flags, pe_rsc_promotable)
&& !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
/* LH and RH resources have already been allocated, place the correct
* priority on LH rsc for the given promotable clone resource role */
return influence_rsc_priority;
}
if (!preview && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
// Log an error if we violated a mandatory colocation constraint
const pe_node_t *rh_node = rsc_rh->allocated_to;
if (rsc_lh->allocated_to == NULL) {
// Dependent resource isn't allocated, so constraint doesn't matter
return influence_nothing;
}
if (constraint->score >= INFINITY) {
// Dependent resource must colocate with rh_node
if ((rh_node == NULL)
|| (rh_node->details != rsc_lh->allocated_to->details)) {
crm_err("%s must be colocated with %s but is not (%s vs. %s)",
rsc_lh->id, rsc_rh->id,
rsc_lh->allocated_to->details->uname,
(rh_node? rh_node->details->uname : "unallocated"));
}
} else if (constraint->score <= -INFINITY) {
// Dependent resource must anti-colocate with rh_node
if ((rh_node != NULL)
&& (rsc_lh->allocated_to->details == rh_node->details)) {
crm_err("%s and %s must be anti-colocated but are allocated "
"to the same node (%s)",
rsc_lh->id, rsc_rh->id, rh_node->details->uname);
}
}
return influence_nothing;
}
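    /* A positive colocation applies only if any specified roles will match the
     * resources' next roles; a negative one applies only if they will not.
     */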
if (constraint->score > 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
        crm_trace("LH: Skipping constraint: \"%s\" state filter, next role is %s",
                  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
return influence_nothing;
}
if (constraint->score > 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
role2text(constraint->role_lh));
return influence_nothing;
}
if (constraint->score < 0
&& constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
role2text(constraint->role_rh));
return influence_nothing;
}
return influence_rsc_location;
}
static void
influence_priority(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
pcmk__colocation_t *constraint)
{
const char *rh_value = NULL;
const char *lh_value = NULL;
const char *attribute = CRM_ATTR_ID;
int score_multiplier = 1;
if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
return;
}
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
if (!pcmk__str_eq(lh_value, rh_value, pcmk__str_casei)) {
if ((constraint->score == INFINITY)
&& (constraint->role_lh == RSC_ROLE_PROMOTED)) {
rsc_lh->priority = -INFINITY;
}
return;
}
if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
return;
}
if (constraint->role_lh == RSC_ROLE_UNPROMOTED) {
score_multiplier = -1;
}
rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
rsc_lh->priority);
}
static void
colocation_match(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
pcmk__colocation_t *constraint)
{
const char *attribute = CRM_ATTR_ID;
const char *value = NULL;
GHashTable *work = NULL;
GHashTableIter iter;
pe_node_t *node = NULL;
if (constraint->node_attribute != NULL) {
attribute = constraint->node_attribute;
}
if (rsc_rh->allocated_to) {
value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
} else if (constraint->score < 0) {
// Nothing to do (anti-colocation with something that is not running)
return;
}
work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
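    /* Apply the scores to a copy of the dependent's allowed nodes, so the
     * changes can be rolled back if they would leave it with nowhere to run.
     */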
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (rsc_rh->allocated_to == NULL) {
pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
constraint->id, rsc_lh->id, node->details->uname,
constraint->score, rsc_rh->id);
node->weight = pe__add_scores(-constraint->score, node->weight);
} else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value, pcmk__str_casei)) {
if (constraint->score < CRM_SCORE_INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
constraint->id, rsc_lh->id,
node->details->uname, constraint->score);
node->weight = pe__add_scores(constraint->score, node->weight);
}
} else if (constraint->score >= CRM_SCORE_INFINITY) {
pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
constraint->id, rsc_lh->id, node->details->uname,
constraint->score, attribute);
node->weight = pe__add_scores(-constraint->score, node->weight);
}
}
if (can_run_any(work)
|| constraint->score <= -INFINITY || constraint->score >= INFINITY) {
g_hash_table_destroy(rsc_lh->allowed_nodes);
rsc_lh->allowed_nodes = work;
work = NULL;
} else {
pe_rsc_info(rsc_lh,
"%s: Rolling back scores from %s (no available nodes)",
rsc_lh->id, rsc_rh->id);
}
if (work) {
g_hash_table_destroy(work);
}
}
void
native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
enum filter_colocation_res filter_results;
CRM_ASSERT(rsc_lh);
CRM_ASSERT(rsc_rh);
filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
((constraint->score > 0)? "Colocating" : "Anti-colocating"),
rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
switch (filter_results) {
case influence_rsc_priority:
influence_priority(rsc_lh, rsc_rh, constraint);
break;
case influence_rsc_location:
colocation_match(rsc_lh, rsc_rh, constraint);
break;
case influence_nothing:
default:
return;
}
}
static gboolean
filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
{
if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
role2text(rsc_ticket->role_lh));
return FALSE;
}
return TRUE;
}
void
rsc_ticket_constraint(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
{
if (rsc_ticket == NULL) {
pe_err("rsc_ticket was NULL");
return;
}
if (rsc_lh == NULL) {
pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
return;
}
if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
return;
}
if (rsc_lh->children) {
GList *gIter = rsc_lh->children;
pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
}
return;
}
pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
role2text(rsc_ticket->role_lh));
if ((rsc_ticket->ticket->granted == FALSE)
&& (rsc_lh->running_on != NULL)) {
GList *gIter = NULL;
switch (rsc_ticket->loss_policy) {
case loss_ticket_stop:
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
break;
case loss_ticket_demote:
// Promotion score will be set to -INFINITY in promotion_order()
if (rsc_ticket->role_lh != RSC_ROLE_PROMOTED) {
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
}
break;
case loss_ticket_fence:
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
}
break;
case loss_ticket_freeze:
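                // Freeze: make the resource unmanaged and block further actions on it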
if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
return;
}
if (rsc_lh->running_on != NULL) {
pe__clear_resource_flags(rsc_lh, pe_rsc_managed);
pe__set_resource_flags(rsc_lh, pe_rsc_block);
}
break;
}
} else if (rsc_ticket->ticket->granted == FALSE) {
if ((rsc_ticket->role_lh != RSC_ROLE_PROMOTED)
|| (rsc_ticket->loss_policy == loss_ticket_stop)) {
resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
}
} else if (rsc_ticket->ticket->standby) {
if ((rsc_ticket->role_lh != RSC_ROLE_PROMOTED)
|| (rsc_ticket->loss_policy == loss_ticket_stop)) {
resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
}
}
}
enum pe_action_flags
native_action_flags(pe_action_t * action, pe_node_t * node)
{
return action->flags;
}
static inline bool
is_primitive_action(pe_action_t *action)
{
return action && action->rsc && (action->rsc->variant == pe_native);
}
/*!
* \internal
 * \brief Set action bits appropriately when pe_order_restart is used
 *
 * \param[in] first   'First' action in an ordering with pe_order_restart
 * \param[in] then    'Then' action in an ordering with pe_order_restart
 * \param[in] filter  What ordering flags to care about
 *
 * \note pe_order_restart is set for "stop resource before starting it" and
 *       "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pe_action_t *first, pe_action_t *then,
enum pe_action_flags filter)
{
const char *reason = NULL;
CRM_ASSERT(is_primitive_action(first));
CRM_ASSERT(is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pe_action_optional)
&& !pcmk_is_set(then->flags, pe_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable start of managed resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pe_action_runnable)
&& !pcmk_is_set(then->flags, pe_action_runnable)
&& pcmk_is_set(then->rsc->flags, pe_rsc_managed)
&& pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pe_action_runnable)) {
pe_action_implies(first, then, pe_action_optional);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pe_action_optional)) {
pe_action_implies(first, then, pe_action_optional);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
pe_action_implies(first, then, pe_action_migrate_runnable);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pe_action_optional)
&& !pcmk_is_set(first->flags, pe_action_runnable)) {
pe_action_implies(then, first, pe_action_runnable);
}
}
enum pe_graph_flags
native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
enum pe_action_flags flags, enum pe_action_flags filter,
enum pe_ordering type, pe_working_set_t *data_set)
{
/* flags == get_action_flags(first, then_node) called from update_action() */
enum pe_graph_flags changed = pe_graph_none;
enum pe_action_flags then_flags = then->flags;
enum pe_action_flags first_flags = first->flags;
crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, then->uuid, then->flags);
if (type & pe_order_asymmetrical) {
pe_resource_t *then_rsc = then->rsc;
enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
if (!then_rsc) {
/* ignore */
} else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
            /* Ignore. If 'then' is supposed to be stopped after 'first', but
             * 'then' is already stopped, there is nothing to be done when
             * asymmetrical.
             */
} else if ((then_rsc_role >= RSC_ROLE_STARTED)
&& pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
&& pcmk_is_set(then->flags, pe_action_optional)
&& then->node
&& pcmk__list_of_1(then_rsc->running_on)
&& then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
/* Ignore. If 'then' is supposed to be started after 'first', but
* 'then' is already started, there is nothing to be done when
* asymmetrical -- unless the start is mandatory, which indicates
* the resource is restarting, and the ordering is still needed.
*/
} else if (!(first->flags & pe_action_runnable)) {
/* prevent 'then' action from happening if 'first' is not runnable and
* 'then' has not yet occurred. */
pe_action_implies(then, first, pe_action_optional);
pe_action_implies(then, first, pe_action_runnable);
pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
} else {
/* ignore... then is allowed to start/stop if it wants to. */
}
}
if (type & pe_order_implies_first) {
if (pcmk_is_set(filter, pe_action_optional)
&& !pcmk_is_set(flags /* Should be then_flags? */, pe_action_optional)) {
// Needs pcmk_is_set(first_flags, pe_action_optional) too?
pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_optional);
}
if (pcmk_is_set(flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_optional)) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_migrate_runnable);
}
}
if (type & pe_order_promoted_implies_first) {
if ((filter & pe_action_optional) &&
((then->flags & pe_action_optional) == FALSE) &&
(then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {
pe_action_implies(first, then, pe_action_optional);
if (pcmk_is_set(first->flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_migrate_runnable);
}
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
}
}
if ((type & pe_order_implies_first_migratable)
&& pcmk_is_set(filter, pe_action_optional)) {
if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
((then->flags & pe_action_runnable) == FALSE)) {
            pe_rsc_trace(then->rsc,
                         "Unset runnable on %s because %s is neither runnable nor migratable",
                         first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_runnable);
}
if ((then->flags & pe_action_optional) == 0) {
pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
pe_action_implies(first, then, pe_action_optional);
}
}
if ((type & pe_order_pseudo_left)
&& pcmk_is_set(filter, pe_action_optional)) {
if ((first->flags & pe_action_runnable) == FALSE) {
pe_action_implies(then, first, pe_action_migrate_runnable);
pe__clear_action_flags(then, pe_action_pseudo);
pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
}
}
if (pcmk_is_set(type, pe_order_runnable_left)
&& pcmk_is_set(filter, pe_action_runnable)
&& pcmk_is_set(then->flags, pe_action_runnable)
&& !pcmk_is_set(flags, pe_action_runnable)) {
pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
pe_action_implies(then, first, pe_action_runnable);
pe_action_implies(then, first, pe_action_migrate_runnable);
}
if (pcmk_is_set(type, pe_order_implies_then)
&& pcmk_is_set(filter, pe_action_optional)
&& pcmk_is_set(then->flags, pe_action_optional)
&& !pcmk_is_set(flags, pe_action_optional)) {
/* in this case, treat migrate_runnable as if first is optional */
if (!pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
pe_action_implies(then, first, pe_action_optional);
}
}
if (pcmk_is_set(type, pe_order_restart)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
pe_rsc_trace(then->rsc,
"Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
then_flags, first->uuid, first->flags);
if(then->rsc && then->rsc->parent) {
/* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
update_action(then, data_set);
}
}
if (first_flags != first->flags) {
pe__set_graph_flags(changed, first, pe_graph_updated_first);
pe_rsc_trace(first->rsc,
"First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
first->uuid, first->node ? first->node->details->uname : "[none]",
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
void
native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
GList *gIter = NULL;
bool need_role = false;
CRM_CHECK((constraint != NULL) && (rsc != NULL), return);
// If a role was specified, ensure constraint is applicable
need_role = (constraint->role_filter > RSC_ROLE_UNKNOWN);
if (need_role && (constraint->role_filter != rsc->next_role)) {
pe_rsc_trace(rsc,
"Not applying %s to %s because role will be %s not %s",
constraint->id, rsc->id, role2text(rsc->next_role),
role2text(constraint->role_filter));
return;
}
if (constraint->node_list_rh == NULL) {
pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match",
constraint->id, rsc->id);
return;
}
pe_rsc_trace(rsc, "Applying %s%s%s to %s", constraint->id,
(need_role? " for role " : ""),
(need_role? role2text(constraint->role_filter) : ""), rsc->id);
for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pe_node_t *other_node = NULL;
other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (other_node != NULL) {
pe_rsc_trace(rsc, "* + %d on %s",
node->weight, node->details->uname);
other_node->weight = pe__add_scores(other_node->weight,
node->weight);
} else {
pe_rsc_trace(rsc, "* = %d on %s",
node->weight, node->details->uname);
other_node = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
}
if (other_node->rsc_discover_mode < constraint->discover_mode) {
if (constraint->discover_mode == pe_discover_exclusive) {
rsc->exclusive_discover = TRUE;
}
/* exclusive > never > always... always is default */
other_node->rsc_discover_mode = constraint->discover_mode;
}
}
}
void
native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GList *gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
graph_element_from_action(action, data_set);
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
}
#define STOP_SANITY_ASSERT(lineno) do { \
if(current && current->details->unclean) { \
/* It will be a pseudo op */ \
} else if(stop == NULL) { \
crm_err("%s:%d: No stop action exists for %s", \
__func__, lineno, rsc->id); \
CRM_ASSERT(stop != NULL); \
} else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
crm_err("%s:%d: Action %s is still optional", \
__func__, lineno, stop->uuid); \
CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
} \
} while(0)
void
LogActions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pcmk__output_t *out = data_set->priv;
pe_node_t *next = NULL;
pe_node_t *current = NULL;
gboolean moving = FALSE;
if(rsc->variant == pe_container) {
pcmk__bundle_log_actions(rsc, data_set);
return;
}
if (rsc->children) {
GList *gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
LogActions(child_rsc, data_set);
}
return;
}
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
if (rsc->role == RSC_ROLE_STOPPED) {
            /* This can occur when resources are being recovered; we fiddle
             * with the current role in native_create_actions().
             */
rsc->role = RSC_ROLE_STARTED;
}
}
if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't log stopped orphans */
return;
}
out->message(out, "rsc-action", rsc, current, next, moving);
}
gboolean
StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GList *gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
pe_action_t *stop;
if (rsc->partial_migration_target) {
if (rsc->partial_migration_target->details == current->details) {
pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
next->details->uname, rsc->id);
continue;
} else {
pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
optional = FALSE;
}
}
pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
stop = stop_action(rsc, current, optional);
if(rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", TRUE);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
update_action_flags(stop, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, current, optional, data_set);
}
if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
order_actions(stop, unfence, pe_order_implies_first);
if (!node_has_been_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
}
}
}
return TRUE;
}
static void
order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
enum pe_ordering order, pe_working_set_t *data_set)
{
/* When unfencing is in use, we order unfence actions before any probe or
* start of resources that require unfencing, and also of fence devices.
*
* This might seem to violate the principle that fence devices require
* only quorum. However, fence agents that unfence often don't have enough
* information to even probe or start unless the node is first unfenced.
*/
if (is_unfence_device(rsc, data_set)
|| pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
/* Start with an optional ordering. Requiring unfencing would result in
* the node being unfenced, and all its resources being stopped,
* whenever a new resource is added -- which would be highly suboptimal.
*/
pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
order_actions(unfence, action, order);
if (!node_has_been_unfenced(node)) {
// But unfencing is required if it has never been done
char *reason = crm_strdup_printf("required by %s %s",
rsc->id, action->task);
trigger_unfencing(NULL, node, reason, NULL, data_set);
free(reason);
}
}
}
gboolean
StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
pe_action_t *start = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
start = start_action(rsc, next, TRUE);
order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
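    // If a mandatory start was requested and the start is runnable, make it required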
if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
update_action_flags(start, pe_action_optional | pe_action_clear,
__func__, __LINE__);
}
return TRUE;
}
gboolean
PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GList *gIter = NULL;
gboolean runnable = TRUE;
GList *action_list = NULL;
CRM_ASSERT(rsc);
CRM_CHECK(next != NULL, return FALSE);
pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
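    // The promotion is runnable only if every start of the resource on this node is runnable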
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *start = (pe_action_t *) gIter->data;
if (!pcmk_is_set(start->flags, pe_action_runnable)) {
runnable = FALSE;
}
}
g_list_free(action_list);
if (runnable) {
promote_action(rsc, next, optional);
return TRUE;
}
pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *promote = (pe_action_t *) gIter->data;
update_action_flags(promote, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
g_list_free(action_list);
return TRUE;
}
gboolean
DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GList *gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
/* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
demote_action(rsc, current, optional);
}
return TRUE;
}
gboolean
RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
CRM_CHECK(FALSE, return FALSE);
return FALSE;
}
gboolean
NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
return FALSE;
}
gboolean
DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
{
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
return FALSE;
} else if (node == NULL) {
pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
return FALSE;
} else if (node->details->unclean || node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
node->details->uname);
return FALSE;
}
crm_notice("Removing %s from %s", rsc->id, node->details->uname);
delete_action(rsc, node, optional);
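    // Order the delete after any stop of the resource and before any start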
new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
optional ? pe_order_implies_then : pe_order_optional, data_set);
new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
optional ? pe_order_implies_then : pe_order_optional, data_set);
return TRUE;
}
gboolean
native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
enum pe_ordering flags = pe_order_optional;
char *key = NULL;
pe_action_t *probe = NULL;
pe_node_t *running = NULL;
pe_node_t *allowed = NULL;
pe_resource_t *top = uber_parent(rsc);
static const char *rc_promoted = NULL;
static const char *rc_inactive = NULL;
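    // Initialize the expected-result strings once, since they never change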
if (rc_inactive == NULL) {
rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
}
CRM_CHECK(node != NULL, return FALSE);
if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
return FALSE;
}
if (pe__is_guest_or_remote_node(node)) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
rsc->id, node->details->id);
return FALSE;
} else if (pe__is_guest_node(node)
&& pe__resource_contains_guest_node(data_set, rsc)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
rsc->id, node->details->id);
return FALSE;
} else if (rsc->is_remote_node) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
rsc->id, node->details->id);
return FALSE;
}
}
if (rsc->children) {
GList *gIter = NULL;
gboolean any_created = FALSE;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
|| any_created;
}
return any_created;
} else if ((rsc->container) && (!rsc->is_remote_node)) {
pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
return FALSE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
return FALSE;
}
// Check whether resource is already known on node
if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
return FALSE;
}
allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (rsc->exclusive_discover || top->exclusive_discover) {
if (allowed == NULL) {
/* exclusive discover is enabled and this node is not in the allowed list. */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
return FALSE;
} else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
/* exclusive discover is enabled and this node is not marked
* as a node this resource should be discovered on */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
return FALSE;
}
}
if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
/* If this node was allowed to host this resource it would
* have been explicitly added to the 'allowed_nodes' list.
* However it wasn't and the node has discovery disabled, so
* no need to probe for this resource.
*/
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
return FALSE;
}
if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
/* this resource is marked as not needing to be discovered on this node */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
return FALSE;
}
if (pe__is_guest_node(node)) {
pe_resource_t *remote = node->details->remote_rsc->container;
if(remote->role == RSC_ROLE_STOPPED) {
/* If the container is stopped, then we know anything that
* might have been inside it is also stopped and there is
* no need to probe.
*
* If we don't know the container's state on the target
* either:
*
* - the container is running, the transition will abort
* and we'll end up in a different case next time, or
*
* - the container is stopped
*
* Either way there is no need to probe.
*
*/
if(remote->allocated_to
&& g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
/* For safety, we order the 'rsc' start after 'remote'
* has been probed.
*
* Using 'top' helps for groups, but we may need to
* follow the start's ordering chain backwards.
*/
custom_action_order(remote,
pcmk__op_key(remote->id, RSC_STATUS, 0),
NULL, top,
pcmk__op_key(top->id, RSC_START, 0), NULL,
pe_order_optional, data_set);
}
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
rsc->id, node->details->id, remote->id);
return FALSE;
            /* Here we really want to check whether remote->stop is required,
             * but that information doesn't exist yet.
             */
} else if(node->details->remote_requires_reset
|| node->details->unclean
|| pcmk_is_set(remote->flags, pe_rsc_failed)
|| remote->next_role == RSC_ROLE_STOPPED
|| (remote->allocated_to
&& pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
) {
/* The container is stopping or restarting, don't start
* 'rsc' until 'remote' stops as this also implies that
* 'rsc' is stopped - avoiding the need to probe
*/
custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
NULL, top, pcmk__op_key(top->id, RSC_START, 0),
NULL, pe_order_optional, data_set);
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
rsc->id, node->details->id, remote->id);
return FALSE;
/* } else {
* The container is running so there is no problem probing it
*/
}
}
key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
update_action_flags(probe, pe_action_optional | pe_action_clear, __func__,
__LINE__);
order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
/*
* We need to know if it's running_on (not just known_on) this node
* to correctly determine the target rc.
*/
running = pe_find_node_id(rsc->running_on, node->details->id);
if (running == NULL) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
} else if (rsc->role == RSC_ROLE_PROMOTED) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
}
crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
pcmk_is_set(probe->flags, pe_action_runnable), rsc->running_on);
if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
top = rsc;
} else {
crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
}
if (!pcmk_is_set(probe->flags, pe_action_runnable)
&& (rsc->running_on == NULL)) {
/* Prevent the start from occurring if rsc isn't active, but
* don't cause it to stop if it was active already
*/
pe__set_order_flags(flags, pe_order_runnable_left);
}
custom_action_order(rsc, NULL, probe,
top, pcmk__op_key(top->id, RSC_START, 0), NULL,
flags, data_set);
- /* Before any reloads, if they exist */
+ // Order the probe before any agent reload
custom_action_order(rsc, NULL, probe,
top, reload_key(rsc), NULL,
pe_order_optional, data_set);
#if 0
// complete is always null currently
if (!is_unfence_device(rsc, data_set)) {
/* Normally rsc.start depends on probe complete which depends
* on rsc.probe. But this can't be the case for fence devices
* with unfencing, as it would create graph loops.
*
* So instead we explicitly order 'rsc.probe then rsc.start'
*/
order_actions(probe, complete, pe_order_implies_then);
}
#endif
return TRUE;
}
/*!
* \internal
* \brief Check whether a resource is known on a particular node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return TRUE if resource (or parent if an anonymous clone) is known
*/
static bool
rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
{
if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
return TRUE;
} else if ((rsc->variant == pe_native)
&& pe_rsc_is_anon_clone(rsc->parent)
&& pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
/* We check only the parent, not the uber-parent, because we cannot
* assume that the resource is known if it is in an anonymously cloned
* group (which may be only partially known).
*/
return TRUE;
}
return FALSE;
}
/*!
* \internal
* \brief Order a resource's start and promote actions relative to fencing
*
* \param[in] rsc Resource to be ordered
* \param[in] stonith_op Fence action
* \param[in] data_set Cluster information
*/
static void
native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
pe_node_t *target;
GList *gIter = NULL;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
switch (action->needs) {
case rsc_req_nothing:
// Anything other than start or promote requires nothing
break;
case rsc_req_stonith:
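                // Any action requiring fencing is ordered after the fencing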
order_actions(stonith_op, action, pe_order_optional);
break;
case rsc_req_quorum:
if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
&& pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
&& !rsc_is_known_on(rsc, target)) {
/* If we don't know the status of the resource on the node
* we're about to shoot, we have to assume it may be active
* there. Order the resource start after the fencing. This
* is analogous to waiting for all the probes for a resource
* to complete before starting it.
*
* The most likely explanation is that the DC died and took
* its status with it.
*/
pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
target->details->uname);
order_actions(stonith_op, action,
pe_order_optional | pe_order_runnable_left);
}
break;
}
}
}
static void
native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
GList *gIter = NULL;
GList *action_list = NULL;
bool order_implicit = false;
pe_resource_t *top = uber_parent(rsc);
pe_action_t *parent_stop = NULL;
pe_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Get a list of stop actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
/* If resource requires fencing, implicit actions must occur after fencing.
*
* Implied stops and demotes of resources running on guest nodes are always
* ordered after fencing, even if the resource does not require fencing,
* because guest node "fencing" is actually just a resource stop.
*/
if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
|| pe__is_guest_node(target)) {
order_implicit = true;
}
if (action_list && order_implicit) {
parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
}
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
// The stop would never complete, so convert it into a pseudo-action.
update_action_flags(action, pe_action_pseudo|pe_action_runnable,
__func__, __LINE__);
if (order_implicit) {
update_action_flags(action, pe_action_implied_by_stonith,
__func__, __LINE__);
/* Order the stonith before the parent stop (if any).
*
* Also order the stonith before the resource stop, unless the
* resource is inside a bundle -- that would cause a graph loop.
* We can rely on the parent stop's ordering instead.
*
* User constraints must not order a resource in a guest node
* relative to the guest node container resource. The
* pe_order_preserve flag marks constraints as generated by the
* cluster and thus immune to that check (and is irrelevant if
* target is not a guest).
*/
if (!pe_rsc_is_bundled(rsc)) {
order_actions(stonith_op, action, pe_order_preserve);
}
order_actions(stonith_op, parent_stop, pe_order_preserve);
}
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
rsc->id, (order_implicit? "after" : "because"),
target->details->uname);
} else {
crm_info("%s is implicit %s %s is fenced",
action->uuid, (order_implicit? "after" : "because"),
target->details->uname);
}
if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
/* Create a second notification that will be delivered
* immediately after the node is fenced
*
* Basic problem:
* - C is a clone active on the node to be shot and stopping on another
* - R is a resource that depends on C
*
* + C.stop depends on R.stop
* + C.stopped depends on STONITH
* + C.notify depends on C.stopped
* + C.healthy depends on C.notify
* + R.stop depends on C.healthy
*
* The extra notification here changes
* + C.healthy depends on C.notify
* into:
* + C.healthy depends on C.notify'
* + C.notify' depends on STONITH'
* thus breaking the loop
*/
create_secondary_notification(action, rsc, stonith_op, data_set);
}
        /* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
           However, given group(rA, rB) running on nodeX where rB's stop has failed,
A := stop healthy resource (rA.stop)
B := stop failed resource (pseudo operation B.stop)
C := stonith nodeX
A requires B, B requires C, C requires A
This loop would prevent the cluster from making progress.
This block creates the "C requires A" dependency and therefore must (at least
for now) be disabled.
Instead, run the block above and treat all resources on nodeX as B would be
(marked as a pseudo op depending on the STONITH).
TODO: Break the "A requires B" dependency in update_action() and re-enable this block
} else if(is_stonith == FALSE) {
crm_info("Moving healthy resource %s"
" off %s before fencing",
rsc->id, node->details->uname);
* stop healthy resources before the
* stonith op
*
custom_action_order(
rsc, stop_key(rsc), NULL,
NULL,strdup(CRM_OP_FENCE),stonith_op,
pe_order_optional, data_set);
*/
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
|| pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_info(rsc,
"Demote of failed resource %s is implicit after %s is fenced",
rsc->id, target->details->uname);
} else {
pe_rsc_info(rsc, "%s is implicit after %s is fenced",
action->uuid, target->details->uname);
}
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
update_action_flags(action, pe_action_pseudo|pe_action_runnable,
__func__, __LINE__);
if (pe_rsc_is_bundled(rsc)) {
/* Do nothing, let the recovery be ordered after the parent's implied stop */
} else if (order_implicit) {
order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
}
}
}
g_list_free(action_list);
}
void
rsc_stonith_ordering(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
if (rsc->children) {
GList *gIter = NULL;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
rsc_stonith_ordering(child_rsc, stonith_op, data_set);
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
} else {
native_start_constraints(rsc, stonith_op, data_set);
native_stop_constraints(rsc, stonith_op, data_set);
}
}
void
ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set)
{
GList *gIter = NULL;
pe_action_t *reload = NULL;
if (rsc->children) {
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
ReloadRsc(child_rsc, node, data_set);
}
return;
} else if (rsc->variant > pe_native) {
/* Complex resource with no children */
return;
} else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
return;
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
/* We don't need to specify any particular actions here, normal failure
* recovery will apply.
*/
- pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id);
+ pe_rsc_trace(rsc, "%s: preventing agent reload because failed",
+ rsc->id);
return;
} else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
/* If a resource's configuration changed while a start was pending,
* force a full restart.
*/
- pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id);
+ pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
+ rsc->id);
stop_action(rsc, node, FALSE);
return;
} else if (node == NULL) {
pe_rsc_trace(rsc, "%s: not active", rsc->id);
return;
}
pe_rsc_trace(rsc, "Processing %s", rsc->id);
pe__set_resource_flags(rsc, pe_rsc_reload);
- reload = custom_action(
- rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
+ reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
+ FALSE, TRUE, data_set);
pe_action_set_reason(reload, "resource definition change", FALSE);
custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pe_order_optional|pe_order_then_cancels_first,
data_set);
custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pe_order_optional|pe_order_then_cancels_first,
data_set);
}
void
native_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
pe_resource_t *parent;
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
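    // Add the container meta-attribute if this resource or any ancestor has a container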
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container) {
crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
}
}
}
diff --git a/lib/pacemaker/pcmk_sched_utils.c b/lib/pacemaker/pcmk_sched_utils.c
index 5fbb82d3d3..b97c0304aa 100644
--- a/lib/pacemaker/pcmk_sched_utils.c
+++ b/lib/pacemaker/pcmk_sched_utils.c
@@ -1,785 +1,792 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <crm/lrmd.h>           // lrmd_event_data_t
#include <pacemaker-internal.h>
pe__location_t *
rsc2node_new(const char *id, pe_resource_t *rsc,
int node_weight, const char *discover_mode,
pe_node_t *foo_node, pe_working_set_t *data_set)
{
pe__location_t *new_con = NULL;
if (rsc == NULL || id == NULL) {
pe_err("Invalid constraint %s for rsc=%p", crm_str(id), rsc);
return NULL;
} else if (foo_node == NULL) {
CRM_CHECK(node_weight == 0, return NULL);
}
new_con = calloc(1, sizeof(pe__location_t));
if (new_con != NULL) {
new_con->id = strdup(id);
new_con->rsc_lh = rsc;
new_con->node_list_rh = NULL;
new_con->role_filter = RSC_ROLE_UNKNOWN;
if (pcmk__str_eq(discover_mode, "always", pcmk__str_null_matches | pcmk__str_casei)) {
new_con->discover_mode = pe_discover_always;
} else if (pcmk__str_eq(discover_mode, "never", pcmk__str_casei)) {
new_con->discover_mode = pe_discover_never;
} else if (pcmk__str_eq(discover_mode, "exclusive", pcmk__str_casei)) {
new_con->discover_mode = pe_discover_exclusive;
rsc->exclusive_discover = TRUE;
} else {
pe_err("Invalid %s value %s in location constraint", XML_LOCATION_ATTR_DISCOVERY, discover_mode);
}
if (foo_node != NULL) {
pe_node_t *copy = pe__copy_node(foo_node);
copy->weight = node_weight;
new_con->node_list_rh = g_list_prepend(NULL, copy);
}
data_set->placement_constraints = g_list_prepend(data_set->placement_constraints, new_con);
rsc->rsc_location = g_list_prepend(rsc->rsc_location, new_con);
}
return new_con;
}
gboolean
can_run_resources(const pe_node_t * node)
{
if (node == NULL) {
return FALSE;
}
#if 0
if (node->weight < 0) {
return FALSE;
}
#endif
if (node->details->online == FALSE
|| node->details->shutdown || node->details->unclean
|| node->details->standby || node->details->maintenance) {
crm_trace("%s: online=%d, unclean=%d, standby=%d, maintenance=%d",
node->details->uname, node->details->online,
node->details->unclean, node->details->standby, node->details->maintenance);
return FALSE;
}
return TRUE;
}
/*!
* \internal
* \brief Copy a hash table of node objects
*
* \param[in] nodes Hash table to copy
*
* \return New copy of nodes (or NULL if nodes is NULL)
*/
GHashTable *
pcmk__copy_node_table(GHashTable *nodes)
{
GHashTable *new_table = NULL;
GHashTableIter iter;
pe_node_t *node = NULL;
if (nodes == NULL) {
return NULL;
}
new_table = pcmk__strkey_table(NULL, free);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
pe_node_t *new_node = pe__copy_node(node);
g_hash_table_insert(new_table, (gpointer) new_node->details->id,
new_node);
}
return new_table;
}
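/* Illustration only (not part of this change): a typical use is taking a
 * private, modifiable snapshot of a resource's allowed nodes, assuming "rsc"
 * is a pe_resource_t with a populated allowed_nodes table:
 *
 *     GHashTable *allowed = pcmk__copy_node_table(rsc->allowed_nodes);
 *     // ... adjust the copied weights without touching the originals ...
 *     g_hash_table_destroy(allowed);
 */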
/*!
* \internal
* \brief Copy a list of node objects
*
* \param[in] list List to copy
* \param[in] reset Set copies' scores to 0
*
* \return New list of shallow copies of nodes in original list
*/
GList *
pcmk__copy_node_list(const GList *list, bool reset)
{
GList *result = NULL;
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *new_node = NULL;
pe_node_t *this_node = (pe_node_t *) gIter->data;
new_node = pe__copy_node(this_node);
if (reset) {
new_node->weight = 0;
}
result = g_list_prepend(result, new_node);
}
return result;
}
struct node_weight_s {
pe_node_t *active;
pe_working_set_t *data_set;
};
/* return -1 if 'a' is more preferred
* return 1 if 'b' is more preferred
*/
static gint
sort_node_weight(gconstpointer a, gconstpointer b, gpointer data)
{
const pe_node_t *node1 = (const pe_node_t *)a;
const pe_node_t *node2 = (const pe_node_t *)b;
struct node_weight_s *nw = data;
int node1_weight = 0;
int node2_weight = 0;
int result = 0;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
node1_weight = node1->weight;
node2_weight = node2->weight;
if (can_run_resources(node1) == FALSE) {
node1_weight = -INFINITY;
}
if (can_run_resources(node2) == FALSE) {
node2_weight = -INFINITY;
}
if (node1_weight > node2_weight) {
crm_trace("%s (%d) > %s (%d) : weight",
node1->details->uname, node1_weight, node2->details->uname, node2_weight);
return -1;
}
if (node1_weight < node2_weight) {
crm_trace("%s (%d) < %s (%d) : weight",
node1->details->uname, node1_weight, node2->details->uname, node2_weight);
return 1;
}
crm_trace("%s (%d) == %s (%d) : weight",
node1->details->uname, node1_weight, node2->details->uname, node2_weight);
if (pcmk__str_eq(nw->data_set->placement_strategy, "minimal", pcmk__str_casei)) {
goto equal;
}
if (pcmk__str_eq(nw->data_set->placement_strategy, "balanced", pcmk__str_casei)) {
result = compare_capacity(node1, node2);
if (result < 0) {
crm_trace("%s > %s : capacity (%d)",
node1->details->uname, node2->details->uname, result);
return -1;
} else if (result > 0) {
crm_trace("%s < %s : capacity (%d)",
node1->details->uname, node2->details->uname, result);
return 1;
}
}
/* now try to balance resources across the cluster */
if (node1->details->num_resources < node2->details->num_resources) {
crm_trace("%s (%d) > %s (%d) : resources",
node1->details->uname, node1->details->num_resources,
node2->details->uname, node2->details->num_resources);
return -1;
} else if (node1->details->num_resources > node2->details->num_resources) {
crm_trace("%s (%d) < %s (%d) : resources",
node1->details->uname, node1->details->num_resources,
node2->details->uname, node2->details->num_resources);
return 1;
}
if (nw->active && nw->active->details == node1->details) {
crm_trace("%s (%d) > %s (%d) : active",
node1->details->uname, node1->details->num_resources,
node2->details->uname, node2->details->num_resources);
return -1;
} else if (nw->active && nw->active->details == node2->details) {
crm_trace("%s (%d) < %s (%d) : active",
node1->details->uname, node1->details->num_resources,
node2->details->uname, node2->details->num_resources);
return 1;
}
equal:
crm_trace("%s = %s", node1->details->uname, node2->details->uname);
return strcmp(node1->details->uname, node2->details->uname);
}
GList *
sort_nodes_by_weight(GList *nodes, pe_node_t *active_node,
pe_working_set_t *data_set)
{
struct node_weight_s nw = { active_node, data_set };
return g_list_sort_with_data(nodes, sort_node_weight, &nw);
}
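/* Illustration only (not part of this change): sorting a throwaway copy of a
 * node list by weight, with no active node to prefer, assuming "nodes" and
 * "data_set" are already populated:
 *
 *     GList *copy = pcmk__copy_node_list(nodes, false);
 *     copy = sort_nodes_by_weight(copy, NULL, data_set);
 *     // the first element is now the most preferred node
 *     g_list_free_full(copy, free);
 */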
void
native_deallocate(pe_resource_t * rsc)
{
if (rsc->allocated_to) {
pe_node_t *old = rsc->allocated_to;
crm_info("Deallocating %s from %s", rsc->id, old->details->uname);
pe__set_resource_flags(rsc, pe_rsc_provisional);
rsc->allocated_to = NULL;
old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, rsc);
old->details->num_resources--;
/* old->count--; */
calculate_utilization(old->details->utilization, rsc->utilization, TRUE);
free(old);
}
}
gboolean
native_assign_node(pe_resource_t *rsc, pe_node_t *chosen, gboolean force)
{
pcmk__output_t *out = rsc->cluster->priv;
CRM_ASSERT(rsc->variant == pe_native);
if (force == FALSE && chosen != NULL) {
bool unset = FALSE;
if(chosen->weight < 0) {
unset = TRUE;
// Allow the graph to assume that the remote resource will come up
} else if (!can_run_resources(chosen) && !pe__is_guest_node(chosen)) {
unset = TRUE;
}
if(unset) {
crm_debug("All nodes for resource %s are unavailable"
", unclean or shutting down (%s: %d, %d)",
rsc->id, chosen->details->uname, can_run_resources(chosen), chosen->weight);
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
chosen = NULL;
}
}
/* todo: update the old node for each resource to reflect its
* new resource count
*/
native_deallocate(rsc);
pe__clear_resource_flags(rsc, pe_rsc_provisional);
if (chosen == NULL) {
GList *gIter = NULL;
char *rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
crm_debug("Could not allocate a node for %s", rsc->id);
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate");
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *op = (pe_action_t *) gIter->data;
const char *interval_ms_s = g_hash_table_lookup(op->meta, XML_LRM_ATTR_INTERVAL_MS);
crm_debug("Processing %s", op->uuid);
if(pcmk__str_eq(RSC_STOP, op->task, pcmk__str_casei)) {
update_action_flags(op, pe_action_optional | pe_action_clear,
__func__, __LINE__);
} else if(pcmk__str_eq(RSC_START, op->task, pcmk__str_casei)) {
update_action_flags(op, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
//pe__set_resource_flags(rsc, pe_rsc_block);
} else if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
if(pcmk__str_eq(rc_inactive, g_hash_table_lookup(op->meta, XML_ATTR_TE_TARGET_RC), pcmk__str_casei)) {
/* This is a recurring monitor for the stopped state, leave it alone */
} else {
/* Normal monitor operation, cancel it */
update_action_flags(op, pe_action_runnable | pe_action_clear,
__func__, __LINE__);
}
}
}
free(rc_inactive);
return FALSE;
}
crm_debug("Assigning %s to %s", chosen->details->uname, rsc->id);
rsc->allocated_to = pe__copy_node(chosen);
chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, rsc);
chosen->details->num_resources++;
chosen->count++;
calculate_utilization(chosen->details->utilization, rsc->utilization, FALSE);
if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
out->message(out, "resource-util", rsc, chosen, __func__);
}
return TRUE;
}
void
log_action(unsigned int log_level, const char *pre_text, pe_action_t * action, gboolean details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
const char *desc = NULL;
if (action == NULL) {
crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
node_uname = NULL;
node_uuid = NULL;
} else if (action->node != NULL) {
node_uname = action->node->details->uname;
node_uuid = action->node->details->id;
} else {
node_uname = "";
node_uuid = NULL;
}
switch (text2task(action->task)) {
case stonith_node:
case shutdown_crm:
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
desc = "Pseudo ";
} else if (pcmk_is_set(action->flags, pe_action_optional)) {
desc = "Optional ";
} else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
desc = "!!Non-Startable!! ";
} else if (pcmk_is_set(action->flags, pe_action_processed)) {
desc = "";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
default:
if (pcmk_is_set(action->flags, pe_action_optional)) {
desc = "Optional ";
} else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
desc = "Pseudo ";
} else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
desc = "!!Non-Startable!! ";
} else if (pcmk_is_set(action->flags, pe_action_processed)) {
desc = "";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(action->rsc? action->rsc->id : ""),
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
}
if (details) {
GList *gIter = NULL;
crm_trace("\t\t====== Preceding Actions");
gIter = action->actions_before;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) gIter->data;
log_action(log_level + 1, "\t\t", other->action, FALSE);
}
crm_trace("\t\t====== Subsequent Actions");
gIter = action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) gIter->data;
log_action(log_level + 1, "\t\t", other->action, FALSE);
}
crm_trace("\t\t====== End");
} else {
crm_trace("\t\t(before=%d, after=%d)",
g_list_length(action->actions_before), g_list_length(action->actions_after));
}
}
gboolean
can_run_any(GHashTable * nodes)
{
GHashTableIter iter;
pe_node_t *node = NULL;
if (nodes == NULL) {
return FALSE;
}
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (can_run_resources(node) && node->weight >= 0) {
return TRUE;
}
}
return FALSE;
}
pe_action_t *
create_pseudo_resource_op(pe_resource_t * rsc, const char *task, bool optional, bool runnable, pe_working_set_t *data_set)
{
pe_action_t *action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0),
task, NULL, optional, TRUE, data_set);
update_action_flags(action, pe_action_pseudo, __func__, __LINE__);
update_action_flags(action, pe_action_runnable, __func__, __LINE__);
if(runnable) {
update_action_flags(action, pe_action_runnable, __func__, __LINE__);
}
return action;
}
/*!
* \internal
* \brief Create an executor cancel op
*
* \param[in] rsc Resource of action to cancel
* \param[in] task Name of action to cancel
* \param[in] interval_ms Interval of action to cancel
* \param[in] node Node of action to cancel
* \param[in] data_set Working set of cluster
*
* \return Created op
*/
pe_action_t *
pe_cancel_op(pe_resource_t *rsc, const char *task, guint interval_ms,
pe_node_t *node, pe_working_set_t *data_set)
{
pe_action_t *cancel_op;
char *interval_ms_s = crm_strdup_printf("%u", interval_ms);
// @TODO dangerous if possible to schedule another action with this key
char *key = pcmk__op_key(rsc->id, task, interval_ms);
cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE,
data_set);
free(cancel_op->task);
cancel_op->task = strdup(RSC_CANCEL);
free(cancel_op->cancel_task);
cancel_op->cancel_task = strdup(task);
add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, task);
add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL_MS, interval_ms_s);
free(interval_ms_s);
return cancel_op;
}
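/* Illustration only (not part of this change): cancelling a hypothetical
 * 10-second recurring monitor on a node, assuming "rsc", "node", and
 * "data_set" are already populated:
 *
 *     pe_action_t *cancel = pe_cancel_op(rsc, CRMD_ACTION_STATUS, 10000,
 *                                        node, data_set);
 */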
/*!
* \internal
* \brief Create a shutdown op for a scheduler transition
*
* \param[in] node Node being shut down
* \param[in] data_set Working set of cluster
*
* \return Created op
*/
pe_action_t *
sched_shutdown_op(pe_node_t *node, pe_working_set_t *data_set)
{
char *shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN,
node->details->uname);
pe_action_t *shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN,
node, FALSE, TRUE, data_set);
crm_notice("Scheduling shutdown of node %s", node->details->uname);
shutdown_constraints(node, shutdown_op, data_set);
add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
return shutdown_op;
}
static char *
generate_transition_magic(const char *transition_key, int op_status, int op_rc)
{
CRM_CHECK(transition_key != NULL, return NULL);
return crm_strdup_printf("%d:%d;%s", op_status, op_rc, transition_key);
}
static void
append_digest(lrmd_event_data_t *op, xmlNode *update, const char *version,
const char *magic, int level)
{
/* this will enable us to later determine that the
* resource's parameters have changed and we should force
* a restart
*/
char *digest = NULL;
xmlNode *args_xml = NULL;
if (op->params == NULL) {
return;
}
args_xml = create_xml_node(NULL, XML_TAG_PARAMS);
g_hash_table_foreach(op->params, hash2field, args_xml);
pcmk__filter_op_for_digest(args_xml);
digest = calculate_operation_digest(args_xml, version);
#if 0
if (level < get_crm_log_level()
&& op->interval_ms == 0 && pcmk__str_eq(op->op_type, CRMD_ACTION_START, pcmk__str_none)) {
char *digest_source = dump_xml_unformatted(args_xml);
do_crm_log(level, "Calculated digest %s for %s (%s). Source: %s\n",
digest, ID(update), magic, digest_source);
free(digest_source);
}
#endif
crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest);
free_xml(args_xml);
free(digest);
}
#define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
/*!
* \internal
* \brief Create XML for resource operation history update
*
* \param[in,out] parent Parent XML node to add to
* \param[in,out] op Operation event data
* \param[in] caller_version DC feature set
* \param[in] target_rc Expected result of operation
* \param[in] node Name of node on which operation was performed
* \param[in] origin Arbitrary description of update source
* \param[in] level A log message will be logged at this level
*
* \return Newly created XML node for history update
*/
xmlNode *
pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
const char *caller_version, int target_rc,
const char *node, const char *origin, int level)
{
char *key = NULL;
char *magic = NULL;
char *op_id = NULL;
char *op_id_additional = NULL;
char *local_user_data = NULL;
const char *exit_reason = NULL;
xmlNode *xml_op = NULL;
const char *task = NULL;
CRM_CHECK(op != NULL, return NULL);
do_crm_log(level, "%s: Updating resource %s after %s op %s (interval=%u)",
origin, op->rsc_id, op->op_type, services_lrm_status_str(op->op_status),
op->interval_ms);
crm_trace("DC version: %s", caller_version);
task = op->op_type;
- /* Record a successful reload as a start, and a failed reload as a monitor,
- * to make life easier for the scheduler when determining the current state.
+ /* Record a successful agent reload as a start, and a failed one as a
+ * monitor, to make life easier for the scheduler when determining the
+ * current state.
+ *
+ * @COMPAT We should check "reload" here only if the operation was for a
+ * pre-OCF-1.1 resource agent, but we don't know that here, and we should
+ * only ever get results for actions scheduled by us, so we can reasonably
+ * assume any "reload" is actually a pre-1.1 agent reload.
*/
- if (pcmk__str_eq(task, "reload", pcmk__str_none)) {
+ if (pcmk__str_any_of(task, CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
+ NULL)) {
if (op->op_status == PCMK_LRM_OP_DONE) {
task = CRMD_ACTION_START;
} else {
task = CRMD_ACTION_STATUS;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
CRM_LOG_ASSERT(n_type != NULL);
CRM_LOG_ASSERT(n_task != NULL);
op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
if (op->op_status != PCMK_LRM_OP_PENDING) {
/* Ignore notify errors.
*
* @TODO It might be better to keep the correct result here, and
* ignore it in process_graph_event().
*/
op->op_status = PCMK_LRM_OP_DONE;
op->rc = 0;
}
} else if (did_rsc_op_fail(op, target_rc)) {
op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
if (op->interval_ms == 0) {
// Ensure 'last' gets updated, in case record-pending is true
op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
}
exit_reason = op->exit_reason;
} else if (op->interval_ms > 0) {
op_id = strdup(key);
} else {
op_id = pcmk__op_key(op->rsc_id, "last", 0);
}
again:
xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id);
if (xml_op == NULL) {
xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP);
}
if (op->user_data == NULL) {
crm_debug("Generating fake transition key for: " PCMK__OP_FMT
" %d from %s", op->rsc_id, op->op_type, op->interval_ms,
op->call_id, origin);
local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
FAKE_TE_ID);
op->user_data = local_user_data;
}
if(magic == NULL) {
magic = generate_transition_magic(op->user_data, op->op_status, op->rc);
}
crm_xml_add(xml_op, XML_ATTR_ID, op_id);
crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key);
crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task);
crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin);
crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic);
crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? "" : exit_reason);
crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */
crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id);
crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc);
crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status);
crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms);
if (compare_version("2.1", caller_version) <= 0) {
if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) {
crm_trace("Timing data (" PCMK__OP_FMT
"): last=%u change=%u exec=%u queue=%u",
op->rsc_id, op->op_type, op->interval_ms,
op->t_run, op->t_rcchange, op->exec_time, op->queue_time);
if ((op->interval_ms != 0) && (op->t_rcchange != 0)) {
// Recurring ops may have changed rc after initial run
crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
(long long) op->t_rcchange);
} else {
crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
(long long) op->t_run);
}
crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time);
crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time);
}
}
if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
/*
* Record migrate_source and migrate_target always for migrate ops.
*/
const char *name = XML_LRM_ATTR_MIGRATE_SOURCE;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
name = XML_LRM_ATTR_MIGRATE_TARGET;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
}
append_digest(op, xml_op, caller_version, magic, LOG_DEBUG);
if (op_id_additional) {
free(op_id);
op_id = op_id_additional;
op_id_additional = NULL;
goto again;
}
if (local_user_data) {
free(local_user_data);
op->user_data = NULL;
}
free(magic);
free(op_id);
free(key);
return xml_op;
}
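/* Illustration only (not part of this change): recording a completed operation
 * under a node's resource history, assuming "op" is a filled-in
 * lrmd_event_data_t, "lrm_rsc_xml" is the parent lrm_resource element, and
 * "node_name" holds the node's uname:
 *
 *     pcmk__create_history_xml(lrm_rsc_xml, op, CRM_FEATURE_SET, PCMK_OCF_OK,
 *                              node_name, __func__, LOG_DEBUG);
 */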
pcmk__output_t *
pcmk__new_logger(void)
{
int rc = pcmk_rc_ok;
pcmk__output_t *out = NULL;
const char* argv[] = { "", NULL };
pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_LOG,
{ NULL, NULL, NULL }
};
pcmk__register_formats(NULL, formats);
rc = pcmk__output_new(&out, "log", NULL, (char**)argv);
if ((rc != pcmk_rc_ok) || (out == NULL)) {
crm_err("Can't log resource details due to internal error: %s\n",
pcmk_rc_str(rc));
return NULL;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
return out;
}
diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c
index eb773c7dcc..9e48a00886 100644
--- a/lib/services/services_linux.c
+++ b/lib/services/services_linux.c
@@ -1,1186 +1,1186 @@
/*
- * Copyright 2010-2020 the Pacemaker project contributors
+ * Copyright 2010-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "crm/crm.h"
#include "crm/common/mainloop.h"
#include "crm/services.h"
#include "services_private.h"
static void close_pipe(int fildes[]);
/* We have two alternative ways of handling SIGCHLD when synchronously waiting
* for spawned processes to complete. Both rely on polling a file descriptor to
* discover SIGCHLD events.
*
* If sys/signalfd.h is available (e.g. on Linux), we call signalfd() to
* generate the file descriptor. Otherwise, we use the "self-pipe trick"
* (opening a pipe and writing a byte to it when SIGCHLD is received).
*/
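/* Illustration only (not part of this change): the signalfd() variant below
 * boils down to blocking SIGCHLD and polling a descriptor that becomes
 * readable when the signal is delivered, roughly:
 *
 *     sigset_t mask;
 *     sigemptyset(&mask);
 *     sigaddset(&mask, SIGCHLD);
 *     sigprocmask(SIG_BLOCK, &mask, NULL);         // block so signalfd() sees it
 *     int fd = signalfd(-1, &mask, SFD_NONBLOCK);
 *     struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *     poll(&pfd, 1, timeout_ms);                   // readable once SIGCHLD arrives
 *
 * The self-pipe variant replaces signalfd() with a pipe whose write end is
 * written from the SIGCHLD handler, and polls the read end instead.
 */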
#ifdef HAVE_SYS_SIGNALFD_H
// signalfd() implementation
#include <sys/signalfd.h>
// Everything needed to manage SIGCHLD handling
struct sigchld_data_s {
sigset_t mask; // Signals to block now (including SIGCHLD)
sigset_t old_mask; // Previous set of blocked signals
};
// Initialize SIGCHLD data and prepare for use
static bool
sigchld_setup(struct sigchld_data_s *data)
{
sigemptyset(&(data->mask));
sigaddset(&(data->mask), SIGCHLD);
sigemptyset(&(data->old_mask));
// Block SIGCHLD (saving previous set of blocked signals to restore later)
if (sigprocmask(SIG_BLOCK, &(data->mask), &(data->old_mask)) < 0) {
crm_err("Wait for child process completion failed: %s "
CRM_XS " source=sigprocmask", pcmk_strerror(errno));
return false;
}
return true;
}
// Get a file descriptor suitable for polling for SIGCHLD events
static int
sigchld_open(struct sigchld_data_s *data)
{
int fd;
CRM_CHECK(data != NULL, return -1);
fd = signalfd(-1, &(data->mask), SFD_NONBLOCK);
if (fd < 0) {
crm_err("Wait for child process completion failed: %s "
CRM_XS " source=signalfd", pcmk_strerror(errno));
}
return fd;
}
// Close a file descriptor returned by sigchld_open()
static void
sigchld_close(int fd)
{
if (fd > 0) {
close(fd);
}
}
// Return true if SIGCHLD was received from polled fd
static bool
sigchld_received(int fd)
{
struct signalfd_siginfo fdsi;
ssize_t s;
if (fd < 0) {
return false;
}
s = read(fd, &fdsi, sizeof(struct signalfd_siginfo));
if (s != sizeof(struct signalfd_siginfo)) {
crm_err("Wait for child process completion failed: %s "
CRM_XS " source=read", pcmk_strerror(errno));
} else if (fdsi.ssi_signo == SIGCHLD) {
return true;
}
return false;
}
// Do anything needed after done waiting for SIGCHLD
static void
sigchld_cleanup(struct sigchld_data_s *data)
{
// Restore the original set of blocked signals
if ((sigismember(&(data->old_mask), SIGCHLD) == 0)
&& (sigprocmask(SIG_UNBLOCK, &(data->mask), NULL) < 0)) {
crm_warn("Could not clean up after child process completion: %s",
pcmk_strerror(errno));
}
}
#else // HAVE_SYS_SIGNALFD_H not defined
// Self-pipe implementation (see above for function descriptions)
struct sigchld_data_s {
int pipe_fd[2]; // Pipe file descriptors
struct sigaction sa; // Signal handling info (with SIGCHLD)
struct sigaction old_sa; // Previous signal handling info
};
// We need a global to use in the signal handler
volatile struct sigchld_data_s *last_sigchld_data = NULL;
static void
sigchld_handler()
{
// We received a SIGCHLD, so trigger pipe polling
if ((last_sigchld_data != NULL)
&& (last_sigchld_data->pipe_fd[1] >= 0)
&& (write(last_sigchld_data->pipe_fd[1], "", 1) == -1)) {
crm_err("Wait for child process completion failed: %s "
CRM_XS " source=write", pcmk_strerror(errno));
}
}
static bool
sigchld_setup(struct sigchld_data_s *data)
{
int rc;
data->pipe_fd[0] = data->pipe_fd[1] = -1;
if (pipe(data->pipe_fd) == -1) {
crm_err("Wait for child process completion failed: %s "
CRM_XS " source=pipe", pcmk_strerror(errno));
return false;
}
rc = pcmk__set_nonblocking(data->pipe_fd[0]);
if (rc != pcmk_rc_ok) {
crm_warn("Could not set pipe input non-blocking: %s " CRM_XS " rc=%d",
pcmk_rc_str(rc), rc);
}
rc = pcmk__set_nonblocking(data->pipe_fd[1]);
if (rc != pcmk_rc_ok) {
crm_warn("Could not set pipe output non-blocking: %s " CRM_XS " rc=%d",
pcmk_rc_str(rc), rc);
}
// Set SIGCHLD handler
data->sa.sa_handler = sigchld_handler;
data->sa.sa_flags = 0;
sigemptyset(&(data->sa.sa_mask));
if (sigaction(SIGCHLD, &(data->sa), &(data->old_sa)) < 0) {
crm_err("Wait for child process completion failed: %s "
CRM_XS " source=sigaction", pcmk_strerror(errno));
}
// Remember data for use in signal handler
last_sigchld_data = data;
return true;
}
static int
sigchld_open(struct sigchld_data_s *data)
{
CRM_CHECK(data != NULL, return -1);
return data->pipe_fd[0];
}
static void
sigchld_close(int fd)
{
// Pipe will be closed in sigchld_cleanup()
return;
}
static bool
sigchld_received(int fd)
{
char ch;
if (fd < 0) {
return false;
}
// Clear out the self-pipe
while (read(fd, &ch, 1) == 1) ; /* deliberately empty: just drain the pipe */
return true;
}
static void
sigchld_cleanup(struct sigchld_data_s *data)
{
// Restore the previous SIGCHLD handler
if (sigaction(SIGCHLD, &(data->old_sa), NULL) < 0) {
crm_warn("Could not clean up after child process completion: %s",
pcmk_strerror(errno));
}
close_pipe(data->pipe_fd);
}
#endif
/*!
* \internal
* \brief Close the two file descriptors of a pipe
*
* \param[in] fildes Array of file descriptors opened by pipe()
*/
static void
close_pipe(int fildes[])
{
if (fildes[0] >= 0) {
close(fildes[0]);
fildes[0] = -1;
}
if (fildes[1] >= 0) {
close(fildes[1]);
fildes[1] = -1;
}
}
static gboolean
svc_read_output(int fd, svc_action_t * op, bool is_stderr)
{
char *data = NULL;
int rc = 0, len = 0;
char buf[500];
static const size_t buf_read_len = sizeof(buf) - 1;
if (fd < 0) {
crm_trace("No fd for %s", op->id);
return FALSE;
}
if (is_stderr && op->stderr_data) {
len = strlen(op->stderr_data);
data = op->stderr_data;
crm_trace("Reading %s stderr into offset %d", op->id, len);
} else if (is_stderr == FALSE && op->stdout_data) {
len = strlen(op->stdout_data);
data = op->stdout_data;
crm_trace("Reading %s stdout into offset %d", op->id, len);
} else {
crm_trace("Reading %s %s into offset %d", op->id, is_stderr?"stderr":"stdout", len);
}
do {
rc = read(fd, buf, buf_read_len);
if (rc > 0) {
buf[rc] = 0;
crm_trace("Got %d chars: %.80s", rc, buf);
data = pcmk__realloc(data, len + rc + 1);
len += sprintf(data + len, "%s", buf);
} else if (errno != EINTR) {
/* error or EOF
* Cleanup happens in pipe_done()
*/
rc = FALSE;
break;
}
} while (rc == buf_read_len || rc < 0);
if (is_stderr) {
op->stderr_data = data;
} else {
op->stdout_data = data;
}
return rc;
}
static int
dispatch_stdout(gpointer userdata)
{
svc_action_t *op = (svc_action_t *) userdata;
return svc_read_output(op->opaque->stdout_fd, op, FALSE);
}
static int
dispatch_stderr(gpointer userdata)
{
svc_action_t *op = (svc_action_t *) userdata;
return svc_read_output(op->opaque->stderr_fd, op, TRUE);
}
static void
pipe_out_done(gpointer user_data)
{
svc_action_t *op = (svc_action_t *) user_data;
crm_trace("%p", op);
op->opaque->stdout_gsource = NULL;
if (op->opaque->stdout_fd > STDOUT_FILENO) {
close(op->opaque->stdout_fd);
}
op->opaque->stdout_fd = -1;
}
static void
pipe_err_done(gpointer user_data)
{
svc_action_t *op = (svc_action_t *) user_data;
op->opaque->stderr_gsource = NULL;
if (op->opaque->stderr_fd > STDERR_FILENO) {
close(op->opaque->stderr_fd);
}
op->opaque->stderr_fd = -1;
}
static struct mainloop_fd_callbacks stdout_callbacks = {
.dispatch = dispatch_stdout,
.destroy = pipe_out_done,
};
static struct mainloop_fd_callbacks stderr_callbacks = {
.dispatch = dispatch_stderr,
.destroy = pipe_err_done,
};
static void
set_ocf_env(const char *key, const char *value, gpointer user_data)
{
if (setenv(key, value, 1) != 0) {
crm_perror(LOG_ERR, "setenv failed for key:%s and value:%s", key, value);
}
}
static void
set_ocf_env_with_prefix(gpointer key, gpointer value, gpointer user_data)
{
char buffer[500];
snprintf(buffer, sizeof(buffer), strcmp(key, "OCF_CHECK_LEVEL") != 0 ? "OCF_RESKEY_%s" : "%s", (char *)key);
set_ocf_env(buffer, value, user_data);
}
static void
set_alert_env(gpointer key, gpointer value, gpointer user_data)
{
int rc;
if (value != NULL) {
rc = setenv(key, value, 1);
} else {
rc = unsetenv(key);
}
if (rc < 0) {
crm_perror(LOG_ERR, "setenv %s=%s",
(char*)key, (value? (char*)value : ""));
} else {
crm_trace("setenv %s=%s", (char*)key, (value? (char*)value : ""));
}
}
/*!
* \internal
* \brief Add environment variables suitable for an action
*
* \param[in] op Action to use
*/
static void
add_action_env_vars(const svc_action_t *op)
{
void (*env_setter)(gpointer, gpointer, gpointer) = NULL;
if (op->agent == NULL) {
env_setter = set_alert_env; /* we deal with alert handler */
} else if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) {
env_setter = set_ocf_env_with_prefix;
}
if (env_setter != NULL && op->params != NULL) {
g_hash_table_foreach(op->params, env_setter, NULL);
}
if (env_setter == NULL || env_setter == set_alert_env) {
return;
}
- set_ocf_env("OCF_RA_VERSION_MAJOR", "1", NULL);
- set_ocf_env("OCF_RA_VERSION_MINOR", "0", NULL);
+ set_ocf_env("OCF_RA_VERSION_MAJOR", PCMK_OCF_MAJOR_VERSION, NULL);
+ set_ocf_env("OCF_RA_VERSION_MINOR", PCMK_OCF_MINOR_VERSION, NULL);
set_ocf_env("OCF_ROOT", OCF_ROOT_DIR, NULL);
set_ocf_env("OCF_EXIT_REASON_PREFIX", PCMK_OCF_REASON_PREFIX, NULL);
if (op->rsc) {
set_ocf_env("OCF_RESOURCE_INSTANCE", op->rsc, NULL);
}
if (op->agent != NULL) {
set_ocf_env("OCF_RESOURCE_TYPE", op->agent, NULL);
}
/* Note: this is not part of the OCF specification yet (as of Sept 10, 2004) */
if (op->provider != NULL) {
set_ocf_env("OCF_RESOURCE_PROVIDER", op->provider, NULL);
}
}
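/* Illustration only (not part of this change): for an OCF agent with a
 * resource parameter "ip", the child environment set up above would include,
 * among others:
 *
 *     OCF_RESKEY_ip=<value>
 *     OCF_RA_VERSION_MAJOR, OCF_RA_VERSION_MINOR
 *     OCF_ROOT, OCF_EXIT_REASON_PREFIX
 *     OCF_RESOURCE_INSTANCE, OCF_RESOURCE_TYPE, and (if set) OCF_RESOURCE_PROVIDER
 */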
static void
pipe_in_single_parameter(gpointer key, gpointer value, gpointer user_data)
{
svc_action_t *op = user_data;
char *buffer = crm_strdup_printf("%s=%s\n", (char *)key, (char *) value);
int ret, total = 0, len = strlen(buffer);
do {
errno = 0;
ret = write(op->opaque->stdin_fd, buffer + total, len - total);
if (ret > 0) {
total += ret;
}
} while ((errno == EINTR) && (total < len));
free(buffer);
}
/*!
* \internal
* \brief Pipe parameters in via stdin for action
*
* \param[in] op Action to use
*/
static void
pipe_in_action_stdin_parameters(const svc_action_t *op)
{
crm_debug("sending args");
if (op->params) {
g_hash_table_foreach(op->params, pipe_in_single_parameter, (gpointer) op);
}
}
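/* Illustration only (not part of this change): with parameters
 * {"ip": "192.0.2.1", "port": "80"}, the child would receive on its stdin
 * (in hash table iteration order):
 *
 *     ip=192.0.2.1
 *     port=80
 */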
gboolean
recurring_action_timer(gpointer data)
{
svc_action_t *op = data;
crm_debug("Scheduling another invocation of %s", op->id);
/* Clean out the old result */
free(op->stdout_data);
op->stdout_data = NULL;
free(op->stderr_data);
op->stderr_data = NULL;
op->opaque->repeat_timer = 0;
services_action_async(op, NULL);
return FALSE;
}
/* Returns FALSE if 'op' should be free'd by the caller */
gboolean
operation_finalize(svc_action_t * op)
{
int recurring = 0;
if (op->interval_ms) {
if (op->cancel) {
op->status = PCMK_LRM_OP_CANCELLED;
cancel_recurring_action(op);
} else {
recurring = 1;
op->opaque->repeat_timer = g_timeout_add(op->interval_ms,
recurring_action_timer, (void *)op);
}
}
if (op->opaque->callback) {
op->opaque->callback(op);
}
op->pid = 0;
services_untrack_op(op);
if (!recurring && op->synchronous == FALSE) {
/*
* If this is a recurring action, do not free explicitly.
* It will get freed whenever the action gets cancelled.
*/
services_action_free(op);
return TRUE;
}
services_action_cleanup(op);
return FALSE;
}
static void
close_op_input(svc_action_t *op)
{
if (op->opaque->stdin_fd >= 0) {
close(op->opaque->stdin_fd);
}
}
static void
finish_op_output(svc_action_t *op, bool is_stderr)
{
mainloop_io_t **source;
int fd;
if (is_stderr) {
source = &(op->opaque->stderr_gsource);
fd = op->opaque->stderr_fd;
} else {
source = &(op->opaque->stdout_gsource);
fd = op->opaque->stdout_fd;
}
if (op->synchronous || *source) {
crm_trace("Finish reading %s[%d] %s",
op->id, op->pid, (is_stderr? "stderr" : "stdout"));
svc_read_output(fd, op, is_stderr);
if (op->synchronous) {
close(fd);
} else {
mainloop_del_fd(*source);
*source = NULL;
}
}
}
// Log an operation's stdout and stderr
static void
log_op_output(svc_action_t *op)
{
char *prefix = crm_strdup_printf("%s[%d] error output", op->id, op->pid);
crm_log_output(LOG_NOTICE, prefix, op->stderr_data);
strcpy(prefix + strlen(prefix) - strlen("error output"), "output");
crm_log_output(LOG_DEBUG, prefix, op->stdout_data);
free(prefix);
}
static void
operation_finished(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
{
svc_action_t *op = mainloop_child_userdata(p);
mainloop_clear_child_userdata(p);
CRM_ASSERT(op->pid == pid);
/* Depending on the priority the mainloop gives the stdout and stderr
* file descriptors, this function could be called before everything has
* been read from them, so force a final read now.
*/
finish_op_output(op, true);
finish_op_output(op, false);
close_op_input(op);
if (signo == 0) {
crm_debug("%s[%d] exited with status %d", op->id, op->pid, exitcode);
op->status = PCMK_LRM_OP_DONE;
op->rc = exitcode;
} else if (mainloop_child_timeout(p)) {
crm_warn("%s[%d] timed out after %dms", op->id, op->pid, op->timeout);
op->status = PCMK_LRM_OP_TIMEOUT;
op->rc = PCMK_OCF_TIMEOUT;
} else if (op->cancel) {
/* If an in-flight recurring operation was killed because it was
* cancelled, don't treat that as a failure.
*/
crm_info("%s[%d] terminated with signal: %s " CRM_XS " (%d)",
op->id, op->pid, strsignal(signo), signo);
op->status = PCMK_LRM_OP_CANCELLED;
op->rc = PCMK_OCF_OK;
} else {
crm_warn("%s[%d] terminated with signal: %s " CRM_XS " (%d)",
op->id, op->pid, strsignal(signo), signo);
op->status = PCMK_LRM_OP_ERROR;
op->rc = PCMK_OCF_SIGNAL;
}
log_op_output(op);
operation_finalize(op);
}
/*!
* \internal
* \brief Set operation rc and status per errno from stat(), fork() or execvp()
*
* \param[in,out] op Operation to set rc and status for
* \param[in] error Value of errno after system call
*
* \return void
*/
static void
services_handle_exec_error(svc_action_t * op, int error)
{
int rc_not_installed, rc_insufficient_priv, rc_exec_error;
/* Mimic the return codes for each standard as that's what we'll convert back from in get_uniform_rc() */
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
&& pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
rc_not_installed = PCMK_LSB_STATUS_NOT_INSTALLED;
rc_insufficient_priv = PCMK_LSB_STATUS_INSUFFICIENT_PRIV;
rc_exec_error = PCMK_LSB_STATUS_UNKNOWN;
#if SUPPORT_NAGIOS
} else if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) {
rc_not_installed = NAGIOS_NOT_INSTALLED;
rc_insufficient_priv = NAGIOS_INSUFFICIENT_PRIV;
rc_exec_error = PCMK_OCF_EXEC_ERROR;
#endif
} else {
rc_not_installed = PCMK_OCF_NOT_INSTALLED;
rc_insufficient_priv = PCMK_OCF_INSUFFICIENT_PRIV;
rc_exec_error = PCMK_OCF_EXEC_ERROR;
}
switch (error) { /* see execve(2), stat(2) and fork(2) */
case ENOENT: /* No such file or directory */
case EISDIR: /* Is a directory */
case ENOTDIR: /* Path component is not a directory */
case EINVAL: /* Invalid executable format */
case ENOEXEC: /* Invalid executable format */
op->rc = rc_not_installed;
op->status = PCMK_LRM_OP_NOT_INSTALLED;
break;
case EACCES: /* permission denied (various errors) */
case EPERM: /* permission denied (various errors) */
op->rc = rc_insufficient_priv;
op->status = PCMK_LRM_OP_ERROR;
break;
default:
op->rc = rc_exec_error;
op->status = PCMK_LRM_OP_ERROR;
}
}
static void
action_launch_child(svc_action_t *op)
{
/* SIGPIPE is ignored (which is different from signal blocking) by the gnutls library.
* Depending on the libqb version in use, libqb may set SIGPIPE to be ignored as well.
* We do not want this to be inherited by the child process. By resetting the signal
* to its default behavior, we avoid some potential odd problems that occur during OCF
* scripts when SIGPIPE is ignored by the environment. */
signal(SIGPIPE, SIG_DFL);
#if defined(HAVE_SCHED_SETSCHEDULER)
if (sched_getscheduler(0) != SCHED_OTHER) {
struct sched_param sp;
memset(&sp, 0, sizeof(sp));
sp.sched_priority = 0;
if (sched_setscheduler(0, SCHED_OTHER, &sp) == -1) {
crm_perror(LOG_ERR, "Could not reset scheduling policy to SCHED_OTHER for %s", op->id);
}
}
#endif
if (setpriority(PRIO_PROCESS, 0, 0) == -1) {
crm_perror(LOG_ERR, "Could not reset process priority to 0 for %s", op->id);
}
/* Man: The call setpgrp() is equivalent to setpgid(0,0)
* _and_ compiles on BSD variants too
* need to investigate if it works the same too.
*/
setpgid(0, 0);
pcmk__close_fds_in_child(false);
#if SUPPORT_CIBSECRETS
if (pcmk__substitute_secrets(op->rsc, op->params) != pcmk_rc_ok) {
/* replacing secrets failed! */
if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
/* don't fail on stop! */
crm_info("proceeding with the stop operation for %s", op->rsc);
} else {
crm_err("failed to get secrets for %s, "
"considering resource not configured", op->rsc);
_exit(PCMK_OCF_NOT_CONFIGURED);
}
}
#endif
add_action_env_vars(op);
/* Become the desired user */
if (op->opaque->uid && (geteuid() == 0)) {
// If requested, set effective group
if (op->opaque->gid && (setgid(op->opaque->gid) < 0)) {
crm_perror(LOG_ERR, "Could not set child group to %d", op->opaque->gid);
_exit(PCMK_OCF_NOT_CONFIGURED);
}
// Erase supplementary group list
// (We could do initgroups() if we kept a copy of the username)
if (setgroups(0, NULL) < 0) {
crm_perror(LOG_ERR, "Could not set child groups");
_exit(PCMK_OCF_NOT_CONFIGURED);
}
// Set effective user
if (setuid(op->opaque->uid) < 0) {
crm_perror(LOG_ERR, "setting user to %d", op->opaque->uid);
_exit(PCMK_OCF_NOT_CONFIGURED);
}
}
/* execute the RA */
execvp(op->opaque->exec, op->opaque->args);
/* Most cases should have been already handled by stat() */
services_handle_exec_error(op, errno);
_exit(op->rc);
}
static void
action_synced_wait(svc_action_t *op, struct sigchld_data_s *data)
{
int status = 0;
int timeout = op->timeout;
time_t start = -1;
struct pollfd fds[3];
int wait_rc = 0;
fds[0].fd = op->opaque->stdout_fd;
fds[0].events = POLLIN;
fds[0].revents = 0;
fds[1].fd = op->opaque->stderr_fd;
fds[1].events = POLLIN;
fds[1].revents = 0;
fds[2].fd = sigchld_open(data);
fds[2].events = POLLIN;
fds[2].revents = 0;
crm_trace("Waiting for %s[%d]", op->id, op->pid);
start = time(NULL);
do {
int poll_rc = poll(fds, 3, timeout);
if (poll_rc > 0) {
if (fds[0].revents & POLLIN) {
svc_read_output(op->opaque->stdout_fd, op, FALSE);
}
if (fds[1].revents & POLLIN) {
svc_read_output(op->opaque->stderr_fd, op, TRUE);
}
if ((fds[2].revents & POLLIN) && sigchld_received(fds[2].fd)) {
wait_rc = waitpid(op->pid, &status, WNOHANG);
if ((wait_rc > 0) || ((wait_rc < 0) && (errno == ECHILD))) {
// Child process exited or doesn't exist
break;
} else if (wait_rc < 0) {
crm_warn("Wait for completion of %s[%d] failed: %s "
CRM_XS " source=waitpid",
op->id, op->pid, pcmk_strerror(errno));
wait_rc = 0; // Act as if process is still running
}
}
} else if (poll_rc == 0) {
// Poll timed out with no descriptors ready
timeout = 0;
break;
} else if ((poll_rc < 0) && (errno != EINTR)) {
crm_err("Wait for completion of %s[%d] failed: %s "
CRM_XS " source=poll",
op->id, op->pid, pcmk_strerror(errno));
break;
}
timeout = op->timeout - (time(NULL) - start) * 1000;
} while ((op->timeout < 0 || timeout > 0));
crm_trace("Stopped waiting for %s[%d]", op->id, op->pid);
if (wait_rc <= 0) {
op->rc = PCMK_OCF_UNKNOWN_ERROR;
if (op->timeout > 0 && timeout <= 0) {
op->status = PCMK_LRM_OP_TIMEOUT;
crm_warn("%s[%d] timed out after %dms",
op->id, op->pid, op->timeout);
} else {
op->status = PCMK_LRM_OP_ERROR;
}
/* Kill the child only if it has not been successfully waited for yet.
This limits the chance of killing the wrong target. */
if (wait_rc == 0 && waitpid(op->pid, &status, WNOHANG) == 0) {
if (kill(op->pid, SIGKILL)) {
crm_warn("Could not kill rogue child %s[%d]: %s",
op->id, op->pid, pcmk_strerror(errno));
}
/* Safe to skip WNOHANG here as we sent non-ignorable signal. */
while (waitpid(op->pid, &status, 0) == (pid_t) -1 && errno == EINTR) ; /* deliberately empty: retry until not interrupted */
}
} else if (WIFEXITED(status)) {
op->status = PCMK_LRM_OP_DONE;
op->rc = WEXITSTATUS(status);
crm_info("%s[%d] exited with status %d", op->id, op->pid, op->rc);
} else if (WIFSIGNALED(status)) {
int signo = WTERMSIG(status);
op->status = PCMK_LRM_OP_ERROR;
crm_err("%s[%d] terminated with signal: %s " CRM_XS " (%d)",
op->id, op->pid, strsignal(signo), signo);
}
#ifdef WCOREDUMP
if (WCOREDUMP(status)) {
crm_err("%s[%d] dumped core", op->id, op->pid);
}
#endif
finish_op_output(op, true);
finish_op_output(op, false);
close_op_input(op);
sigchld_close(fds[2].fd);
}
/* For an asynchronous 'op', returns FALSE if 'op' should be free'd by the caller */
/* For a synchronous 'op', returns FALSE if 'op' fails */
gboolean
services_os_action_execute(svc_action_t * op)
{
int stdout_fd[2];
int stderr_fd[2];
int stdin_fd[2] = {-1, -1};
int rc;
struct stat st;
struct sigchld_data_s data;
/* Fail fast */
if(stat(op->opaque->exec, &st) != 0) {
rc = errno;
crm_warn("Cannot execute '%s': %s " CRM_XS " stat rc=%d",
op->opaque->exec, pcmk_strerror(rc), rc);
services_handle_exec_error(op, rc);
if (!op->synchronous) {
return operation_finalize(op);
}
return FALSE;
}
if (pipe(stdout_fd) < 0) {
rc = errno;
crm_err("Cannot execute '%s': %s " CRM_XS " pipe(stdout) rc=%d",
op->opaque->exec, pcmk_strerror(rc), rc);
services_handle_exec_error(op, rc);
if (!op->synchronous) {
return operation_finalize(op);
}
return FALSE;
}
if (pipe(stderr_fd) < 0) {
rc = errno;
close_pipe(stdout_fd);
crm_err("Cannot execute '%s': %s " CRM_XS " pipe(stderr) rc=%d",
op->opaque->exec, pcmk_strerror(rc), rc);
services_handle_exec_error(op, rc);
if (!op->synchronous) {
return operation_finalize(op);
}
return FALSE;
}
if (pcmk_is_set(pcmk_get_ra_caps(op->standard), pcmk_ra_cap_stdin)) {
if (pipe(stdin_fd) < 0) {
rc = errno;
close_pipe(stdout_fd);
close_pipe(stderr_fd);
crm_err("Cannot execute '%s': %s " CRM_XS " pipe(stdin) rc=%d",
op->opaque->exec, pcmk_strerror(rc), rc);
services_handle_exec_error(op, rc);
if (!op->synchronous) {
return operation_finalize(op);
}
return FALSE;
}
}
if (op->synchronous && !sigchld_setup(&data)) {
close_pipe(stdin_fd);
close_pipe(stdout_fd);
close_pipe(stderr_fd);
sigchld_cleanup(&data);
return FALSE;
}
op->pid = fork();
switch (op->pid) {
case -1:
rc = errno;
close_pipe(stdin_fd);
close_pipe(stdout_fd);
close_pipe(stderr_fd);
crm_err("Cannot execute '%s': %s " CRM_XS " fork rc=%d",
op->opaque->exec, pcmk_strerror(rc), rc);
services_handle_exec_error(op, rc);
if (!op->synchronous) {
return operation_finalize(op);
}
sigchld_cleanup(&data);
return FALSE;
case 0: /* Child */
close(stdout_fd[0]);
close(stderr_fd[0]);
if (stdin_fd[1] >= 0) {
close(stdin_fd[1]);
}
if (STDOUT_FILENO != stdout_fd[1]) {
if (dup2(stdout_fd[1], STDOUT_FILENO) != STDOUT_FILENO) {
crm_warn("Can't redirect output from '%s': %s "
CRM_XS " errno=%d",
op->opaque->exec, pcmk_strerror(errno), errno);
}
close(stdout_fd[1]);
}
if (STDERR_FILENO != stderr_fd[1]) {
if (dup2(stderr_fd[1], STDERR_FILENO) != STDERR_FILENO) {
crm_warn("Can't redirect error output from '%s': %s "
CRM_XS " errno=%d",
op->opaque->exec, pcmk_strerror(errno), errno);
}
close(stderr_fd[1]);
}
if ((stdin_fd[0] >= 0) &&
(STDIN_FILENO != stdin_fd[0])) {
if (dup2(stdin_fd[0], STDIN_FILENO) != STDIN_FILENO) {
crm_warn("Can't redirect input to '%s': %s "
CRM_XS " errno=%d",
op->opaque->exec, pcmk_strerror(errno), errno);
}
close(stdin_fd[0]);
}
if (op->synchronous) {
sigchld_cleanup(&data);
}
action_launch_child(op);
CRM_ASSERT(0); /* action_launch_child is effectively noreturn */
}
/* Only the parent reaches here */
close(stdout_fd[1]);
close(stderr_fd[1]);
if (stdin_fd[0] >= 0) {
close(stdin_fd[0]);
}
op->opaque->stdout_fd = stdout_fd[0];
rc = pcmk__set_nonblocking(op->opaque->stdout_fd);
if (rc != pcmk_rc_ok) {
crm_warn("Could not set '%s' output non-blocking: %s "
CRM_XS " rc=%d",
op->opaque->exec, pcmk_rc_str(rc), rc);
}
op->opaque->stderr_fd = stderr_fd[0];
rc = pcmk__set_nonblocking(op->opaque->stderr_fd);
if (rc != pcmk_rc_ok) {
crm_warn("Could not set '%s' error output non-blocking: %s "
CRM_XS " rc=%d",
op->opaque->exec, pcmk_rc_str(rc), rc);
}
op->opaque->stdin_fd = stdin_fd[1];
if (op->opaque->stdin_fd >= 0) {
// Using a buffer behind a non-blocking fd here - that could be improved
// As long as no other standard uses stdin_fd, assume this is for stonith
rc = pcmk__set_nonblocking(op->opaque->stdin_fd);
if (rc != pcmk_rc_ok) {
crm_warn("Could not set '%s' input non-blocking: %s "
CRM_XS " fd=%d,rc=%d", op->opaque->exec,
pcmk_rc_str(rc), op->opaque->stdin_fd, rc);
}
pipe_in_action_stdin_parameters(op);
// As long as we are handling parameters directly in here, just close the fd
close(op->opaque->stdin_fd);
op->opaque->stdin_fd = -1;
}
// after fds are setup properly and before we plug anything into mainloop
if (op->opaque->fork_callback) {
op->opaque->fork_callback(op);
}
if (op->synchronous) {
action_synced_wait(op, &data);
sigchld_cleanup(&data);
} else {
crm_trace("Waiting async for '%s'[%d]", op->opaque->exec, op->pid);
mainloop_child_add_with_flags(op->pid,
op->timeout,
op->id,
op,
(op->flags & SVC_ACTION_LEAVE_GROUP) ? mainloop_leave_pid_group : 0,
operation_finished);
op->opaque->stdout_gsource = mainloop_add_fd(op->id,
G_PRIORITY_LOW,
op->opaque->stdout_fd, op, &stdout_callbacks);
op->opaque->stderr_gsource = mainloop_add_fd(op->id,
G_PRIORITY_LOW,
op->opaque->stderr_fd, op, &stderr_callbacks);
services_add_inflight_op(op);
}
return TRUE;
}
GList *
services_os_get_directory_list(const char *root, gboolean files, gboolean executable)
{
GList *list = NULL;
struct dirent **namelist;
int entries = 0, lpc = 0;
char buffer[PATH_MAX];
entries = scandir(root, &namelist, NULL, alphasort);
if (entries <= 0) {
return list;
}
for (lpc = 0; lpc < entries; lpc++) {
struct stat sb;
if ('.' == namelist[lpc]->d_name[0]) {
free(namelist[lpc]);
continue;
}
snprintf(buffer, sizeof(buffer), "%s/%s", root, namelist[lpc]->d_name);
if (stat(buffer, &sb)) {
continue;
}
if (S_ISDIR(sb.st_mode)) {
if (files) {
free(namelist[lpc]);
continue;
}
} else if (S_ISREG(sb.st_mode)) {
if (files == FALSE) {
free(namelist[lpc]);
continue;
} else if (executable
&& (sb.st_mode & S_IXUSR) == 0
&& (sb.st_mode & S_IXGRP) == 0 && (sb.st_mode & S_IXOTH) == 0) {
free(namelist[lpc]);
continue;
}
}
list = g_list_append(list, strdup(namelist[lpc]->d_name));
free(namelist[lpc]);
}
free(namelist);
return list;
}
GList *
resources_os_list_ocf_providers(void)
{
return get_directory_list(OCF_ROOT_DIR "/resource.d", FALSE, TRUE);
}
GList *
resources_os_list_ocf_agents(const char *provider)
{
GList *gIter = NULL;
GList *result = NULL;
GList *providers = NULL;
if (provider) {
char buffer[500];
snprintf(buffer, sizeof(buffer), "%s/resource.d/%s", OCF_ROOT_DIR, provider);
return get_directory_list(buffer, TRUE, TRUE);
}
providers = resources_os_list_ocf_providers();
for (gIter = providers; gIter != NULL; gIter = gIter->next) {
GList *tmp1 = result;
GList *tmp2 = resources_os_list_ocf_agents(gIter->data);
if (tmp2) {
result = g_list_concat(tmp1, tmp2);
}
}
g_list_free_full(providers, free);
return result;
}
gboolean
services__ocf_agent_exists(const char *provider, const char *agent)
{
char *buf = NULL;
gboolean rc = FALSE;
struct stat st;
if (provider == NULL || agent == NULL) {
return rc;
}
buf = crm_strdup_printf(OCF_ROOT_DIR "/resource.d/%s/%s", provider, agent);
if (stat(buf, &st) == 0) {
rc = TRUE;
}
free(buf);
return rc;
}
diff --git a/tools/cibsecret.in b/tools/cibsecret.in
index ce57a1845c..4569863af7 100644
--- a/tools/cibsecret.in
+++ b/tools/cibsecret.in
@@ -1,440 +1,440 @@
#!@BASH_PATH@
# Copyright 2011-2020 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
# cibsecret
#
# Manage the secrets directory (by default, /var/lib/pacemaker/lrm/secrets).
# Secrets are ASCII files, holding one value per file:
# <secrets-dir>/<rsc>/<param>
# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_NOT_INSTALLED=5
CRM_EX_USAGE=64
CRM_EX_UNAVAILABLE=69
CRM_EX_OSFILE=72
CRM_EX_CONFIG=78
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_EXISTS=108
LRM_CIBSECRETS="@LRM_CIBSECRETS_DIR@"
PROG="$(basename "$0")"
SSH_OPTS="-o StrictHostKeyChecking=no"
MAGIC="lrm://"
usage() {
cat <<EOF
usage: $PROG [-C] <command> [<parameters>]
Options:
--help Show this message, then exit
--version Display version information, then exit
-C Don't read or write the CIB
Commands and their parameters:
set <rsc> <param> [<value>]
Set the value of a sensitive resource parameter.
get <rsc> <param>
Display the locally stored value of a sensitive resource parameter.
check <rsc> <param>
Verify that the locally stored value of a sensitive resource parameter
matches its locally stored MD5 hash.
stash <rsc> <param>
Make a non-sensitive resource parameter that is already in the CIB
sensitive (move its value to a locally stored and protected file).
This may not be used with -C.
unstash <rsc> <param>
Make a sensitive resource parameter that is already in the CIB
non-sensitive (move its value from the locally stored file to the CIB).
This may not be used with -C.
delete <rsc> <param>
Remove a sensitive resource parameter value.
sync
Copy all locally stored secrets to all other nodes.
This command manages sensitive resource parameter values that should not be
stored directly in Pacemaker's Cluster Information Base (CIB). Such values
are handled by storing a special string directly in the CIB that tells
Pacemaker to look in a separate, protected file for the actual value.
The secret files are not encrypted, but protected by file system permissions
such that only root can read or modify them.
Since the secret files are stored locally, they must be synchronized across all
cluster nodes. This command handles the synchronization using (in order of
preference) pssh, pdsh, or ssh, so one of those must be installed. Before
synchronizing, this command will ping the cluster nodes to determine which are
alive, using fping if it is installed, otherwise the ping command. Installing
fping is strongly recommended for better performance.
Known limitations:
This command can only be run from full cluster nodes (not Pacemaker Remote
nodes).
Changes are not atomic, so the cluster may use different values while a
change is in progress. To avoid problems, it is recommended to put the
cluster in maintenance mode when making changes with this command.
- Changes in secret values do not trigger a reload or restart of the affected
- resource, since they do not change the CIB. If a response is desired before
- the next cluster recheck interval, any CIB change (such as setting a node
- attribute) will trigger it.
+ Changes in secret values do not trigger an agent reload or restart of the
+ affected resource, since they do not change the CIB. If a response is
+ desired before the next cluster recheck interval, any CIB change (such as
+ setting a node attribute) will trigger it.
If any node is down when changes to secrets are made, or a new node is
later added to the cluster, it may have different values when it joins the
cluster, before "$PROG sync" is run. To avoid this, it is recommended to
run the sync command (from another node) before starting Pacemaker on the
node.
Examples:
$PROG set ipmi_node1 passwd SecreT_PASS
$PROG get ipmi_node1 passwd
$PROG check ipmi_node1 passwd
$PROG stash ipmi_node2 passwd
$PROG sync
EOF
exit "$1"
}
check_usage() {
case "$1" in
set) [ "$2" -ne 4 ] && [ "$2" -ne 3 ] && usage 1 ;;
get) [ "$2" -ne 3 ] && usage 1 ;;
check) [ "$2" -ne 3 ] && usage 1 ;;
stash) [ "$2" -ne 3 ] && usage 1 ;;
unstash) [ "$2" -ne 3 ] && usage 1 ;;
delete) [ "$2" -ne 3 ] && usage 1 ;;
sync) [ "$2" -ne 1 ] && usage 1 ;;
--help) usage $CRM_EX_OK ;;
--version) crm_attribute --version; exit $? ;;
*) usage $CRM_EX_USAGE ;;
esac
}
fatal() {
rc=$1
shift
echo "ERROR: $*"
exit $rc
}
warn() {
echo "WARNING: $*"
}
info() {
echo "INFO: $*"
}
check_env() {
which md5sum >/dev/null 2>&1 ||
fatal $CRM_EX_NOT_INSTALLED "please install md5sum to run $PROG"
if which pssh >/dev/null 2>&1; then
rsh=pssh_fun
rcp_to_from=pscp_fun
# -q is a SUSE patch not present in upstream pssh
PSSH_QUIET_OPTION=""
pssh -q 2>&1|grep "no such option: -q" > /dev/null ||
PSSH_QUIET_OPTION="-q"
elif which pdsh >/dev/null 2>&1; then
rsh=pdsh_fun
rcp_to_from=pdcp_fun
elif which ssh >/dev/null 2>&1; then
rsh=ssh_fun
rcp_to_from=scp_fun
else
fatal $CRM_EX_NOT_INSTALLED "please install pssh, pdsh, or ssh to run $PROG"
fi
ps -ef | grep '[p]acemaker-controld' >/dev/null ||
fatal $CRM_EX_UNAVAILABLE "pacemaker not running? $PROG needs pacemaker"
}
# This must be called (and return success) before calling $rsh or $rcp_to_from
get_live_peers() {
# Get local node name
GLP_LOCAL_NODE="$(crm_node -n)"
[ $? -eq 0 ] || fatal $CRM_EX_UNAVAILABLE "couldn't get local node name"
# Get a list of all other cluster nodes
GLP_ALL_PEERS="$(crmadmin -N -q)"
[ $? -eq 0 ] || fatal $CRM_EX_UNAVAILABLE "couldn't determine cluster nodes"
GLP_ALL_PEERS="$(echo "$GLP_ALL_PEERS" | grep -v "^${GLP_LOCAL_NODE}$")"
# Make a list of those that respond to pings
if [ "$(id -u)" = "0" ] && which fping >/dev/null 2>&1; then
LIVE_NODES=$(fping -a $GLP_ALL_PEERS 2>/dev/null)
else
LIVE_NODES=""
for GLP_NODE in $GLP_ALL_PEERS; do \
ping -c 2 -q "$GLP_NODE" >/dev/null 2>&1 &&
LIVE_NODES="$LIVE_NODES $GLP_NODE"
done
fi
# Warn the user about any that didn't respond to pings
GLP_DOWN="$( (for GLP_NODE in $LIVE_NODES $GLP_ALL_PEERS; do echo "$GLP_NODE"; done) | sort | uniq -u)"
if [ "$(echo "$GLP_DOWN" | wc -w)" = "1" ]; then
warn "node $GLP_DOWN is down"
warn "you'll need to update it using \"$PROG sync\" later"
elif [ -n "$GLP_DOWN" ]; then
warn "nodes $(echo "$GLP_DOWN" | tr '\n' ' ')are down"
warn "you'll need to update them using \"$PROG sync\" later"
fi
if [ "$LIVE_NODES" = "" ]; then
info "no other nodes live"
return 1
fi
return 0
}
pssh_fun() {
pssh $PSSH_QUIET_OPTION -i -H "$LIVE_NODES" -x "$SSH_OPTS" -- "$@"
}
pscp_fun() {
PSCP_DEST="$1"
shift
pscp $PSSH_QUIET_OPTION -H "$LIVE_NODES" -x "-pr" -x "$SSH_OPTS" -- "$@" "$PSCP_DEST"
}
pdsh_fun() {
PDSH_NODES=$(echo "$LIVE_NODES" | tr '[:space:]' ',')
export PDSH_SSH_ARGS_APPEND="$SSH_OPTS"
pdsh -w "$PDSH_NODES" -- "$@"
}
pdcp_fun() {
PDCP_DEST="$1"
shift
PDCP_NODES=$(echo "$LIVE_NODES" | tr '[:space:]' ',')
export PDSH_SSH_ARGS_APPEND="$SSH_OPTS"
pdcp -pr -w "$PDCP_NODES" -- "$@" "$PDCP_DEST"
}
ssh_fun() {
for SSH_NODE in $LIVE_NODES; do
ssh $SSH_OPTS "$SSH_NODE" -- "$@" || return
done
}
scp_fun() {
SCP_DEST="$1"
shift
for SCP_NODE in $LIVE_NODES; do
scp -pqr $SSH_OPTS "$@" "$SCP_NODE:$SCP_DEST" || return
done
}
# TODO: this procedure should be replaced with csync2
# provided that csync2 has already been configured
sync_files() {
get_live_peers || return
if [ "$cmd" != "delete" ]; then
info "syncing $LRM_CIBSECRETS to $(echo "$LIVE_NODES" | tr '\n' ' ') ..."
else
info "deleting $LRM_CIBSECRETS from $(echo "$LIVE_NODES" | tr '\n' ' ') ..."
fi
$rsh rm -rf "$LRM_CIBSECRETS" &&
$rsh mkdir -p "$(dirname "$LRM_CIBSECRETS")" &&
$rcp_to_from "$(dirname "$LRM_CIBSECRETS")" "$LRM_CIBSECRETS"
}
sync_one() {
SO_FILE="$1"
get_live_peers || return
if [ "$cmd" != "delete" ]; then
info "syncing $SO_FILE to $(echo "$LIVE_NODES" | tr '\n' ' ') ..."
else
info "deleting $SO_FILE from $(echo "$LIVE_NODES" | tr '\n' ' ') ..."
fi
$rsh mkdir -p "$(dirname "$SO_FILE")" &&
if [ -f "$SO_FILE" ]; then
$rcp_to_from "$(dirname "$SO_FILE")" "$SO_FILE" "${SO_FILE}.sign"
else
$rsh rm -f "$SO_FILE" "${SO_FILE}.sign"
fi
}
is_secret() {
# assume that the secret is in the CIB if we cannot talk to cib
[ "$NO_CRM" ] || test "$1" = "$MAGIC"
}
check_cib_rsc() {
CCR_OUT="$($NO_CRM crm_resource -r "$1" -W 2>&1)" || fatal $CRM_EX_NOSUCH "$CCR_OUT"
}
get_cib_param() {
GCP_RSC="$1"
GCP_PARAM="$2"
$NO_CRM crm_resource -r "$GCP_RSC" -g "$GCP_PARAM" 2>/dev/null
}
set_cib_param() {
SET_RSC="$1"
SET_PARAM="$2"
SET_VAL="$3"
$NO_CRM crm_resource -r "$SET_RSC" -p "$SET_PARAM" -v "$SET_VAL" 2>/dev/null
}
remove_cib_param() {
RM_RSC="$1"
RM_PARAM="$2"
$NO_CRM crm_resource -r "$RM_RSC" -d "$RM_PARAM" 2>/dev/null
}
localfiles() {
LF_CMD="$1"
LF_RSC="$2"
LF_PARAM="$3"
LF_VALUE="$4"
LF_FILE="$LRM_CIBSECRETS/$LF_RSC/$LF_PARAM"
case "$LF_CMD" in
get)
cat "$LF_FILE" 2>/dev/null
true
;;
getsum)
cat "${LF_FILE}.sign" 2>/dev/null
true
;;
set)
LF_SUM="$(printf %s "$LF_VALUE" | md5sum)" ||
fatal $CRM_EX_ERROR "md5sum failed to produce hash for resource $LF_RSC parameter $LF_PARAM"
LF_SUM="$(echo "$LF_SUM" | awk '{print $1}')"
mkdir -p "$(dirname "$LF_FILE")" &&
echo "$LF_VALUE" > "$LF_FILE" &&
echo "$LF_SUM" > "${LF_FILE}.sign" &&
sync_one "$LF_FILE"
;;
remove)
rm -f "$LF_FILE" "${LF_FILE}.sign"
sync_one "$LF_FILE"
;;
esac
}
cibsecret_set() {
CS_VALUE="$1"
if [ "$2" -ne 4 ]; then
read -p "Enter value: " CS_VALUE
fi
check_cib_rsc "$rsc"
CIBSET_CURRENT="$(get_cib_param "$rsc" "$param")"
[ -z "$NO_CRM" ] &&
[ ! -z "$CIBSET_CURRENT" ] &&
[ "$CIBSET_CURRENT" != "$MAGIC" ] &&
[ "$CIBSET_CURRENT" != "$CS_VALUE" ] &&
fatal $CRM_EX_CONFIG "CIB value <$CIBSET_CURRENT> different for $rsc parameter $param; please delete it first"
localfiles set "$rsc" "$param" "$CS_VALUE" &&
set_cib_param "$rsc" "$param" "$MAGIC"
}
cibsecret_check() {
check_cib_rsc "$rsc"
is_secret "$(get_cib_param "$rsc" "$param")" ||
fatal $CRM_EX_CONFIG "resource $rsc parameter $param not set as secret, nothing to check"
CSC_LOCAL_SUM="$(localfiles getsum "$rsc" "$param")"
[ "$CSC_LOCAL_SUM" ] ||
fatal $CRM_EX_OSFILE "no MD5 hash for resource $rsc parameter $param"
CSC_LOCAL_VALUE="$(localfiles get "$rsc" "$param")"
CSC_CALC_SUM="$(printf "%s" "$CSC_LOCAL_VALUE" | md5sum | awk '{print $1}')"
[ "$CSC_CALC_SUM" = "$CSC_LOCAL_SUM" ] ||
fatal $CRM_EX_DIGEST "MD5 hash mismatch for resource $rsc parameter $param"
}
cibsecret_get() {
cibsecret_check
localfiles get "$rsc" "$param"
}
cibsecret_delete() {
check_cib_rsc "$rsc"
localfiles remove "$rsc" "$param" && remove_cib_param "$rsc" "$param"
}
cibsecret_stash() {
[ "$NO_CRM" ] && fatal $CRM_EX_USAGE "no access to Pacemaker, stash not supported"
check_cib_rsc "$rsc"
CIBSTASH_CURRENT="$(get_cib_param "$rsc" "$param")"
[ "$CIBSTASH_CURRENT" = "" ] &&
fatal $CRM_EX_NOSUCH "nothing to stash for resource $rsc parameter $param"
is_secret "$CIBSTASH_CURRENT" &&
fatal $CRM_EX_EXISTS "resource $rsc parameter $param already set as secret, nothing to stash"
cibsecret_set "$CIBSTASH_CURRENT" 4
}
cibsecret_unstash() {
[ "$NO_CRM" ] && fatal $CRM_EX_USAGE "no access to Pacemaker, unstash not supported"
UNSTASH_LOCAL_VALUE="$(localfiles get "$rsc" "$param")"
[ "$UNSTASH_LOCAL_VALUE" = "" ] &&
fatal $CRM_EX_NOSUCH "nothing to unstash for resource $rsc parameter $param"
check_cib_rsc "$rsc"
is_secret "$(get_cib_param "$rsc" "$param")" ||
warn "resource $rsc parameter $param not set as secret, but we have local value so proceeding anyway"
localfiles remove "$rsc" "$param" &&
set_cib_param "$rsc" "$param" "$UNSTASH_LOCAL_VALUE"
}
cibsecret_sync() {
sync_files
}
# Grab arguments
if [ "$1" = "-C" ]; then
NO_CRM=':'
shift
fi
cmd="$1"
rsc="$2"
param="$3"
value="$4"
# Ensure we have everything we need
check_usage "$cmd" $#
check_env
umask 0077
# for dirname() function (@TODO why are we replacing dirname?)
. "@OCF_ROOT_DIR@/lib/heartbeat/ocf-shellfuncs"
"cibsecret_$cmd" "$value" $#
rc=$?
if [ $rc -ne 0 ]; then
fatal $CRM_EX_ERROR "$cmd(): failed with rc: $rc"
fi
# vim: set expandtab tabstop=8 softtabstop=4 shiftwidth=4 textwidth=80: