diff --git a/cts/cli/crm_mon.xml b/cts/cli/crm_mon.xml
index 296a97c03f..ffb2f80d8e 100644
--- a/cts/cli/crm_mon.xml
+++ b/cts/cli/crm_mon.xml
@@ -1,171 +1,197 @@
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 39e5bcc64e..b36ffa25f5 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,1229 +1,3030 @@
=#=#=#= Begin test: Basic text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output
=#=#=#= Begin test: XML output =#=#=#=
-
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output - OK (0) =#=#=#=
* Passed: crm_mon - XML output
=#=#=#= Begin test: Basic text output without node section =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output without node section - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output without node section
=#=#=#= Begin test: XML output without the node section =#=#=#=
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output without the node section - OK (0) =#=#=#=
* Passed: crm_mon - XML output without the node section
=#=#=#= Begin test: Text output with only the node section =#=#=#=
Node List:
* Online: [ cluster01 cluster02 ]
=#=#=#= End test: Text output with only the node section - OK (0) =#=#=#=
* Passed: crm_mon - Text output with only the node section
=#=#=#= Begin test: Complete text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output
=#=#=#= Begin test: Complete text output with detail =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (2) (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 (1) cluster02 (2) ]
Active Resources:
* Clone Set: ping-clone [ping]:
* ping (ocf::pacemaker:ping): Started cluster02
* ping (ocf::pacemaker:ping): Started cluster01
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
Node Attributes:
* Node: cluster01 (1):
* location : office
* pingd : 1000
* Node: cluster02 (2):
* pingd : 1000
Operations:
* Node: cluster02 (2):
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01 (1):
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01 (1)
=#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output with detail
=#=#=#= Begin test: Complete brief text output =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Active Resources:
* 1 (ocf::pacemaker:Dummy): Active cluster02
* 1 (stonith:fence_xvm): Active cluster01
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Resource Group: exim-group:
* 1/1 (lsb:exim): Active cluster02
* 1/1 (ocf::heartbeat:IPaddr): Active cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output
=#=#=#= Begin test: Complete text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* Fencing (stonith:fence_xvm): Started
+ * mysql-proxy (lsb:mysql-proxy): Started
* Node cluster02: online:
* Resources:
* ping (ocf::pacemaker:ping): Started
* dummy (ocf::pacemaker:Dummy): Started
* Public-IP (ocf::heartbeat:IPaddr): Started
* Email (lsb:exim): Started
+ * mysql-proxy (lsb:mysql-proxy): Started
* GuestNode httpd-bundle-0@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-1@: OFFLINE:
* Resources:
* GuestNode httpd-bundle-2@: OFFLINE:
* Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output grouped by node
=#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Node cluster01: online:
* Resources:
+ * 1 (lsb:mysql-proxy): Active
* 1 (ocf::pacemaker:ping): Active
* 1 (stonith:fence_xvm): Active
* Node cluster02: online:
* Resources:
* 1 (lsb:exim): Active
+ * 1 (lsb:mysql-proxy): Active
* 1 (ocf::heartbeat:IPaddr): Active
* 1 (ocf::pacemaker:Dummy): Active
* 1 (ocf::pacemaker:ping): Active
- * GuestNode httpd-bundle-0@: OFFLINE:
- * Resources:
- * GuestNode httpd-bundle-1@: OFFLINE:
- * Resources:
- * GuestNode httpd-bundle-2@: OFFLINE:
- * Resources:
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete brief text output grouped by node
=#=#=#= Begin test: XML output grouped by node =#=#=#=
-
+
-
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output grouped by node
=#=#=#= Begin test: Complete text output filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 ]
Node Attributes:
* Node: cluster01:
* location : office
* pingd : 1000
Operations:
* Node: cluster01:
* ping: migration-threshold=1000000:
* (17) start
* (18) monitor: interval="10000ms"
* Fencing: migration-threshold=1000000:
* (15) start
* (19) monitor: interval="60000ms"
* dummy: migration-threshold=1000000:
* (16) stop
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by node
=#=#=#= Begin test: XML output filtered by node =#=#=#=
-
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node
=#=#=#= Begin test: Complete text output filtered by tag =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster02 ]
Node Attributes:
* Node: cluster02:
* pingd : 1000
Operations:
* Node: cluster02:
* ping: migration-threshold=1000000:
* (11) start
* (12) monitor: interval="10000ms"
* dummy: migration-threshold=1000000:
* (18) start
* (19) monitor: interval="60000ms"
* Public-IP: migration-threshold=1000000:
* (2) start
* Email: migration-threshold=1000000:
* (2) start
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+
+Negative Location Constraints:
+ * not-on-cluster1 prevents dummy from running on cluster01
=#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output filtered by tag
=#=#=#= Begin test: XML output filtered by tag =#=#=#=
-
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by tag
+=#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+ * Fencing (stonith:fence_xvm): Started cluster01
+
+Node Attributes:
+ * Node: cluster01:
+ * location : office
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster01:
+ * Fencing: migration-threshold=1000000:
+ * (15) start
+ * (19) monitor: interval="60000ms"
+=#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by resource tag
+=#=#=#= Begin test: XML output filtered by resource tag =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by resource tag
=#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Active Resources:
* No active resources
=#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output filtered by node that doesn't exist
=#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#=
-
+
+
+
+
=#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#=
* Passed: crm_mon - XML output filtered by node that doesn't exist
=#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf::pacemaker:Dummy): Started cluster02
* inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
* Clone Set: inactive-clone-master [inactive-clone] (promotable):
* Stopped: [ cluster01 cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster01 cluster02 ]
=#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources
=#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
- * 21 resource instances configured (1 DISABLED)
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster02 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* dummy (ocf::pacemaker:Dummy): Started cluster02
* inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
* Clone Set: inactive-clone-master [inactive-clone] (promotable):
* Stopped: [ cluster02 ]
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
* httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf::heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Started: [ cluster02 ]
=#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Basic text output with inactive resources, filtered by node
-=#=#=#= Begin test: Text output of partially active resources =#=#=#=
+=#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
- * 4 nodes configured
- * 11 resource instances configured
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
- * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Active Resources:
- * Clone Set: ping-clone [ping]:
- * Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
- * Container bundle set: httpd-bundle [pcmk:http]:
- * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
- * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
-=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
-* Passed: crm_mon - Text output of partially active resources
-=#=#=#= Begin test: XML output of partially active resources =#=#=#=
-
+
+Node Attributes:
+ * Node: cluster01:
+ * location : office
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster01:
+ * Fencing: migration-threshold=1000000:
+ * (15) start
+ * (19) monitor: interval="60000ms"
+=#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by primitive resource
+=#=#=#= Begin test: XML output filtered by primitive resource =#=#=#=
+
-
-
+
+
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
-
+
+
+
+=#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by primitive resource
+=#=#=#= Begin test: Complete text output filtered by group resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+ * Resource Group: exim-group:
+ * Public-IP (ocf::heartbeat:IPaddr): Started cluster02
+ * Email (lsb:exim): Started cluster02
+
+Node Attributes:
+ * Node: cluster01:
+ * location : office
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02:
+ * Public-IP: migration-threshold=1000000:
+ * (2) start
+ * Email: migration-threshold=1000000:
+ * (2) start
+=#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by group resource
+=#=#=#= Begin test: XML output filtered by group resource =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by group resource
+=#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+ * Resource Group: exim-group:
+ * Public-IP (ocf::heartbeat:IPaddr): Started cluster02
+
+Node Attributes:
+ * Node: cluster01:
+ * location : office
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02:
+ * Public-IP: migration-threshold=1000000:
+ * (2) start
+=#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by group resource member
+=#=#=#= Begin test: XML output filtered by group resource member =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by group resource member
+=#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+ * Clone Set: ping-clone [ping]:
+ * Started: [ cluster01 cluster02 ]
+
+Node Attributes:
+ * Node: cluster01:
+ * location : office
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02:
+ * ping: migration-threshold=1000000:
+ * (11) start
+ * (12) monitor: interval="10000ms"
+ * Node: cluster01:
+ * ping: migration-threshold=1000000:
+ * (17) start
+ * (18) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by clone resource
+=#=#=#= Begin test: XML output filtered by clone resource =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by clone resource
+=#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+ * Clone Set: ping-clone [ping]:
+ * Started: [ cluster01 cluster02 ]
+
+Node Attributes:
+ * Node: cluster01:
+ * location : office
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02:
+ * ping: migration-threshold=1000000:
+ * (11) start
+ * (12) monitor: interval="10000ms"
+ * Node: cluster01:
+ * ping: migration-threshold=1000000:
+ * (17) start
+ * (18) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by clone resource instance
+=#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by clone resource instance
+=#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: ping-clone [ping]:
+ * ping (ocf::pacemaker:ping): Started cluster02
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * ping: migration-threshold=1000000:
+ * (11) start
+ * (12) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * ping: migration-threshold=1000000:
+ * (17) start
+ * (18) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output filtered by exact clone resource instance
+=#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by exact clone resource instance
+=#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Active Resources:
+ * No active resources
+=#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output filtered by resource that doesn't exist
+=#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by resource that doesn't exist
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * inactive-dummy (ocf::pacemaker:Dummy): Stopped (disabled)
+ * Clone Set: inactive-clone-master [inactive-clone] (promotable):
+ * Stopped: [ cluster01 cluster02 ]
+=#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output with inactive resources, filtered by tag
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped
+ * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped
+ * httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped
+=#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource
+=#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by inactive bundle resource
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * Replica[0]
+ * httpd-bundle-ip-192.168.122.131 (ocf::heartbeat:IPaddr2): Stopped
+=#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource
+=#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by bundled IP address resource
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * Replica[1]
+ * httpd-bundle-docker-1 (ocf::heartbeat:docker): Stopped
+=#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container
+=#=#=#= Begin test: XML output filtered by bundled container =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by bundled container
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * Replica[0]
+ * httpd-bundle-0 (ocf::pacemaker:remote): Stopped
+=#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection
+=#=#=#= Begin test: XML output filtered by bundle connection =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by bundle connection
+=#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+
+Full List of Resources:
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * Replica[0]
+ * httpd (ocf::heartbeat:apache): Stopped
+ * Replica[1]
+ * httpd (ocf::heartbeat:apache): Stopped
+ * Replica[2]
+ * httpd (ocf::heartbeat:apache): Stopped
+=#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
+* Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource
+=#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#=
+* Passed: crm_mon - XML output filtered by bundled primitive resource
+=#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by clone name in cloned group
+=#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by clone name in cloned group
+=#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by group name in cloned group
+=#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by group name in cloned group
+=#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group
+=#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by exact group instance name in cloned group
+=#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:0:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster02
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+ * Resource Group: mysql-group:2:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:3:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+ * Resource Group: mysql-group:4:
+ * mysql-proxy (lsb:mysql-proxy): Stopped
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by primitive name in cloned group
+=#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by primitive name in cloned group
+=#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (2) (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 5 nodes configured
+ * 26 resource instances configured (1 DISABLED)
+
+Node List:
+ * Online: [ cluster01 (1) cluster02 (2) ]
+
+Active Resources:
+ * Clone Set: mysql-clone-group [mysql-group]:
+ * Resource Group: mysql-group:1:
+ * mysql-proxy (lsb:mysql-proxy): Started cluster01
+
+Node Attributes:
+ * Node: cluster01 (1):
+ * location : office
+ * pingd : 1000
+ * Node: cluster02 (2):
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02 (2):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * Node: cluster01 (1):
+ * mysql-proxy: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+=#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group
+=#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+=#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
+* Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group
+=#=#=#= Begin test: Text output of partially active resources =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 11 resource instances configured
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+ * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Active Resources:
+ * Clone Set: ping-clone [ping]:
+ * Started: [ cluster01 ]
+ * Fencing (stonith:fence_xvm): Started cluster01
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
+ * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+=#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#=
+* Passed: crm_mon - Text output of partially active resources
+=#=#=#= Begin test: XML output of partially active resources =#=#=#=
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#=
* Passed: crm_mon - XML output of partially active resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources
+=#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 11 resource instances configured
+
+Node List:
+ * Online: [ cluster01 cluster02 ]
+ * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ]
+
+Full List of Resources:
+ * 1/1 (stonith:fence_xvm): Active cluster01
+ * Clone Set: ping-clone [ping]:
+ * Started: [ cluster01 ]
+ * Stopped: [ cluster02 ]
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
+ * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+
+Node Attributes:
+ * Node: cluster01:
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02:
+ * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-docker-0: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-0: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="30000ms"
+ * Node: cluster01:
+ * Fencing: migration-threshold=1000000:
+ * (15) start
+ * (20) monitor: interval="60000ms"
+ * ping: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-docker-1: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-1: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="30000ms"
+ * Node: httpd-bundle-0@cluster02:
+ * httpd: migration-threshold=1000000:
+ * (1) start
+=#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
+* Passed: crm_mon - Complete brief text output, with inactive resources
+=#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
+Cluster Summary:
+ * Stack: corosync
+ * Current DC: cluster02 (version) - partition with quorum
+ * Last updated:
+ * Last change:
+ * 4 nodes configured
+ * 11 resource instances configured
+
+Node List:
+ * Node cluster01: online:
+ * Resources:
+ * 1 (ocf::heartbeat:IPaddr2): Active
+ * 1 (ocf::heartbeat:docker): Active
+ * 1 (ocf::pacemaker:ping): Active
+ * 1 (ocf::pacemaker:remote): Active
+ * 1 (stonith:fence_xvm): Active
+ * Node cluster02: online:
+ * Resources:
+ * 1 (ocf::heartbeat:IPaddr2): Active
+ * 1 (ocf::heartbeat:docker): Active
+ * 1 (ocf::pacemaker:remote): Active
+ * GuestNode httpd-bundle-0@cluster02: online:
+ * Resources:
+ * 1 (ocf::heartbeat:apache): Active
+
+Inactive Resources:
+ * Clone Set: ping-clone [ping]:
+ * Started: [ cluster01 ]
+ * Stopped: [ cluster02 ]
+ * Container bundle set: httpd-bundle [pcmk:http]:
+ * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02
+ * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
+
+Node Attributes:
+ * Node: cluster01:
+ * pingd : 1000
+ * Node: cluster02:
+ * pingd : 1000
+
+Operations:
+ * Node: cluster02:
+ * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-docker-0: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-0: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="30000ms"
+ * Node: cluster01:
+ * Fencing: migration-threshold=1000000:
+ * (15) start
+ * (20) monitor: interval="60000ms"
+ * ping: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="10000ms"
+ * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-docker-1: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="60000ms"
+ * httpd-bundle-1: migration-threshold=1000000:
+ * (2) start
+ * (3) monitor: interval="30000ms"
+ * Node: httpd-bundle-0@cluster02:
+ * httpd: migration-threshold=1000000:
+ * (1) start
+=#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
+* Passed: crm_mon - Complete brief text output grouped by node, with inactive resources
=#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 4 nodes configured
* 11 resource instances configured
Node List:
* Online: [ cluster01 ]
Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster01
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01
=#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node
=#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#=
=#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#=
* Passed: crm_mon - Text output of partially active resources, filtered by node
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index 62e5698a6b..bec06c8a96 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,1510 +1,1668 @@
#!@BASH_PATH@
#
# Copyright 2008-2020 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
#
# Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of
# compatible functionality. Do not use the -i option, alternation (\|),
# \0, or character sequences such as \n or \s.
#
USAGE_TEXT="Usage: cts-cli []
Options:
--help Display this text, then exit
-V, --verbose Display any differences from expected output
-t 'TEST [...]' Run only specified tests (default: 'dates tools crm_mon acls validity upgrade rules')
-p DIR Look for executables in DIR (may be specified multiple times)
-v, --valgrind Run all commands under valgrind
-s Save actual output as expected output"
# If readlink supports -e (i.e. GNU), use it
readlink -e / >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
test_home="$(dirname "$(readlink -e "$0")")"
else
test_home="$(dirname "$0")"
fi
: ${shadow=cts-cli}
shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)
num_errors=0
num_passed=0
verbose=0
tests="dates tools crm_mon acls validity upgrade rules"
do_save=0
VALGRIND_CMD=
VALGRIND_OPTS="
-q
--gen-suppressions=all
--show-reachable=no
--leak-check=full
--trace-children=no
--time-stamp=yes
--num-callers=20
--suppressions=$test_home/valgrind-pcmk.suppressions
"
# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_INVALID_PARAM=2
CRM_EX_UNIMPLEMENT_FEATURE=3
CRM_EX_INSUFFICIENT_PRIV=4
CRM_EX_USAGE=64
CRM_EX_CONFIG=78
CRM_EX_OLD=103
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_UNSAFE=107
CRM_EX_EXISTS=108
CRM_EX_MULTIPLE=109
CRM_EX_EXPIRED=110
CRM_EX_NOT_YET_IN_EFFECT=111
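# Sketch of how these constants are used (hypothetical test, not in this file):
# a test that expects a failure passes the matching constant to test_assert()
# instead of CRM_EX_OK, and the command's exit status is compared against it:
#
#   desc="Query a resource that does not exist"
#   cmd="crm_resource --locate -r no-such-resource"
#   test_assert $CRM_EX_NOSUCH 0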
function test_assert() {
target=$1; shift
cib=$1; shift
app=`echo "$cmd" | sed 's/\ .*//'`
printf "* Running: $app - $desc\n" 1>&2
printf "=#=#=#= Begin test: $desc =#=#=#=\n"
eval $VALGRIND_CMD $cmd 2>&1
rc=$?
if [ x$cib != x0 ]; then
printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
CIB_user=root cibadmin -Q
fi
printf "=#=#=#= End test: $desc - $(crm_error --exit $rc) ($rc) =#=#=#=\n"
if [ $rc -ne $target ]; then
num_errors=$(( $num_errors + 1 ))
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
return
exit $CRM_EX_ERROR
else
printf "* Passed: %-14s - %s\n" $app "$desc"
num_passed=$(( $num_passed + 1 ))
fi
}
function test_crm_mon() {
export CIB_file="$test_home/cli/crm_mon.xml"
desc="Basic text output"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output"
cmd="crm_mon --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Basic text output without node section"
cmd="crm_mon -1 --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="XML output without the node section"
cmd="crm_mon --output-as=xml --exclude=nodes"
test_assert $CRM_EX_OK 0
desc="Text output with only the node section"
cmd="crm_mon -1 --exclude=all --include=nodes"
test_assert $CRM_EX_OK 0
# The above test doesn't need to be performed for other output formats. It's
# really just a test to make sure that blank lines are correct.
desc="Complete text output"
cmd="crm_mon -1 --include=all"
test_assert $CRM_EX_OK 0
# XML includes everything already so there's no need for a complete test
desc="Complete text output with detail"
cmd="crm_mon -1R --include=all"
test_assert $CRM_EX_OK 0
# XML includes detailed output already
desc="Complete brief text output"
cmd="crm_mon -1 --include=all --brief"
test_assert $CRM_EX_OK 0
desc="Complete text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node"
test_assert $CRM_EX_OK 0
# XML does not have a brief output option
desc="Complete brief text output grouped by node"
cmd="crm_mon -1 --include=all --group-by-node --brief"
test_assert $CRM_EX_OK 0
desc="XML output grouped by node"
cmd="crm_mon -1 --output-as=xml --group-by-node"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by node"
cmd="crm_mon -1 --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node"
cmd="crm_mon --output-as xml --include=all --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Complete text output filtered by tag"
cmd="crm_mon -1 --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
desc="XML output filtered by tag"
cmd="crm_mon --output-as=xml --include=all --node=even-nodes"
test_assert $CRM_EX_OK 0
+ desc="Complete text output filtered by resource tag"
+ cmd="crm_mon -1 --include=all --resource=fencing-rscs"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by resource tag"
+ cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs"
+ test_assert $CRM_EX_OK 0
+
desc="Basic text output filtered by node that doesn't exist"
cmd="crm_mon -1 --node=blah"
test_assert $CRM_EX_OK 0
desc="XML output filtered by node that doesn't exist"
cmd="crm_mon --output-as=xml --node=blah"
test_assert $CRM_EX_OK 0
desc="Basic text output with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
desc="Basic text output with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster02"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
+ desc="Complete text output filtered by primitive resource"
+ cmd="crm_mon -1 --include=all --resource=Fencing"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by primitive resource"
+ cmd="crm_mon --output-as=xml --resource=Fencing"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output filtered by group resource"
+ cmd="crm_mon -1 --include=all --resource=exim-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by group resource"
+ cmd="crm_mon --output-as=xml --resource=exim-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output filtered by group resource member"
+ cmd="crm_mon -1 --include=all --resource=Public-IP"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by group resource member"
+ cmd="crm_mon --output-as=xml --resource=Email"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output filtered by clone resource"
+ cmd="crm_mon -1 --include=all --resource=ping-clone"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by clone resource"
+ cmd="crm_mon --output-as=xml --resource=ping-clone"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output filtered by clone resource instance"
+ cmd="crm_mon -1 --include=all --resource=ping"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by clone resource instance"
+ cmd="crm_mon --output-as=xml --resource=ping"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output filtered by exact clone resource instance"
+ cmd="crm_mon -1 --include=all --show-detail --resource=ping:0"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by exact clone resource instance"
+ cmd="crm_mon --output-as=xml --resource=ping:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output filtered by resource that doesn't exist"
+ cmd="crm_mon -1 --resource=blah"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by resource that doesn't exist"
+ cmd="crm_mon --output-as=xml --resource=blah"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output with inactive resources, filtered by tag"
+ cmd="crm_mon -1 -r --resource=inactive-rscs"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output with inactive resources, filtered by bundle resource"
+ cmd="crm_mon -1 -r --resource=httpd-bundle"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by inactive bundle resource"
+ cmd="crm_mon --output-as=xml --resource=httpd-bundle"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output with inactive resources, filtered by bundled IP address resource"
+ cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by bundled IP address resource"
+ cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output with inactive resources, filtered by bundled container"
+ cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by bundled container"
+ cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output with inactive resources, filtered by bundle connection"
+ cmd="crm_mon -1 -r --resource=httpd-bundle-0"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by bundle connection"
+ cmd="crm_mon --output-as=xml --resource=httpd-bundle-0"
+ test_assert $CRM_EX_OK 0
+
+ desc="Basic text output with inactive resources, filtered by bundled primitive resource"
+ cmd="crm_mon -1 -r --resource=httpd"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output filtered by bundled primitive resource"
+ cmd="crm_mon --output-as=xml --resource=httpd"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by clone name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by clone name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-clone-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by group name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by group name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-group"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by exact group instance name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by exact group instance name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-group:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by primitive name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by primitive name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-proxy"
+ test_assert $CRM_EX_OK 0
+
+ desc="Complete text output, filtered by exact primitive instance name in cloned group"
+ cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"
+ test_assert $CRM_EX_OK 0
+
+ desc="XML output, filtered by exact primitive instance name in cloned group"
+ cmd="crm_mon --output-as=xml --resource=mysql-proxy:1"
+ test_assert $CRM_EX_OK 0
+
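+ # Taken together, the filters above exercise base-name matching
+ # (--resource=ping matches any ping:N instance) as well as exact-instance
+ # matching (--resource=ping:0 and --resource=mysql-group:1).
+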
unset CIB_file
export CIB_file="$test_home/cli/crm_mon-partial.xml"
desc="Text output of partially active resources"
cmd="crm_mon -1"
test_assert $CRM_EX_OK 0
desc="XML output of partially active resources"
cmd="crm_mon -1 --output-as=xml"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, with inactive resources"
cmd="crm_mon -1 -r"
test_assert $CRM_EX_OK 0
# XML already includes inactive resources
+ desc="Complete brief text output, with inactive resources"
+ cmd="crm_mon -1 -r --include=all --brief"
+ test_assert $CRM_EX_OK 0
+
+ # XML does not have a brief output option
+
+ desc="Complete brief text output grouped by node, with inactive resources"
+ cmd="crm_mon -1 -r --include=all --group-by-node --brief"
+ test_assert $CRM_EX_OK 0
+
desc="Text output of partially active resources, with inactive resources, filtered by node"
cmd="crm_mon -1 -r --node=cluster01"
test_assert $CRM_EX_OK 0
desc="Text output of partially active resources, filtered by node"
cmd="crm_mon -1 --output-as=xml --node=cluster01"
test_assert $CRM_EX_OK 0
unset CIB_file
}
function test_tools() {
local TMPXML
local TMPORIG
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
TMPORIG=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.existing.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
desc="Validate CIB"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK
desc="Configure something before erasing"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Require --force for CIB erasure"
cmd="cibadmin -E"
test_assert $CRM_EX_UNSAFE
desc="Allow CIB erasure with --force"
cmd="cibadmin -E --force"
test_assert $CRM_EX_OK
desc="Query CIB"
cmd="cibadmin -Q > $TMPORIG"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n cluster-delay -v 60s"
test_assert $CRM_EX_OK
desc="Query new cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Query cluster options"
cmd="cibadmin -Q -o crm_config > $TMPXML"
test_assert $CRM_EX_OK
desc="Set no-quorum policy"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="Delete nvpair"
cmd="cibadmin -D -o crm_config --xml-text ''"
test_assert $CRM_EX_OK
desc="Create operation should fail"
cmd="cibadmin -C -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_EXISTS
desc="Modify cluster options section"
cmd="cibadmin -M -o crm_config --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Query updated cluster option"
cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Set duplicate cluster option"
cmd="crm_attribute -n cluster-delay -v 40s -s duplicate"
test_assert $CRM_EX_OK
desc="Setting multiply defined cluster option should fail"
cmd="crm_attribute -n cluster-delay -v 30s"
test_assert $CRM_EX_MULTIPLE
desc="Set cluster option with -s"
cmd="crm_attribute -n cluster-delay -v 30s -s duplicate"
test_assert $CRM_EX_OK
desc="Delete cluster option with -i"
cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay"
test_assert $CRM_EX_OK
desc="Create node1 and bring it online"
cmd="crm_simulate --live-check --in-place --node-up=node1"
test_assert $CRM_EX_OK
desc="Create node attribute"
cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes"
test_assert $CRM_EX_OK
desc="Query new node attribute"
cmd="cibadmin -Q -o nodes | grep node1-ram"
test_assert $CRM_EX_OK
desc="Set a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status"
test_assert $CRM_EX_OK
desc="Query a fail count"
cmd="crm_failcount --query -r foo -N node1"
test_assert $CRM_EX_OK
desc="Delete a transient (fail-count) node attribute"
cmd="crm_attribute -n fail-count-foo -D -N node1 -t status"
test_assert $CRM_EX_OK
desc="Digest calculation"
cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"
test_assert $CRM_EX_OK
# This update will fail because it has version numbers
desc="Replace operation should fail"
cmd="cibadmin -R --xml-file $TMPORIG"
test_assert $CRM_EX_OLD
desc="Default standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Set standby status"
cmd="crm_standby -N node1 -v true"
test_assert $CRM_EX_OK
desc="Query standby value"
cmd="crm_standby -N node1 -G"
test_assert $CRM_EX_OK
desc="Delete standby value"
cmd="crm_standby -N node1 -D"
test_assert $CRM_EX_OK
desc="Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g is-managed"
test_assert $CRM_EX_OK
desc="Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create another resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK 0
desc="Show why a resource is not running"
cmd="crm_resource -Y -r dummy"
test_assert $CRM_EX_OK 0
desc="Remove another resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK 0
desc="Create a resource attribute"
cmd="crm_resource -r dummy -p delay -v 10s"
test_assert $CRM_EX_OK
desc="List the configured resources"
cmd="crm_resource -L"
test_assert $CRM_EX_OK
desc="List IDs of instantiated resources"
cmd="crm_resource -l"
test_assert $CRM_EX_OK 0
desc="Show XML configuration of resource"
cmd="crm_resource -q -r dummy"
test_assert $CRM_EX_OK 0
desc="Require a destination when migrating a resource that is stopped"
cmd="crm_resource -r dummy -M"
test_assert $CRM_EX_USAGE
desc="Don't support migration to non-existent locations"
cmd="crm_resource -r dummy -M -N i.do.not.exist"
test_assert $CRM_EX_NOSUCH
desc="Create a fencing resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
desc="Bring resources online"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Try to move a resource to its existing location"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_EXISTS
desc="Move a resource from its existing location"
cmd="crm_resource -r dummy --move"
test_assert $CRM_EX_OK
desc="Clear out constraints generated by --move"
cmd="crm_resource -r dummy --clear"
test_assert $CRM_EX_OK
desc="Default ticket granted state"
cmd="crm_ticket -t ticketA -G granted -d false"
test_assert $CRM_EX_OK
desc="Set ticket granted state"
cmd="crm_ticket -t ticketA -r --force"
test_assert $CRM_EX_OK
desc="Query ticket granted state"
cmd="crm_ticket -t ticketA -G granted"
test_assert $CRM_EX_OK
desc="Delete ticket granted state"
cmd="crm_ticket -t ticketA -D granted --force"
test_assert $CRM_EX_OK
desc="Make a ticket standby"
cmd="crm_ticket -t ticketA -s"
test_assert $CRM_EX_OK
desc="Query ticket standby state"
cmd="crm_ticket -t ticketA -G standby"
test_assert $CRM_EX_OK
desc="Activate a ticket"
cmd="crm_ticket -t ticketA -a"
test_assert $CRM_EX_OK
desc="Delete ticket standby state"
cmd="crm_ticket -t ticketA -D standby"
test_assert $CRM_EX_OK
desc="Ban a resource on unknown node"
cmd="crm_resource -r dummy -B -N host1"
test_assert $CRM_EX_NOSUCH
desc="Create two more nodes and bring them online"
cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3"
test_assert $CRM_EX_OK
desc="Ban dummy from node1"
cmd="crm_resource -r dummy -B -N node1"
test_assert $CRM_EX_OK
desc="Show where a resource is running"
cmd="crm_resource -r dummy -W"
test_assert $CRM_EX_OK 0
desc="Show constraints on a resource"
cmd="crm_resource -a -r dummy"
test_assert $CRM_EX_OK 0
desc="Ban dummy from node2"
cmd="crm_resource -r dummy -B -N node2"
test_assert $CRM_EX_OK
desc="Relocate resources due to ban"
cmd="crm_simulate --live-check --in-place -S"
test_assert $CRM_EX_OK
desc="Move dummy to node1"
cmd="crm_resource -r dummy -M -N node1"
test_assert $CRM_EX_OK
desc="Clear implicit constraints for dummy on node2"
cmd="crm_resource -r dummy -U -N node2"
test_assert $CRM_EX_OK
desc="Drop the status section"
cmd="cibadmin -R -o status --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a clone"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK 0
desc="Create a resource meta attribute"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Update resource meta attribute with duplicates (force clone)"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Update child resource meta attribute with duplicates"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute with duplicates"
cmd="crm_resource -r test-clone --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Delete resource meta attribute in parent"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the primitive"
cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force"
test_assert $CRM_EX_OK
desc="Update existing resource meta attribute"
cmd="crm_resource -r test-clone --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in the parent"
cmd="crm_resource -r test-clone --meta -p is-managed -v true --force"
test_assert $CRM_EX_OK
desc="Copy resources"
cmd="cibadmin -Q -o resources > $TMPXML"
test_assert $CRM_EX_OK 0
desc="Delete resource parent meta attribute (force)"
cmd="crm_resource -r test-clone --meta -d is-managed --force"
test_assert $CRM_EX_OK
desc="Restore duplicates"
cmd="cibadmin -R -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Delete resource child meta attribute"
cmd="crm_resource -r test-primitive --meta -d is-managed"
test_assert $CRM_EX_OK
cibadmin -C -o resources --xml-text ''
desc="Create a resource meta attribute in dummy1"
cmd="crm_resource -r dummy1 --meta -p is-managed -v true"
test_assert $CRM_EX_OK
desc="Create a resource meta attribute in dummy-group"
cmd="crm_resource -r dummy-group --meta -p is-managed -v false"
test_assert $CRM_EX_OK
cibadmin -D -o resource --xml-text ''
desc="Specify a lifetime when moving a resource"
cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H"
test_assert $CRM_EX_OK
desc="Try to move a resource previously moved with a lifetime"
cmd="crm_resource -r dummy --move --node node1"
test_assert $CRM_EX_OK
desc="Ban dummy from node1 for a short time"
cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S"
test_assert $CRM_EX_OK
desc="Remove expired constraints"
sleep 2
cmd="crm_resource --clear --expired"
test_assert $CRM_EX_OK
# Clear has already been tested elsewhere, but we need to get rid of the
# constraints so testing delete works. It won't delete if there's still
# a reference to the resource somewhere.
desc="Clear all implicit constraints for dummy"
cmd="crm_resource -r dummy -U"
test_assert $CRM_EX_OK
desc="Delete a resource"
cmd="crm_resource -D -r dummy -t primitive"
test_assert $CRM_EX_OK
unset CIB_shadow
unset CIB_shadow_dir
rm -f "$TMPXML" "$TMPORIG"
desc="Create an XML patchset"
cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml"
test_assert $CRM_EX_ERROR 0
}
INVALID_PERIODS=(
"2019-01-01 00:00:00Z" # Start with no end
"2019-01-01 00:00:00Z/" # Start with only a trailing slash
"PT2S/P1M" # Two durations
"2019-13-01 00:00:00Z/P1M" # Out-of-range month
"20191077T15/P1M" # Out-of-range day
"2019-10-01T25:00:00Z/P1M" # Out-of-range hour
"2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00
"PT5H/20191001T007000Z" # Out-of-range minute
"2019-10-01 00:00:80Z/P1M" # Out-of-range second
"2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour
"20191001T000010 -00:61/P1M" # Out-of-range offset minute
"P1Y/2019-02-29 00:00:00Z" # Feb. 29 in non-leap-year
"2019-01-01 00:00:00Z/P" # Duration with no values
"P1Z/2019-02-20 00:00:00Z" # Invalid duration unit
"P1YM/2019-02-20 00:00:00Z" # No number for duration unit
)
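# For contrast, the valid shapes a period specification can take (not
# exercised here; shown only as a sketch):
# "2019-01-01 00:00:00Z/2019-02-01 00:00:00Z" # start/end
# "2019-01-01 00:00:00Z/P1M" # start/duration
# "P1M/2019-02-01 00:00:00Z" # duration/end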
function test_dates() {
# Ensure invalid period specifications are rejected
for spec in '' "${INVALID_PERIODS[@]}"; do
desc="Invalid period - [$spec]"
cmd="iso8601 -p \"$spec\""
test_assert $CRM_EX_INVALID_PARAM 0
done
desc="2014-01-01 00:30:00 - 1 Hour"
cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - Feb 29 in leap year"
cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="Valid date - using 'T' and offset"
cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"
test_assert $CRM_EX_OK 0
desc="24:00:00 equivalent to 00:00:00 of next day"
cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do
desc="20$y-W01-7"
cmd="iso8601 -d '20$y-W01-7 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-7 - round-trip"
cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1"
cmd="iso8601 -d '20$y-W01-1 00Z'"
test_assert $CRM_EX_OK 0
desc="20$y-W01-1 - round-trip"
cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'"
test_assert $CRM_EX_OK 0
done
desc="2009-W53-07"
cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="epoch + 2 Years 5 Months 6 Minutes"
cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 1 Month"
cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 2 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-01-31 + 3 Months"
cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"
test_assert $CRM_EX_OK 0
desc="2009-03-31 - 1 Month"
cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"
test_assert $CRM_EX_OK 0
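# The 2009-01-31 tests above illustrate end-of-month clamping: adding a
# month lands on the same day when it exists (+2M = 2009-03-31) and is
# clamped to the month's last day when it doesn't (+1M = 2009-02-28).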
desc="2038-01-01 + 3 Months"
cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"
test_assert $CRM_EX_OK 0
}
function test_acl_loop() {
local TMPXML
TMPXML="$1"
# Make sure we're rejecting things for the right reasons
export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl
export PCMK_stderr=1
CIB_user=root cibadmin --replace --xml-text ''
### no ACL ###
export CIB_user=unknownguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set enable-acl"
cmd="crm_attribute -n enable-acl -v false"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
export CIB_user=root
desc="$CIB_user: Query configuration"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
desc="$CIB_user: Set stonith-enabled"
cmd="crm_attribute -n stonith-enabled -v true"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource"
cmd="cibadmin -C -o resources --xml-text ''"
test_assert $CRM_EX_OK
### deny /cib permission ###
export CIB_user=l33t-haxor
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Stopped"
test_assert $CRM_EX_OK
desc="$CIB_user: Query a resource meta attribute"
cmd="crm_resource -r dummy --meta -g target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Remove a resource meta attribute"
cmd="crm_resource -r dummy --meta -d target-role"
test_assert $CRM_EX_OK
desc="$CIB_user: Create a resource meta attribute"
cmd="crm_resource -r dummy --meta -p target-role -v Started"
test_assert $CRM_EX_OK
### read //meta_attributes ###
export CIB_user=badidea
desc="$CIB_user: Query configuration - implied deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
### deny /cib, read //meta_attributes ###
export CIB_user=betteridea
desc="$CIB_user: Query configuration - explicit deny"
cmd="cibadmin -Q"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
### observer role ###
export CIB_user=niceguy
desc="$CIB_user: Replace - remove acls"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create resource"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny)"
cmd="cibadmin --replace --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
### admin role ###
CIB_user=bob
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (direct allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### super_user role ###
export CIB_user=joe
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (inherited allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_writer role ###
export CIB_user=mike
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (allow overrides deny)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK 0
### rsc_denied role ###
export CIB_user=chris
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - create attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - modify attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
# Set as root since setting as chris failed
CIB_user=root cibadmin --modify --xml-text ''
CIB_user=root cibadmin -Q > "$TMPXML"
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text ''
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql
desc="$CIB_user: Replace - delete attribute (deny overrides allow)"
cmd="cibadmin --replace -o resources --xml-file $TMPXML"
test_assert $CRM_EX_INSUFFICIENT_PRIV 0
}
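# The loop above walks one user per ACL role: unknownguy (no ACL),
# l33t-haxor (deny on /cib), niceguy (observer), bob (admin, direct allow),
# joe (super_user, inherited allow), mike (rsc_writer, allow overrides deny)
# and chris (rsc_denied, deny overrides allow).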
function test_acls() {
local SHADOWPATH
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.3 2>&1
export CIB_shadow=$shadow
cat < "$TMPXML"
EOF
desc="Configure some ACLs"
cmd="cibadmin -M -o acls --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Enable ACLs"
cmd="crm_attribute -n enable-acl -v true"
test_assert $CRM_EX_OK
desc="Set cluster option"
cmd="crm_attribute -n no-quorum-policy -v ignore"
test_assert $CRM_EX_OK
desc="New ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Another ACL"
cmd="cibadmin --create -o acls --xml-text ''"
test_assert $CRM_EX_OK
desc="Updated ACL"
cmd="cibadmin --replace -o acls --xml-text ''"
test_assert $CRM_EX_OK
test_acl_loop "$TMPXML"
printf "\n\n !#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!\n"
printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2
export CIB_user=root
desc="$CIB_user: Upgrade to latest CIB schema"
cmd="cibadmin --upgrade --force -V"
test_assert $CRM_EX_OK
SHADOWPATH="$(crm_shadow --file)"
# sed -i isn't portable :-(
cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # to keep permissions
sed -e 's/epoch=.2/epoch=\"6/g' -e 's/admin_epoch=.1/admin_epoch=\"0/g' \
"$SHADOWPATH" > "${SHADOWPATH}.$$"
mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
test_acl_loop "$TMPXML"
unset CIB_shadow_dir
rm -f "$TMPXML"
}
function test_validity() {
local TMPGOOD
local TMPBAD
TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX)
TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.2 2>&1
export CIB_shadow=$shadow
export PCMK_trace_functions=apply_upgrade,update_validation,cli_config_update
export PCMK_stderr=1
cibadmin -C -o resources --xml-text ''
cibadmin -C -o resources --xml-text ''
cibadmin -C -o constraints --xml-text ''
cibadmin -Q > "$TMPGOOD"
desc="Try to make resulting CIB invalid (enum violation)"
cmd="cibadmin -M -o constraints --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (enum violation)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid (unrecognized validate-with)"
cmd="cibadmin -M --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid CIB (unrecognized validate-with)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_CONFIG 0
desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)"
cmd="cibadmin -C -o configuration --xml-text ''"
test_assert $CRM_EX_CONFIG
sed 's|||' "$TMPGOOD" > "$TMPBAD"
desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB valid, although without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with valid CIB, but without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
# this will just disable validation and accept the config, outputting
# validation errors
sed -e 's|[ ][ ]*validate-with="[^"]*"||' \
-e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \
"$TMPGOOD" > "$TMPBAD"
desc="Make resulting CIB invalid, and without validate-with attribute"
cmd="cibadmin -R --xml-file $TMPBAD"
test_assert $CRM_EX_OK
desc="Run crm_simulate with invalid CIB, also without validate-with attribute"
cmd="crm_simulate -x $TMPBAD -S"
test_assert $CRM_EX_OK 0
unset CIB_shadow_dir
rm -f "$TMPGOOD" "$TMPBAD"
}
test_upgrade() {
local TMPXML
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-2.10 2>&1
export CIB_shadow=$shadow
desc="Set stonith-enabled=false"
cmd="crm_attribute -n stonith-enabled -v false"
test_assert $CRM_EX_OK
cat < "$TMPXML"
EOF
desc="Configure the initial resource"
cmd="cibadmin -M -o resources --xml-file $TMPXML"
test_assert $CRM_EX_OK
desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)"
cmd="cibadmin --upgrade --force -V -V"
test_assert $CRM_EX_OK
desc="Query a resource instance attribute (shall survive)"
cmd="crm_resource -r mySmartFuse -g requires"
test_assert $CRM_EX_OK
unset CIB_shadow_dir
rm -f "$TMPXML"
}
test_rules() {
local TMPXML
export CIB_shadow_dir="${shadow_dir}"
$VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1
export CIB_shadow=$shadow
cibadmin -C -o resources --xml-text ''
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
if [ "$(uname)" == "FreeBSD" ]; then
tomorrow=$(date -v+1d +"%F %T %z")
else
tomorrow=$(date --date=tomorrow +"%F %T %z")
fi
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX)
cat < "$TMPXML"
EOF
cibadmin -C -o constraints -x "$TMPXML"
rm -f "$TMPXML"
desc="Try to check a rule that doesn't exist"
cmd="crm_rule -c -r blahblah"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule that has too many date_expressions"
cmd="crm_rule -c -r cli-rule-too-many-date-expressions"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
desc="Verify basic rule is expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired"
test_assert $CRM_EX_EXPIRED
desc="Verify basic rule worked in the past"
cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"
test_assert $CRM_EX_OK
desc="Verify basic rule is not yet in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet"
test_assert $CRM_EX_NOT_YET_IN_EFFECT
desc="Verify date_spec rule with years has expired"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years"
test_assert $CRM_EX_EXPIRED
desc="Verify date_spec rule with years is in effect"
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"
test_assert $CRM_EX_OK
desc="Try to check a rule whose date_spec does not contain years="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule whose date_spec contains years= and moon="
cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon"
test_assert $CRM_EX_NOSUCH
desc="Try to check a rule with no date_expression"
cmd="crm_rule -c -r cli-no-date_expression-rule"
test_assert $CRM_EX_UNIMPLEMENT_FEATURE
unset CIB_shadow_dir
}
# Process command-line arguments
while [ $# -gt 0 ]; do
case "$1" in
-t)
tests="$2"
shift 2
;;
-V|--verbose)
verbose=1
shift
;;
-v|--valgrind)
export G_SLICE=always-malloc
VALGRIND_CMD="valgrind $VALGRIND_OPTS"
shift
;;
-s)
do_save=1
shift
;;
-p)
export PATH="$2:$PATH"
shift
;;
--help)
echo "$USAGE_TEXT"
exit $CRM_EX_OK
;;
*)
echo "error: unknown option $1"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
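# Example invocations, assuming the script is installed as cts-cli:
# cts-cli -t "dates tools" # run only the dates and tools suites
# cts-cli -V # show a diff against the expected output on failure
# cts-cli -v # run each command under valgrind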
for t in $tests; do
case "$t" in
dates) ;;
tools) ;;
acls) ;;
validity) ;;
upgrade) ;;
rules) ;;
crm_mon) ;;
*)
echo "error: unknown test $t"
echo
echo "$USAGE_TEXT"
exit $CRM_EX_USAGE
;;
esac
done
# Check whether we're running from source directory
SRCDIR=$(dirname $test_home)
if [ -x "$SRCDIR/tools/crm_simulate" ]; then
export PATH="$SRCDIR/tools:$PATH"
echo "Using local binaries from: $SRCDIR/tools"
if [ -x "$SRCDIR/xml" ]; then
export PCMK_schema_directory="$SRCDIR/xml"
echo "Using local schemas from: $PCMK_schema_directory"
fi
fi
for t in $tests; do
echo "Testing $t"
TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX)
eval TMPFILE_$t="$TMPFILE"
test_$t > "$TMPFILE"
# last-run= and last-rc-change= are always numeric in the CIB. However,
# for the crm_mon test we also need to compare against the XML output of
# the crm_mon program. There, these are shown as human readable strings
# (like the output of the `date` command).
sed -e 's/cib-last-written.*>/>/'\
-e 's/ last-run=\"[A-Za-z0-9: ]*\"//'\
-e 's/Last updated: .*/Last updated:/' \
-e 's/Last change: .*/Last change:/' \
-e 's/(version .*)/(version)/' \
-e 's/last_update time=\".*\"/last_update time=\"\"/' \
-e 's/last_change time=\".*\"/last_change time=\"\"/' \
-e 's/ version=\".*\" / version=\"\" /' \
-e 's/request=\".*crm_mon/request=\"crm_mon/' \
-e 's/crm_feature_set="[^"]*" //'\
-e 's/validate-with="[^"]*" //'\
-e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\
-e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \
-e 's/ last-rc-change=\"[A-Za-z0-9: ]*\"//'\
-e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\
-e 's/^Entity: line [0-9][0-9]*: //'\
-e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \
-e 's/^Migration will take effect until: .*/Migration will take effect until:/' \
-e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \
-e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \
-e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \
"$TMPFILE" > "${TMPFILE}.$$"
mv -- "${TMPFILE}.$$" "$TMPFILE"
if [ $do_save -eq 1 ]; then
cp "$TMPFILE" $test_home/cli/regression.$t.exp
fi
done
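# Note: when -s is given, the normalized output above has just replaced the
# expected output, e.g. (hypothetical) "cts-cli -s -t crm_mon" regenerates
# cli/regression.crm_mon.exp.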
rm -rf "${shadow_dir}"
failed=0
if [ $verbose -eq 1 ]; then
echo -e "\n\nResults"
fi
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
if [ $verbose -eq 1 ]; then
diff -wu $test_home/cli/regression.$t.exp "$TMPFILE"
else
diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1
fi
if [ $? -ne 0 ]; then
failed=1
fi
done
echo -e "\n\nSummary"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
grep -e '^\* \(Passed\|Failed\)' "$TMPFILE"
done
if [ $num_errors -ne 0 ]; then
echo "$num_errors tests failed; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo " $TMPFILE"
done
exit $CRM_EX_ERROR
elif [ $failed -eq 1 ]; then
echo "$num_passed tests passed but output was unexpected; see output in:"
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
echo " $TMPFILE"
done
exit $CRM_EX_DIGEST
else
echo $num_passed tests passed
for t in $tests; do
eval TMPFILE="\$TMPFILE_$t"
rm -f "$TMPFILE"
done
crm_shadow --force --delete $shadow >/dev/null 2>&1
exit $CRM_EX_OK
fi
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index f9e57e168c..767b6bcd8e 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,480 +1,486 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
# include
# include
# include
# include
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args)
# define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "", fmt, ##args)
# define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "", fmt, ##args)
# define pe_err(fmt...) { was_processing_error = TRUE; crm_config_error = TRUE; crm_err(fmt); }
# define pe_warn(fmt...) { was_processing_warning = TRUE; crm_config_warning = TRUE; crm_warn(fmt); }
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
# define pe_set_action_bit(action, bit) action->flags = crm_set_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit)
# define pe_clear_action_bit(action, bit) action->flags = crm_clear_bit(__FUNCTION__, __LINE__, action->uuid, action->flags, bit)
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = 0x0001,
pe_wo_restart_type = 0x0002,
pe_wo_role_after = 0x0004,
pe_wo_poweroff = 0x0008,
pe_wo_require_all = 0x0010,
pe_wo_order_score = 0x0020,
pe_wo_neg_threshold = 0x0040,
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (is_not_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
set_bit(pe_wo, pe_wo_bit); \
} \
} while (0);
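/* Hypothetical usage sketch (not part of this patch):
 *
 *     pe_warn_once(pe_wo_restart_type,
 *                  "Support for restart-type is deprecated");
 *
 * The bit recorded in pe_wo suppresses repeats of the same warning for as
 * long as pe_wo stays set (see "warnings we don't want to print every
 * transition" above).
 */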
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
pe_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
GListPtr node_list_rh; // List of pe_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
enum pe_ordering type;
void *lh_opaque;
pe_resource_t *lh_rsc;
pe_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
pe_resource_t *rh_rsc;
pe_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
typedef struct notify_data_s {
GSList *keys; // Environment variable name/value pairs
const char *action;
pe_action_t *pre;
pe_action_t *post;
pe_action_t *pre_done;
pe_action_t *post_done;
GListPtr active; /* notify_entry_t* */
GListPtr inactive; /* notify_entry_t* */
GListPtr start; /* notify_entry_t* */
GListPtr stop; /* notify_entry_t* */
GListPtr demote; /* notify_entry_t* */
GListPtr promote; /* notify_entry_t* */
GListPtr master; /* notify_entry_t* */
GListPtr slave; /* notify_entry_t* */
GHashTable *allowed_nodes;
} notify_data_t;
bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node);
int pe__add_scores(int score1, int score2);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set);
pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
void pe_metadata(void);
void verify_pe_options(GHashTable * options);
void common_update_score(pe_resource_t * rsc, const char *id, int score);
void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set);
gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
int flags);
gboolean native_active(pe_resource_t * rsc, gboolean all);
gboolean group_active(pe_resource_t * rsc, gboolean all);
gboolean clone_active(pe_resource_t * rsc, gboolean all);
gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
void native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data);
void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...);
char *pe__node_display_name(pe_node_t *node, bool print_detail);
int pe__ban_html(pcmk__output_t *out, va_list args);
int pe__ban_text(pcmk__output_t *out, va_list args);
int pe__ban_xml(pcmk__output_t *out, va_list args);
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_html(pcmk__output_t *out, va_list args);
int pe__clone_text(pcmk__output_t *out, va_list args);
int pe__cluster_counts_html(pcmk__output_t *out, va_list args);
int pe__cluster_counts_text(pcmk__output_t *out, va_list args);
int pe__cluster_counts_xml(pcmk__output_t *out, va_list args);
int pe__cluster_dc_html(pcmk__output_t *out, va_list args);
int pe__cluster_dc_text(pcmk__output_t *out, va_list args);
int pe__cluster_dc_xml(pcmk__output_t *out, va_list args);
int pe__cluster_maint_mode_html(pcmk__output_t *out, va_list args);
int pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args);
int pe__cluster_options_html(pcmk__output_t *out, va_list args);
int pe__cluster_options_log(pcmk__output_t *out, va_list args);
int pe__cluster_options_text(pcmk__output_t *out, va_list args);
int pe__cluster_options_xml(pcmk__output_t *out, va_list args);
int pe__cluster_stack_html(pcmk__output_t *out, va_list args);
int pe__cluster_stack_text(pcmk__output_t *out, va_list args);
int pe__cluster_stack_xml(pcmk__output_t *out, va_list args);
int pe__cluster_summary(pcmk__output_t *out, va_list args);
int pe__cluster_summary_html(pcmk__output_t *out, va_list args);
int pe__cluster_times_html(pcmk__output_t *out, va_list args);
int pe__cluster_times_xml(pcmk__output_t *out, va_list args);
int pe__cluster_times_text(pcmk__output_t *out, va_list args);
int pe__failed_action_text(pcmk__output_t *out, va_list args);
int pe__failed_action_xml(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_html(pcmk__output_t *out, va_list args);
int pe__group_text(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__node_attribute_html(pcmk__output_t *out, va_list args);
int pe__node_attribute_text(pcmk__output_t *out, va_list args);
int pe__node_attribute_xml(pcmk__output_t *out, va_list args);
int pe__node_list_html(pcmk__output_t *out, va_list args);
int pe__node_list_text(pcmk__output_t *out, va_list args);
int pe__node_list_xml(pcmk__output_t *out, va_list args);
int pe__op_history_text(pcmk__output_t *out, va_list args);
int pe__op_history_xml(pcmk__output_t *out, va_list args);
int pe__resource_history_text(pcmk__output_t *out, va_list args);
int pe__resource_history_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
int pe__ticket_html(pcmk__output_t *out, va_list args);
int pe__ticket_text(pcmk__output_t *out, va_list args);
int pe__ticket_xml(pcmk__output_t *out, va_list args);
void native_free(pe_resource_t * rsc);
void group_free(pe_resource_t * rsc);
void clone_free(pe_resource_t * rsc);
void pe__free_bundle(pe_resource_t *rsc);
enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
gboolean current);
void pe__count_common(pe_resource_t *rsc);
void pe__count_bundle(pe_resource_t *rsc);
gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent,
pe_working_set_t * data_set);
void common_free(pe_resource_t * rsc);
pe_node_t *pe__copy_node(const pe_node_t *this_node);
extern time_t get_effective_time(pe_working_set_t * data_set);
/* Failure handling utilities (from failcounts.c) */
// bit flags for fail count handling options
enum pe_fc_flags_e {
pe_fc_default = 0x00,
pe_fc_effective = 0x01, // don't count expired failures
pe_fc_fillers = 0x02, // if container, include filler failures in count
};
int pe_get_failcount(pe_node_t *node, pe_resource_t *rsc, time_t *last_failure,
uint32_t flags, xmlNode *xml_op,
pe_working_set_t *data_set);
pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node,
const char *reason,
pe_working_set_t *data_set);
/* Functions for finding/counting a resource's active nodes */
pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
unsigned int *count_all,
unsigned int *count_clean);
pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
unsigned int *count);
static inline pe_node_t *
pe__current_node(const pe_resource_t *rsc)
{
return pe__find_active_on(rsc, NULL, NULL);
}
/* Binary-like operators for lists of nodes */
extern void node_list_exclude(GHashTable * list, GListPtr list2, gboolean merge_scores);
GHashTable *pe__node_list2table(GList *list);
static inline gpointer
pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
{
if (hash) {
return g_hash_table_lookup(hash, key);
}
return NULL;
}
extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
/* Printing functions for debug */
extern void print_node(const char *pre_text, pe_node_t * node, gboolean details);
extern void print_str_str(gpointer key, gpointer value, gpointer user_data);
extern void pe__output_node(pe_node_t * node, gboolean details, pcmk__output_t *out);
extern void dump_node_capacity(int level, const char *comment, pe_node_t * node);
extern void dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node);
void pe__show_node_weights_as(const char *file, const char *function,
int line, bool to_log, pe_resource_t *rsc,
const char *comment, GHashTable *nodes);
#define pe__show_node_weights(level, rsc, text, nodes) \
pe__show_node_weights_as(__FILE__, __FUNCTION__, __LINE__, \
(level), (rsc), (text), (nodes))
/* Sorting functions */
extern gint sort_rsc_priority(gconstpointer a, gconstpointer b);
extern gint sort_rsc_index(gconstpointer a, gconstpointer b);
extern xmlNode *find_rsc_op_entry(pe_resource_t * rsc, const char *key);
extern pe_action_t *custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node,
gboolean optional, gboolean foo, pe_working_set_t * data_set);
# define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \
optional, TRUE, data_set);
# define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
# define stopped_action(rsc, node, optional) custom_action( \
rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \
optional, TRUE, data_set);
# define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \
optional, TRUE, data_set);
# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD, 0)
# define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
rsc, start_key(rsc), CRMD_ACTION_START, node, \
optional, TRUE, data_set)
# define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
# define started_action(rsc, node, optional) custom_action( \
rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \
optional, TRUE, data_set)
# define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \
optional, TRUE, data_set)
# define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
# define promoted_action(rsc, node, optional) custom_action( \
rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \
optional, TRUE, data_set)
# define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \
optional, TRUE, data_set)
# define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
# define demoted_action(rsc, node, optional) custom_action( \
rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \
optional, TRUE, data_set)
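/* Expansion sketch, straight from the macros above: start_action(rsc, node,
 * TRUE) becomes
 *
 *     custom_action(rsc, pcmk__op_key(rsc->id, CRMD_ACTION_START, 0),
 *                   CRMD_ACTION_START, node, TRUE, TRUE, data_set)
 *
 * so every *_action macro requires a data_set variable in scope at the
 * point of use.
 */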
extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
pe_working_set_t *data_set);
extern pe_action_t *find_first_action(GListPtr input, const char *uuid, const char *task,
pe_node_t * on_node);
extern enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name,
gboolean allow_non_atomic);
extern GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pe_node_t *on_node);
extern GListPtr find_recurring_actions(GListPtr input, pe_node_t * not_on_node);
GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pe_action_t * action);
extern void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
pe_working_set_t * data_set);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
extern gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e *role);
extern pe_resource_t *find_clone_instance(pe_resource_t * rsc, const char *sub_id,
pe_working_set_t * data_set);
extern void destroy_ticket(gpointer data);
extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
// Functions for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(pe_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return FALSE;
}
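/* Illustrative sketch (assumed semantics, inferred from the helpers above):
 *
 *     pe_base_name_end("ping:0");  // points at the 'g', the last base char
 *     clone_strip("ping:0");       // returns a newly allocated "ping"
 *     clone_zero("ping");          // returns a newly allocated "ping:0"
 *
 * so pe_base_name_eq(rsc, "ping") matches a resource whose id is "ping:3".
 */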
int pe__target_rc_from_xml(xmlNode *xml_op);
gint sort_node_uname(gconstpointer a, gconstpointer b);
bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any);
enum rsc_digest_cmp_val {
/*! Digests are the same */
RSC_DIGEST_MATCH = 0,
/*! Params that require a restart changed */
RSC_DIGEST_RESTART,
/*! Some parameter changed. */
RSC_DIGEST_ALL,
/*! rsc op didn't have a digest associated with it, so
* it is unknown if parameters changed or not. */
RSC_DIGEST_UNKNOWN,
};
typedef struct op_digest_cache_s {
enum rsc_digest_cmp_val rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
char *digest_all_calc;
char *digest_secure_calc;
char *digest_restart_calc;
} op_digest_cache_t;
op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
pe_working_set_t * data_set);
pe_action_t *pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set);
void trigger_unfencing(
pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set);
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
void pe_action_set_flag_reason(const char *function, long line, pe_action_t *action, pe_action_t *reason, const char *text, enum pe_action_flags flags, bool overwrite);
#define pe_action_required(action, reason, text) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, text, pe_action_optional, FALSE)
#define pe_action_implies(action, reason, flag) pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, reason, NULL, flag, FALSE)
void set_bit_recursive(pe_resource_t * rsc, unsigned long long flag);
void clear_bit_recursive(pe_resource_t * rsc, unsigned long long flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
void print_rscs_brief(GListPtr rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all);
void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set);
void common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, long options);
pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
const pe_node_t *node);
bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
const char *pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml,
const char *field);
const char *pe_node_attribute_calculated(const pe_node_t *node,
const char *name,
const pe_resource_t *rsc);
const char *pe_node_attribute_raw(pe_node_t *node, const char *name);
bool pe__is_universal_clone(pe_resource_t *rsc,
pe_working_set_t *data_set);
void pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node,
enum pe_check_parameters, pe_working_set_t *data_set);
void pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*,
enum pe_check_parameters,
pe_working_set_t*));
void pe__free_param_checks(pe_working_set_t *data_set);
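A sketch of the callback shape that pe__foreach_param_check() expects (the callback name and body are hypothetical):

static void
param_check_cb(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
               enum pe_check_parameters check, pe_working_set_t *data_set)
{
    // e.g. queue a fail-count clear if this operation's parameters changed
}

// ... later:
// pe__foreach_param_check(data_set, param_check_cb);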
bool pe__shutdown_requested(pe_node_t *node);
void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
#define BOOL2STR(x) ((x) ? "true" : "false")
/*!
* \internal
* \brief Register xml formatting message functions.
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set);
bool pe__resource_is_disabled(pe_resource_t *rsc);
pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set);
GListPtr pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
GListPtr pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
bool pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list);
+GListPtr pe__filter_rsc_list(GListPtr rscs, GListPtr filter);
bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node);
+gboolean pe__bundle_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
+gboolean pe__clone_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
+gboolean pe__group_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
+gboolean pe__native_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent);
+
#endif
diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index c4ff70773d..9fea637ced 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -1,538 +1,539 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_TYPES__H
# define PE_TYPES__H
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Data types for cluster status
* \ingroup pengine
*/
# include <stdbool.h> // bool
# include <sys/types.h> // time_t
# include <glib.h> // gboolean, guint, GList, GHashTable
# include <crm/crm.h> // GListPtr
# include <crm/common/iso8601.h>
# include <crm/pengine/common.h>
typedef struct pe_node_s pe_node_t;
typedef struct pe_action_s pe_action_t;
typedef struct pe_resource_s pe_resource_t;
typedef struct pe_working_set_s pe_working_set_t;
enum pe_obj_types {
pe_unknown = -1,
pe_native = 0,
pe_group = 1,
pe_clone = 2,
pe_container = 3,
};
typedef struct resource_object_functions_s {
gboolean (*unpack) (pe_resource_t*, pe_working_set_t*);
pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search,
const pe_node_t *node, int flags);
/* parameter result must be free'd */
char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*,
pe_working_set_t*);
void (*print) (pe_resource_t*, const char*, long, void*);
gboolean (*active) (pe_resource_t*, gboolean);
enum rsc_role_e (*state) (const pe_resource_t*, gboolean);
pe_node_t *(*location) (const pe_resource_t*, GList**, int);
void (*free) (pe_resource_t*);
void (*count) (pe_resource_t*);
+ gboolean (*is_filtered) (pe_resource_t*, GListPtr, gboolean);
} resource_object_functions_t;
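A minimal sketch (not part of the patch) of how the new method is meant to be invoked through the vtable; FALSE is the new check_parent argument:

static gboolean
rsc_is_filtered(pe_resource_t *rsc, GListPtr only_rsc)
{
    // Dispatches to pe__native_is_filtered(), pe__group_is_filtered(),
    // pe__clone_is_filtered(), or pe__bundle_is_filtered() by variant
    return rsc->fns->is_filtered(rsc, only_rsc, FALSE);
}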
typedef struct resource_alloc_functions_s resource_alloc_functions_t;
enum pe_quorum_policy {
no_quorum_freeze,
no_quorum_stop,
no_quorum_ignore,
no_quorum_suicide,
no_quorum_demote
};
enum node_type {
node_ping,
node_member,
node_remote
};
//! \deprecated will be removed in a future release
enum pe_restart {
pe_restart_restart,
pe_restart_ignore
};
//! Determine behavior of pe_find_resource_with_flags()
enum pe_find {
pe_find_renamed = 0x001, //!< match resource ID or LRM history ID
pe_find_anon = 0x002, //!< match base name of anonymous clone instances
pe_find_clone = 0x004, //!< match only clone instances
pe_find_current = 0x008, //!< match resource active on specified node
pe_find_inactive = 0x010, //!< match resource not running anywhere
pe_find_any = 0x020, //!< match base name of any clone instance
};
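For example (illustrative resource ID), the flags combine as a bit mask:

// Match a resource named "ping", or any instance of an anonymous
// clone whose base name is "ping"
pe_resource_t *match =
    pe_find_resource_with_flags(data_set->resources, "ping",
                                pe_find_renamed|pe_find_anon);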
// @TODO Make these an enum
# define pe_flag_have_quorum 0x00000001ULL
# define pe_flag_symmetric_cluster 0x00000002ULL
# define pe_flag_maintenance_mode 0x00000008ULL
# define pe_flag_stonith_enabled 0x00000010ULL
# define pe_flag_have_stonith_resource 0x00000020ULL
# define pe_flag_enable_unfencing 0x00000040ULL
# define pe_flag_concurrent_fencing 0x00000080ULL
# define pe_flag_stop_rsc_orphans 0x00000100ULL
# define pe_flag_stop_action_orphans 0x00000200ULL
# define pe_flag_stop_everything 0x00000400ULL
# define pe_flag_start_failure_fatal 0x00001000ULL
# define pe_flag_remove_after_stop 0x00002000ULL
# define pe_flag_startup_fencing 0x00004000ULL
# define pe_flag_shutdown_lock 0x00008000ULL
# define pe_flag_startup_probes 0x00010000ULL
# define pe_flag_have_status 0x00020000ULL
# define pe_flag_have_remote_nodes 0x00040000ULL
# define pe_flag_quick_location 0x00100000ULL
# define pe_flag_sanitized 0x00200000ULL
# define pe_flag_stdout 0x00400000ULL
//! Don't count total, disabled and blocked resource instances
# define pe_flag_no_counts 0x00800000ULL
/*! Skip deprecated code that is kept solely for backward API compatibility.
* (Internal code should always set this.)
*/
# define pe_flag_no_compat 0x01000000ULL
struct pe_working_set_s {
xmlNode *input;
crm_time_t *now;
/* options extracted from the input */
char *dc_uuid;
pe_node_t *dc_node;
const char *stonith_action;
const char *placement_strategy;
unsigned long long flags;
int stonith_timeout;
enum pe_quorum_policy no_quorum_policy;
GHashTable *config_hash;
GHashTable *tickets;
// Actions for which there can be only one (e.g. fence nodeX)
GHashTable *singletons;
GListPtr nodes;
GListPtr resources;
GListPtr placement_constraints;
GListPtr ordering_constraints;
GListPtr colocation_constraints;
GListPtr ticket_constraints;
GListPtr actions;
xmlNode *failed;
xmlNode *op_defaults;
xmlNode *rsc_defaults;
/* stats */
int num_synapse;
int max_valid_nodes; //! Deprecated (will be removed in a future release)
int order_id;
int action_id;
/* final output */
xmlNode *graph;
GHashTable *template_rsc_sets;
const char *localhost;
GHashTable *tags;
int blocked_resources;
int disabled_resources;
GList *param_check; // History entries that need to be checked
GList *stop_needed; // Containers that need stop actions
time_t recheck_by; // Hint to controller to re-run scheduler by this time
int ninstances; // Total number of resource instances
guint shutdown_lock; // How long (seconds) to lock resources to shutdown node
int priority_fencing_delay; // Priority fencing delay
};
enum pe_check_parameters {
/* Clear fail count if parameters changed for un-expired start or monitor
* last_failure.
*/
pe_check_last_failure,
/* Clear fail count if parameters changed for start, monitor, promote, or
* migrate_from actions for active resources.
*/
pe_check_active,
};
struct pe_node_shared_s {
const char *id;
const char *uname;
enum node_type type;
/* @TODO convert these flags into a bitfield */
gboolean online;
gboolean standby;
gboolean standby_onfail;
gboolean pending;
gboolean unclean;
gboolean unseen;
gboolean shutdown;
gboolean expected_up;
gboolean is_dc;
gboolean maintenance;
gboolean rsc_discovery_enabled;
gboolean remote_requires_reset;
gboolean remote_was_fenced;
gboolean remote_maintenance; /* what the remote-rsc is thinking */
gboolean unpacked;
int num_resources;
pe_resource_t *remote_rsc;
GListPtr running_rsc; /* pe_resource_t* */
GListPtr allocated_rsc; /* pe_resource_t* */
GHashTable *attrs; /* char* => char* */
GHashTable *utilization;
GHashTable *digest_cache; //!< cache of calculated resource digests
int priority; // calculated based on the priority of resources running on the node
};
struct pe_node_s {
int weight;
gboolean fixed;
int count;
struct pe_node_shared_s *details;
int rsc_discover_mode;
};
# define pe_rsc_orphan 0x00000001ULL
# define pe_rsc_managed 0x00000002ULL
# define pe_rsc_block 0x00000004ULL
# define pe_rsc_orphan_container_filler 0x00000008ULL
# define pe_rsc_notify 0x00000010ULL
# define pe_rsc_unique 0x00000020ULL
# define pe_rsc_fence_device 0x00000040ULL
# define pe_rsc_promotable 0x00000080ULL
# define pe_rsc_provisional 0x00000100ULL
# define pe_rsc_allocating 0x00000200ULL
# define pe_rsc_merging 0x00000400ULL
# define pe_rsc_stop 0x00001000ULL
# define pe_rsc_reload 0x00002000ULL
# define pe_rsc_allow_remote_remotes 0x00004000ULL
# define pe_rsc_failed 0x00010000ULL
# define pe_rsc_runnable 0x00040000ULL
# define pe_rsc_start_pending 0x00080000ULL
# define pe_rsc_starting 0x00100000ULL
# define pe_rsc_stopping 0x00200000ULL
# define pe_rsc_allow_migrate 0x00800000ULL
# define pe_rsc_failure_ignored 0x01000000ULL
# define pe_rsc_maintenance 0x04000000ULL
# define pe_rsc_is_container 0x08000000ULL
# define pe_rsc_needs_quorum 0x10000000ULL
# define pe_rsc_needs_fencing 0x20000000ULL
# define pe_rsc_needs_unfencing 0x40000000ULL
enum pe_graph_flags {
pe_graph_none = 0x00000,
pe_graph_updated_first = 0x00001,
pe_graph_updated_then = 0x00002,
pe_graph_disable = 0x00004,
};
/* *INDENT-OFF* */
enum pe_action_flags {
pe_action_pseudo = 0x00001,
pe_action_runnable = 0x00002,
pe_action_optional = 0x00004,
pe_action_print_always = 0x00008,
pe_action_have_node_attrs = 0x00010,
pe_action_implied_by_stonith = 0x00040,
pe_action_migrate_runnable = 0x00080,
pe_action_dumped = 0x00100,
pe_action_processed = 0x00200,
pe_action_clear = 0x00400,
pe_action_dangle = 0x00800,
/* This action requires one or more of its dependencies to be runnable.
* We use this to clear the runnable flag before checking dependencies.
*/
pe_action_requires_any = 0x01000,
pe_action_reschedule = 0x02000,
pe_action_tracking = 0x04000,
pe_action_dedup = 0x08000, //! Internal state tracking when creating graph
pe_action_dc = 0x10000, //! Action may run on DC instead of target
};
/* *INDENT-ON* */
struct pe_resource_s {
char *id;
char *clone_name;
xmlNode *xml;
xmlNode *orig_xml;
xmlNode *ops_xml;
pe_working_set_t *cluster;
pe_resource_t *parent;
enum pe_obj_types variant;
void *variant_opaque;
resource_object_functions_t *fns;
resource_alloc_functions_t *cmds;
enum rsc_recovery_type recovery_type;
// @TODO only pe_restart_restart is of interest, so merge into flags
enum pe_restart restart_type; //!< \deprecated will be removed in future release
int priority;
int stickiness;
int sort_index;
int failure_timeout;
int migration_threshold;
guint remote_reconnect_ms;
char *pending_task;
unsigned long long flags;
// @TODO merge these into flags
gboolean is_remote_node;
gboolean exclusive_discover;
//!@{
//! This field should be treated as internal to Pacemaker
GListPtr rsc_cons_lhs; // List of rsc_colocation_t*
GListPtr rsc_cons; // List of rsc_colocation_t*
GListPtr rsc_location; // List of pe__location_t*
GListPtr actions; // List of pe_action_t*
GListPtr rsc_tickets; // List of rsc_ticket*
//!@}
pe_node_t *allocated_to;
pe_node_t *partial_migration_target;
pe_node_t *partial_migration_source;
GListPtr running_on; /* pe_node_t* */
GHashTable *known_on; /* pe_node_t* */
GHashTable *allowed_nodes; /* pe_node_t* */
enum rsc_role_e role;
enum rsc_role_e next_role;
GHashTable *meta;
GHashTable *parameters;
GHashTable *utilization;
GListPtr children; /* pe_resource_t* */
GListPtr dangling_migrations; /* pe_node_t* */
pe_resource_t *container;
GListPtr fillers;
pe_node_t *pending_node; // Node on which pending_task is happening
pe_node_t *lock_node; // Resource is shutdown-locked to this node
time_t lock_time; // When shutdown lock started
#if ENABLE_VERSIONED_ATTRS
xmlNode *versioned_parameters;
#endif
};
#if ENABLE_VERSIONED_ATTRS
// Used as action->action_details if action->rsc is not NULL
typedef struct pe_rsc_action_details_s {
xmlNode *versioned_parameters;
xmlNode *versioned_meta;
} pe_rsc_action_details_t;
#endif
struct pe_action_s {
int id;
int priority;
pe_resource_t *rsc;
pe_node_t *node;
xmlNode *op_entry;
char *task;
char *uuid;
char *cancel_task;
char *reason;
enum pe_action_flags flags;
enum rsc_start_requirement needs;
enum action_fail_response on_fail;
enum rsc_role_e fail_role;
GHashTable *meta;
GHashTable *extra;
/*
 * These two variables are associated with the constraint logic
 * that requires one or more other actions to be runnable before
 * this action is allowed to execute.
 *
 * They are used with features such as 'clone-min', which requires
 * at least X clone instances to be running before an ordered
 * dependency can run. Another option that uses this is
 * 'require-all=false' in ordering constraints. That option
 * says "only require one instance of a resource to start before
 * allowing dependencies to start" -- basically, require-all=false is
 * the same as clone-min=1.
 */
/* current number of known runnable actions in the before list. */
int runnable_before;
/* the number of "before" runnable actions required for this action
* to be considered runnable */
int required_runnable_before;
GListPtr actions_before; /* pe_action_wrapper_t* */
GListPtr actions_after; /* pe_action_wrapper_t* */
/* Some of the above fields could be moved to the details,
* except for API backward compatibility.
*/
void *action_details; // varies by type of action
};
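To make the two counters concrete (values illustrative): with clone-min=2 on an ordering "start app-clone then start web", the start action for web gets required_runnable_before = 2, and it is held non-runnable until runnable_before -- incremented as each app-clone instance start is found runnable -- reaches that threshold.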
typedef struct pe_ticket_s {
char *id;
gboolean granted;
time_t last_granted;
gboolean standby;
GHashTable *state;
} pe_ticket_t;
typedef struct pe_tag_s {
char *id;
GListPtr refs;
} pe_tag_t;
//! Internal tracking for transition graph creation
enum pe_link_state {
pe_link_not_dumped, //! Internal tracking for transition graph creation
pe_link_dumped, //! Internal tracking for transition graph creation
pe_link_dup, //! \deprecated No longer used by Pacemaker
};
enum pe_discover_e {
pe_discover_always = 0,
pe_discover_never,
pe_discover_exclusive,
};
/* *INDENT-OFF* */
enum pe_ordering {
pe_order_none = 0x0, /* deleted */
pe_order_optional = 0x1, /* pure ordering, nothing implied */
pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */
pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */
pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */
pe_order_implies_first_master = 0x40, /* Imply 'first' is required when 'then' is required and then's rsc holds Master role. */
/* first requires then to be both runnable and migrate runnable. */
pe_order_implies_first_migratable = 0x80,
pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */
pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */
pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX',
* ensure instances of 'then' on 'nodeX' are too.
* Only really useful if 'then' is a clone and 'first' is not
*/
pe_order_probe = 0x800, /* If 'first->rsc' is
* - running but about to stop, ignore the constraint
* - otherwise, behave as runnable_left
*/
pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */
pe_order_stonith_stop = 0x2000, /* only applies if the action is non-pseudo */
pe_order_serialize_only = 0x4000, /* serialize */
pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */
pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
pe_order_asymmetrical = 0x100000, /* Indicates asymmetrical one way ordering constraint. */
pe_order_load = 0x200000, /* Only relevant if... */
pe_order_one_or_more = 0x400000, /* 'then' is runnable only if one or more of its dependencies are too */
pe_order_anti_colocation = 0x800000,
pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
pe_order_then_cancels_first = 0x2000000, // if 'then' becomes required, 'first' becomes optional
pe_order_trace = 0x4000000, /* test marker */
};
/* *INDENT-ON* */
typedef struct pe_action_wrapper_s {
enum pe_ordering type;
enum pe_link_state state;
pe_action_t *action;
} pe_action_wrapper_t;
#ifndef PCMK__NO_COMPAT
/* Everything here is deprecated and kept only for public API backward
* compatibility. It will be moved to compatibility.h in a future release.
*/
//!< \deprecated Use pe_action_t instead
typedef struct pe_action_s action_t;
//!< \deprecated Use pe_action_wrapper_t instead
typedef struct pe_action_wrapper_s action_wrapper_t;
//!< \deprecated Use pe_node_t instead
typedef struct pe_node_s node_t;
//!< \deprecated Use enum pe_quorum_policy instead
typedef enum pe_quorum_policy no_quorum_policy_t;
//!< \deprecated use pe_resource_t instead
typedef struct pe_resource_s resource_t;
//!< \deprecated Use pe_tag_t instead
typedef struct pe_tag_s tag_t;
//!< \deprecated Use pe_ticket_t instead
typedef struct pe_ticket_s ticket_t;
#endif
#ifdef __cplusplus
}
#endif
#endif // PE_TYPES__H
diff --git a/lib/pacemaker/pcmk_sched_messages.c b/lib/pacemaker/pcmk_sched_messages.c
index 3d09a5e1fd..ffc0c2b03a 100644
--- a/lib/pacemaker/pcmk_sched_messages.c
+++ b/lib/pacemaker/pcmk_sched_messages.c
@@ -1,148 +1,148 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
gboolean show_scores = FALSE;
gboolean show_utilization = FALSE;
static void
log_resource_details(pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
pcmk__output_t *out = NULL;
const char* argv[] = { "", NULL };
- GListPtr unames = NULL;
+ GListPtr all = NULL;
pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_LOG,
{ NULL, NULL, NULL }
};
/* We need a list of nodes that we are allowed to output information for.
* This is necessary because out->message for all the resource-related
* messages expects such a list, due to the `crm_mon --node=` feature. Here,
* we just make it a list of all the nodes.
*/
- unames = g_list_append(unames, strdup("*"));
+ all = g_list_prepend(all, strdup("*"));
pcmk__register_formats(NULL, formats);
rc = pcmk__output_new(&out, "log", NULL, (char**)argv);
if ((rc != pcmk_rc_ok) || (out == NULL)) {
crm_err("Can't log resource details due to internal error: %s\n",
pcmk_rc_str(rc));
return;
}
pe__register_messages(out);
for (GList *item = data_set->resources; item != NULL; item = item->next) {
pe_resource_t *rsc = (pe_resource_t *) item->data;
// Log all resources except inactive orphans
if (is_not_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role != RSC_ROLE_STOPPED)) {
- out->message(out, crm_map_element_name(rsc->xml), 0, rsc, unames);
+ out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
}
}
pcmk__output_free(out);
- g_list_free_full(unames, free);
+ g_list_free_full(all, free);
}
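For context, a sketch of the calling convention this change adapts to: the registered resource messages now take two filter lists, nodes and resources, and the string "*" matches everything (fragment; `rsc` stands for any resource):

GListPtr everything = g_list_prepend(NULL, strdup("*"));

// 0 = default print options; both filters allow all nodes/resources
out->message(out, crm_map_element_name(rsc->xml), 0, rsc,
             everything /* only_node */, everything /* only_rsc */);

g_list_free_full(everything, free);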
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in,out] data_set Cluster working set
* \param[in] xml_input CIB XML to use as scheduler input
* \param[in] now Time to use for rule evaluation (or NULL for now)
*/
xmlNode *
pcmk__schedule_actions(pe_working_set_t *data_set, xmlNode *xml_input,
crm_time_t *now)
{
GListPtr gIter = NULL;
/* pe_debug_on(); */
CRM_ASSERT(xml_input || is_set(data_set->flags, pe_flag_have_status));
if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
set_working_set_defaults(data_set);
data_set->input = xml_input;
data_set->now = now;
} else {
crm_trace("Already have status - reusing");
}
if (data_set->now == NULL) {
data_set->now = crm_time_new(NULL);
}
crm_trace("Calculate cluster status");
stage0(data_set);
if (is_not_set(data_set->flags, pe_flag_quick_location)) {
log_resource_details(data_set);
}
crm_trace("Applying placement constraints");
stage2(data_set);
if(is_set(data_set->flags, pe_flag_quick_location)){
return NULL;
}
crm_trace("Create internal constraints");
stage3(data_set);
crm_trace("Check actions");
stage4(data_set);
crm_trace("Allocate resources");
stage5(data_set);
crm_trace("Processing fencing and shutdown cases");
stage6(data_set);
crm_trace("Applying ordering constraints");
stage7(data_set);
crm_trace("Create transition graph");
stage8(data_set);
crm_trace("=#=#=#=#= Summary =#=#=#=#=");
crm_trace("\t========= Set %d (Un-runnable) =========", -1);
if (get_crm_log_level() == LOG_TRACE) {
gIter = data_set->actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (is_set(action->flags, pe_action_optional) == FALSE
&& is_set(action->flags, pe_action_runnable) == FALSE
&& is_set(action->flags, pe_action_pseudo) == FALSE) {
log_action(LOG_TRACE, "\t", action, TRUE);
}
}
}
return data_set->graph;
}
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index de93646e43..634c138c16 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,1973 +1,2105 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#define PE__VARIANT_BUNDLE 1
#include "./variant.h"
static char *
next_ip(const char *last_ip)
{
unsigned int oct1 = 0;
unsigned int oct2 = 0;
unsigned int oct3 = 0;
unsigned int oct4 = 0;
int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
if (rc != 4) {
/* @TODO check for IPv6 */
return NULL;
} else if (oct3 > 253) {
return NULL;
} else if (oct4 > 253) {
++oct3;
oct4 = 1;
} else {
++oct4;
}
return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}
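Behavior by example (made-up addresses):

char *a = next_ip("10.42.0.7");     // returns "10.42.0.8"
char *b = next_ip("10.42.0.254");   // fourth octet rolls over: "10.42.1.1"
char *c = next_ip("10.42.254.254"); // returns NULL (third octet > 253)
free(a); free(b); free(c);          // free(NULL) is a no-op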
static int
allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
char *buffer, int max)
{
if(data->ip_range_start == NULL) {
return 0;
} else if(data->ip_last) {
replica->ipaddr = next_ip(data->ip_last);
} else {
replica->ipaddr = strdup(data->ip_range_start);
}
data->ip_last = replica->ipaddr;
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
if (data->add_host) {
return snprintf(buffer, max, " --add-host=%s-%d:%s",
data->prefix, replica->offset,
replica->ipaddr);
}
case PE__CONTAINER_AGENT_RKT:
return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
replica->ipaddr, data->prefix, replica->offset);
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
return 0;
}
static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
crm_xml_add(rsc, XML_ATTR_ID, name);
crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
crm_xml_add(rsc, XML_ATTR_TYPE, kind);
return rsc;
}
/*!
* \internal
* \brief Check whether cluster can manage resource inside container
*
* \param[in] data Container variant data
*
* \return TRUE if networking configuration is acceptable, FALSE otherwise
*
* \note The resource is manageable if an IP range or control port has been
* specified. If a control port is used without an IP range, replicas per
* host must be 1.
*/
static bool
valid_network(pe__bundle_variant_data_t *data)
{
if(data->ip_range_start) {
return TRUE;
}
if(data->control_port) {
if(data->nreplicas_per_host > 1) {
pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
data->nreplicas_per_host = 1;
/* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
}
return TRUE;
}
return FALSE;
}
static bool
create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
if(data->ip_range_start) {
char *id = NULL;
xmlNode *xml_ip = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
crm_xml_sanitize_id(id);
xml_ip = create_resource(id, "heartbeat", "IPaddr2");
free(id);
xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
if(data->host_network) {
crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
}
if(data->host_netmask) {
crm_create_nvpair_xml(xml_obj, NULL,
"cidr_netmask", data->host_netmask);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
}
xml_obj = create_xml_node(xml_ip, "operations");
crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->ip);
}
return TRUE;
}
static bool
create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_DOCKER_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
offset += snprintf(buffer+offset, max-offset, " --restart=no");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this keeps applications happy that need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
if (data->container_network) {
#if 0
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
}
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
}
}
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
replica->ipaddr, port->source,
port->target);
} else if(safe_str_neq(data->container_network, "host")) {
// No need to do port mapping if net=host
offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control API access to this with ACLs like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
static bool
create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_PODMAN_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
// FIXME: (bandini 2018-08) podman has no restart policies
//offset += snprintf(buffer+offset, max-offset, " --restart=no");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this keeps applications happy that need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
if (data->container_network) {
#if 0
// podman has no support for --link-local-ip
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
}
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
}
}
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
replica->ipaddr, port->source,
port->target);
} else if(safe_str_neq(data->container_network, "host")) {
// No need to do port mapping if net=host
offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control API access to this with ACLs like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL,
"run_cmd", data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent,
data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
static bool
create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
int offset = 0, max = 4096;
char *buffer = calloc(1, max+1);
int doffset = 0, dmax = 1024;
char *dbuffer = calloc(1, dmax+1);
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
int volid = 0;
id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat",
PE__CONTAINER_AGENT_RKT_S);
free(id);
xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
/* Set a container hostname only if we have an IP to map it to.
* The user can set -h or --uts=host themselves if they want a nicer
* name for logs, but this keeps applications happy that need their
* hostname to match the IP they bind to.
*/
if (data->ip_range_start != NULL) {
offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
data->prefix, replica->offset);
}
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
if (data->container_network) {
#if 0
offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
replica->ipaddr);
#endif
offset += snprintf(buffer+offset, max-offset, " --net=%s",
data->container_network);
}
if(data->control_port) {
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
} else {
offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
}
for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
pe__bundle_mount_t *mount = pIter->data;
if (is_set(mount->flags, pe__bundle_mount_subdir)) {
char *source = crm_strdup_printf(
"%s/%s-%d", mount->source, data->prefix, replica->offset);
if(doffset > 0) {
doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
}
doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
}
offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
free(source);
} else {
offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
if(mount->options) {
offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
}
offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
}
volid++;
}
for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
pe__bundle_port_t *port = pIter->data;
if (replica->ipaddr) {
offset += snprintf(buffer+offset, max-offset,
" --port=%s:%s:%s", port->target,
replica->ipaddr, port->source);
} else {
offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
}
}
if (data->launcher_options) {
offset += snprintf(buffer+offset, max-offset, " %s",
data->launcher_options);
}
if (data->container_host_options) {
offset += snprintf(buffer + offset, max - offset, " %s",
data->container_host_options);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
free(buffer);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
free(dbuffer);
if (replica->child) {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive, we'll
* monitor the child independently
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
/* } else if(child && data->untrusted) {
* Support this use-case?
*
* The ability to have resources started/stopped by us, but
* unable to set attributes, etc.
*
* Arguably better to control API access to this with ACLs like
* "normal" remote nodes
*
* crm_create_nvpair_xml(xml_obj, NULL,
* "run_cmd",
* "/usr/libexec/pacemaker/pacemaker-execd");
* crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
* "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
*/
} else {
if (data->container_command) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want
* to know if it is alive
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = create_xml_node(xml_container, "operations");
crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
return FALSE;
}
parent->children = g_list_append(parent->children, replica->container);
return TRUE;
}
/*!
* \brief Ban a node from a resource's (and its children's) allowed nodes list
*
* \param[in,out] rsc Resource to modify
* \param[in] uname Name of node to ban
*/
static void
disallow_node(pe_resource_t *rsc, const char *uname)
{
gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
if (match) {
((pe_node_t *) match)->weight = -INFINITY;
((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
}
if (rsc->children) {
GListPtr child;
for (child = rsc->children; child != NULL; child = child->next) {
disallow_node((pe_resource_t *) (child->data), uname);
}
}
}
static bool
create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica,
pe_working_set_t *data_set)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
GListPtr rsc_iter = NULL;
pe_node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
const char *uname = NULL;
const char *connect_name = NULL;
if (pe_find_resource(data_set->resources, id) != NULL) {
free(id);
// The biggest hammer we have
id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
replica->child->id, replica->offset);
//@TODO return false instead of asserting?
CRM_ASSERT(pe_find_resource(data_set->resources, id) == NULL);
}
/* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
* connection does not have its own IP is a magic string that we use to
* support nested remotes (i.e. a bundle running on a remote node).
*/
connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
if (data->control_port == NULL) {
port_s = crm_itoa(DEFAULT_REMOTE_PORT);
}
/* This sets replica->container as replica->remote's container, which is
* similar to what happens with guest nodes. This is how the scheduler
* knows that the bundle node is fenced by recovering the container, and
* that remote should be ordered relative to the container.
*/
xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
NULL, NULL, NULL,
connect_name, (data->control_port?
data->control_port : port_s));
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
* need something that will get freed during data set cleanup to use as
* the node ID and uname.
*/
free(id);
id = NULL;
uname = ID(xml_remote);
/* Ensure a node has been created for the guest (it may have already
* been, if it has a permanent node attribute), and ensure its weight is
* -INFINITY so no other resources can run on it.
*/
node = pe_find_node(data_set->nodes, uname);
if (node == NULL) {
node = pe_create_node(uname, uname, "remote", "-INFINITY",
data_set);
} else {
node->weight = -INFINITY;
}
node->rsc_discover_mode = pe_discover_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
* has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
* Unfortunately, a bundle has to be mostly unpacked before it's obvious
* what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
* likely place for this to happen is when common_unpack() calls
* resource_location() to set a default score in symmetric clusters.
* This adds a node *copy* to each resource's allowed nodes, and these
* copies will have the wrong weight.
*
* As a hacky workaround, fix those copies here.
*
* @TODO Possible alternative: ensure bundles are unpacked before other
* resources, so the weight is correct before any copies are made.
*/
for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
disallow_node((pe_resource_t *) (rsc_iter->data), uname);
}
replica->node = pe__copy_node(node);
replica->node->weight = 500;
replica->node->rsc_discover_mode = pe_discover_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
g_hash_table_destroy(replica->child->allowed_nodes);
}
replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
g_str_equal,
NULL, free);
g_hash_table_insert(replica->child->allowed_nodes,
(gpointer) replica->node->details->id,
pe__copy_node(replica->node));
{
pe_node_t *copy = pe__copy_node(replica->node);
copy->weight = -INFINITY;
g_hash_table_insert(replica->child->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
}
if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
return FALSE;
}
g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
if (pe__is_guest_or_remote_node(node)) {
/* Remote resources can only run on 'normal' cluster nodes */
node->weight = -INFINITY;
}
}
replica->node->details->remote_rsc = replica->remote;
// Ensure pe__is_guest_node() functions correctly immediately
replica->remote->container = replica->container;
/* A bundle's #kind is closer to "container" (guest node) than the
* "remote" set by pe_create_node().
*/
g_hash_table_insert(replica->node->details->attrs,
strdup(CRM_ATTR_KIND), strdup("container"));
/* One effect of this is that setup_container() will add
* replica->remote to replica->container's fillers, which will make
* pe__resource_contains_guest_node() true for replica->container.
*
* replica->child does NOT get added to replica->container's fillers.
* The only noticeable effect if it did would be for its fail count to
* be taken into account when checking replica->container's migration
* threshold.
*/
parent->children = g_list_append(parent->children, replica->remote);
}
return TRUE;
}
static bool
create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica, pe_working_set_t *data_set)
{
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
if (!create_docker_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
case PE__CONTAINER_AGENT_PODMAN:
if (!create_podman_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
case PE__CONTAINER_AGENT_RKT:
if (!create_rkt_resource(parent, data, replica, data_set)) {
return FALSE;
}
break;
default: // PE__CONTAINER_AGENT_UNKNOWN
return FALSE;
}
if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
return FALSE;
}
if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
return FALSE;
}
if (replica->child && replica->ipaddr) {
add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
}
if (replica->remote) {
/*
* Allow the remote connection resource to be allocated to a
* different node than the one on which the container is active.
*
* This makes it possible to have Pacemaker Remote nodes running
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
set_bit(replica->remote->flags, pe_rsc_allow_remote_remotes);
}
return TRUE;
}
static void
mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
const char *target, const char *options, uint32_t flags)
{
pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
mount->source = strdup(source);
mount->target = strdup(target);
if (options) {
mount->options = strdup(options);
}
mount->flags = flags;
bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
}
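For instance (hypothetical paths), the subdir flag requests a per-replica subdirectory of the source, just as the /var/log mount added later in this file does:

// Mounts <source>/<bundle-prefix>-<replica#> at /var/lib/app in the container
mount_add(bundle_data, "/srv/app-state", "/var/lib/app", NULL,
          pe__bundle_mount_subdir);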
static void
mount_free(pe__bundle_mount_t *mount)
{
free(mount->source);
free(mount->target);
free(mount->options);
free(mount);
}
static void
port_free(pe__bundle_port_t *port)
{
free(port->source);
free(port->target);
free(port);
}
static pe__bundle_replica_t *
replica_for_remote(pe_resource_t *remote)
{
pe_resource_t *top = remote;
pe__bundle_variant_data_t *bundle_data = NULL;
if (top == NULL) {
return NULL;
}
while (top->parent != NULL) {
top = top->parent;
}
get_bundle_variant_data(bundle_data, top);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (replica->remote == remote) {
return replica;
}
}
CRM_LOG_ASSERT(FALSE);
return NULL;
}
bool
pe__bundle_needs_remote_name(pe_resource_t *rsc)
{
const char *value;
if (rsc == NULL) {
return FALSE;
}
value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
if (safe_str_eq(value, "#uname") == FALSE) {
return FALSE;
} else {
const char *match[3][2] = {
{ XML_ATTR_TYPE, "remote" },
{ XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF },
{ XML_AGENT_ATTR_PROVIDER, "pacemaker" },
};
for (int m = 0; m < 3; m++) {
value = crm_element_value(rsc->xml, match[m][0]);
if (safe_str_neq(value, match[m][1])) {
return FALSE;
}
}
}
return TRUE;
}
const char *
pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
pe_node_t *node = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
return NULL;
}
replica = replica_for_remote(rsc);
if (replica == NULL) {
return NULL;
}
node = replica->container->allocated_to;
if (node == NULL) {
/* If it won't be running anywhere after the
* transition, go with where it's running now.
*/
node = pe__current_node(replica->container);
}
if(node == NULL) {
crm_trace("Cannot determine address for bundle connection %s", rsc->id);
return NULL;
}
crm_trace("Setting address for bundle connection %s to bundle host %s",
rsc->id, node->details->uname);
if(xml != NULL && field != NULL) {
crm_xml_add(xml, field, node->details->uname);
}
return node->details->uname;
}
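A hypothetical call site: copy the chosen host's uname into a connection parameter when the CIB used the "#uname" placeholder (the XML node and field name here are illustrative):

const char *addr = pe__add_bundle_remote_name(remote_rsc, args_xml, "addr");

if (addr == NULL) {
    // Container is neither allocated nor running anywhere; leave field unset
}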
gboolean
pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
{
const char *value = NULL;
xmlNode *xml_obj = NULL;
xmlNode *xml_resource = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
bool need_log_mount = TRUE;
CRM_ASSERT(rsc != NULL);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
rsc->variant_opaque = bundle_data;
bundle_data->prefix = strdup(rsc->id);
xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
} else {
xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
} else {
xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
} else {
return FALSE;
}
}
}
value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
if (value == NULL) {
// @COMPAT deprecated since 2.0.0
value = crm_element_value(xml_obj, "masters");
}
bundle_data->promoted_max = crm_parse_int(value, "0");
if (bundle_data->promoted_max < 0) {
pe_err("%s for %s must be nonnegative integer, using 0",
XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
bundle_data->promoted_max = 0;
}
value = crm_element_value(xml_obj, "replicas");
if ((value == NULL) && bundle_data->promoted_max) {
bundle_data->nreplicas = bundle_data->promoted_max;
} else {
bundle_data->nreplicas = crm_parse_int(value, "1");
}
if (bundle_data->nreplicas < 1) {
pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
bundle_data->nreplicas = 1;
}
/*
* Communication between containers on the same host via the
* floating IPs only works if the container is started with:
* --userland-proxy=false --ip-masq=false
*/
value = crm_element_value(xml_obj, "replicas-per-host");
bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
if (bundle_data->nreplicas_per_host < 1) {
pe_err("'replicas-per-host' for %s must be positive integer, using 1",
rsc->id);
bundle_data->nreplicas_per_host = 1;
}
if (bundle_data->nreplicas_per_host == 1) {
clear_bit(rsc->flags, pe_rsc_unique);
}
bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
bundle_data->image = crm_element_value_copy(xml_obj, "image");
bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
xml_obj = first_named_child(rsc->xml, "network");
if(xml_obj) {
bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
value = crm_element_value(xml_obj, "add-host");
if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
bundle_data->add_host = TRUE;
}
for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
xml_child = __xml_next_element(xml_child)) {
pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
port->source = crm_element_value_copy(xml_child, "port");
if(port->source == NULL) {
port->source = crm_element_value_copy(xml_child, "range");
} else {
port->target = crm_element_value_copy(xml_child, "internal-port");
}
if(port->source != NULL && strlen(port->source) > 0) {
if(port->target == NULL) {
port->target = strdup(port->source);
}
bundle_data->ports = g_list_append(bundle_data->ports, port);
} else {
pe_err("Invalid port directive %s", ID(xml_child));
port_free(port);
}
}
}
xml_obj = first_named_child(rsc->xml, "storage");
for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
xml_child = __xml_next_element(xml_child)) {
const char *source = crm_element_value(xml_child, "source-dir");
const char *target = crm_element_value(xml_child, "target-dir");
const char *options = crm_element_value(xml_child, "options");
int flags = pe__bundle_mount_none;
if (source == NULL) {
source = crm_element_value(xml_child, "source-dir-root");
set_bit(flags, pe__bundle_mount_subdir);
}
if (source && target) {
mount_add(bundle_data, source, target, options, flags);
if (strcmp(target, "/var/log") == 0) {
need_log_mount = FALSE;
}
} else {
pe_err("Invalid mount directive %s", ID(xml_child));
}
}
xml_obj = first_named_child(rsc->xml, "primitive");
if (xml_obj && valid_network(bundle_data)) {
char *value = NULL;
xmlNode *xml_set = NULL;
xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
/* @COMPAT We no longer use the <master> tag, but we need to keep it as
* part of the resource name, so that bundles don't restart in a rolling
* upgrade. (It also avoids needing to change regression tests.)
*/
crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
(bundle_data->promoted_max? "master"
: (const char *)xml_resource->name));
xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
value = crm_itoa(bundle_data->nreplicas);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_INCARNATION_MAX, value);
free(value);
value = crm_itoa(bundle_data->nreplicas_per_host);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_INCARNATION_NODEMAX, value);
free(value);
crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
(bundle_data->nreplicas_per_host > 1)?
XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
if (bundle_data->promoted_max) {
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
value = crm_itoa(bundle_data->promoted_max);
crm_create_nvpair_xml(xml_set, NULL,
XML_RSC_ATTR_PROMOTED_MAX, value);
free(value);
}
//crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
add_node_copy(xml_resource, xml_obj);
} else if(xml_obj) {
pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
rsc->id, ID(xml_obj));
return FALSE;
}
if(xml_resource) {
int lpc = 0;
GListPtr childIter = NULL;
pe_resource_t *new_rsc = NULL;
pe__bundle_port_t *port = NULL;
int offset = 0, max = 1024;
char *buffer = NULL;
if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", ID(rsc->xml));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
return FALSE;
}
bundle_data->child = new_rsc;
/* Currently, we always map the default authentication key location
* into the same location inside the container.
*
* Ideally, we would respect the host's PCMK_authkey_location, but:
* - it may be different on different nodes;
 * - the actual connection will do extra checking to make sure the key
 *   file exists and is readable, which we can't do here on the DC
* - tools such as crm_resource and crm_simulate may not have the same
* environment variables as the cluster, causing operation digests to
* differ
*
* Always using the default location inside the container is fine,
* because we control the pacemaker_remote environment, and it avoids
* having to pass another environment variable to the container.
*
* @TODO A better solution may be to have only pacemaker_remote use the
* environment variable, and have the cluster nodes use a new
* cluster option for key location. This would introduce the limitation
* of the location being the same on all cluster nodes, but that's
* reasonable.
*/
mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
if (need_log_mount) {
mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
pe__bundle_mount_subdir);
}
port = calloc(1, sizeof(pe__bundle_port_t));
if(bundle_data->control_port) {
port->source = strdup(bundle_data->control_port);
} else {
/* If we wanted to respect PCMK_remote_port, we could use
* crm_default_remote_port() here and elsewhere in this file instead
* of DEFAULT_REMOTE_PORT.
*
* However, it gains nothing, since we control both the container
* environment and the connection resource parameters, and the user
* can use a different port if desired by setting control-port.
*/
port->source = crm_itoa(DEFAULT_REMOTE_PORT);
}
port->target = strdup(port->source);
bundle_data->ports = g_list_append(bundle_data->ports, port);
buffer = calloc(1, max+1);
for (childIter = bundle_data->child->children; childIter != NULL;
childIter = childIter->next) {
pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
replica->child = childIter->data;
replica->child->exclusive_discover = TRUE;
replica->offset = lpc++;
// Ensure the child's notify gets set based on the underlying primitive's value
if (is_set(replica->child->flags, pe_rsc_notify)) {
set_bit(bundle_data->child->flags, pe_rsc_notify);
}
offset += allocate_ip(bundle_data, replica, buffer+offset,
max-offset);
bundle_data->replicas = g_list_append(bundle_data->replicas,
replica);
bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
XML_RSC_ATTR_TARGET);
}
bundle_data->container_host_options = buffer;
if (bundle_data->attribute_target) {
g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
strdup(bundle_data->attribute_target));
g_hash_table_replace(bundle_data->child->meta,
strdup(XML_RSC_ATTR_TARGET),
strdup(bundle_data->attribute_target));
}
} else {
// Just a naked container, no pacemaker-remote
int offset = 0, max = 1024;
char *buffer = calloc(1, max+1);
for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
replica->offset = lpc;
offset += allocate_ip(bundle_data, replica, buffer+offset,
max-offset);
bundle_data->replicas = g_list_append(bundle_data->replicas,
replica);
}
bundle_data->container_host_options = buffer;
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (!create_container(rsc, bundle_data, replica, data_set)) {
pe_err("Failed unpacking resource %s", rsc->id);
rsc->fns->free(rsc);
return FALSE;
}
}
if (bundle_data->child) {
rsc->children = g_list_append(rsc->children, bundle_data->child);
}
return TRUE;
}
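/* For illustration: a minimal bundle definition of the kind the unpacking
 * code above consumes. The IDs and values are hypothetical, but the element
 * and attribute names ("replicas", "replicas-per-host", "ip-range-start",
 * "port-mapping", "storage-mapping", and so on) are the ones parsed above:
 *
 *   <bundle id="httpd-bundle">
 *     <docker image="pcmk:http" replicas="3" replicas-per-host="1"/>
 *     <network ip-range-start="192.168.122.131" host-netmask="24">
 *       <port-mapping id="httpd-port" port="80"/>
 *     </network>
 *     <storage>
 *       <storage-mapping id="httpd-root" source-dir="/srv/html"
 *                        target-dir="/var/www/html" options="rw"/>
 *     </storage>
 *     <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
 *   </bundle>
 */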
static int
replica_resource_active(pe_resource_t *rsc, gboolean all)
{
if (rsc) {
gboolean child_active = rsc->fns->active(rsc, all);
if (child_active && !all) {
return TRUE;
} else if (!child_active && all) {
return FALSE;
}
}
return -1;
}
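/* Note: the tri-state return above is what lets pe__bundle_active() below
 * stop as soon as a replica member yields a definitive answer; -1 means
 * "no decision", so the scan moves on to the replica's next resource.
 */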
gboolean
pe__bundle_active(pe_resource_t *rsc, gboolean all)
{
pe__bundle_variant_data_t *bundle_data = NULL;
GListPtr iter = NULL;
get_bundle_variant_data(bundle_data, rsc);
for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
pe__bundle_replica_t *replica = iter->data;
int rsc_active;
rsc_active = replica_resource_active(replica->ip, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->child, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->container, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->remote, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
}
/* If "all" is TRUE, we've already checked that no resources were inactive,
* so return TRUE; if "all" is FALSE, we didn't find any active resources,
* so return FALSE.
*/
return all;
}
/*!
* \internal
* \brief Find the bundle replica corresponding to a given node
*
* \param[in] bundle Top-level bundle resource
* \param[in] node Node to search for
*
* \return Bundle replica if found, NULL otherwise
*/
pe_resource_t *
pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(bundle && node);
get_bundle_variant_data(bundle_data, bundle);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica && replica->node);
if (replica->node->details == node->details) {
return replica->child;
}
}
return NULL;
}
static void
print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
if (rsc != NULL) {
if (options & pe_print_html) {
status_print("");
}
rsc->fns->print(rsc, pre_text, options, print_data);
if (options & pe_print_html) {
status_print("\n");
}
}
}
static const char*
container_agent_str(enum pe__container_agent t)
{
switch (t) {
case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S;
case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
return PE__CONTAINER_AGENT_UNKNOWN_S;
}
static void
bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
char *child_text = NULL;
CRM_CHECK(rsc != NULL, return);
if (pre_text == NULL) {
pre_text = "";
}
child_text = crm_strdup_printf("%s ", pre_text);
get_bundle_variant_data(bundle_data, rsc);
status_print("%sid);
status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
status_print("image=\"%s\" ", bundle_data->image);
status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
status_print(">\n");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
status_print("%s \n", pre_text, replica->offset);
print_rsc_in_list(replica->ip, child_text, options, print_data);
print_rsc_in_list(replica->child, child_text, options, print_data);
print_rsc_in_list(replica->container, child_text, options, print_data);
print_rsc_in_list(replica->remote, child_text, options, print_data);
status_print("%s \n", pre_text);
}
status_print("%s\n", pre_text);
free(child_text);
}
-PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
pe__bundle_variant_data_t *bundle_data = NULL;
int rc = pcmk_rc_no_output;
gboolean printed_header = FALSE;
+ gboolean print_everything = TRUE;
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(only_rsc, rsc->id);
+
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
char *id = crm_itoa(replica->offset);
+ gboolean print_ip, print_child, print_ctnr, print_remote;
CRM_ASSERT(replica);
if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
continue;
}
+ print_ip = replica->ip != NULL &&
+ !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
+ print_child = replica->child != NULL &&
+ !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
+ print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
+ print_remote = replica->remote != NULL &&
+ !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
+
+ if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
+ continue;
+ }
+
if (!printed_header) {
printed_header = TRUE;
rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6
, "id", rsc->id
, "type", container_agent_str(bundle_data->agent_type)
, "image", bundle_data->image
, "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
, "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
, "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed)));
CRM_ASSERT(rc == pcmk_rc_ok);
}
rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
free(id);
CRM_ASSERT(rc == pcmk_rc_ok);
- if (replica->ip != NULL) {
- out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_node);
+ if (print_ip) {
+ out->message(out, crm_map_element_name(replica->ip->xml), options,
+ replica->ip, only_node, only_rsc);
}
- if (replica->child != NULL) {
- out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_node);
+ if (print_child) {
+ out->message(out, crm_map_element_name(replica->child->xml), options,
+ replica->child, only_node, only_rsc);
}
- out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_node);
+ if (print_ctnr) {
+ out->message(out, crm_map_element_name(replica->container->xml), options,
+ replica->container, only_node, only_rsc);
+ }
- if (replica->remote != NULL) {
- out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_node);
+ if (print_remote) {
+ out->message(out, crm_map_element_name(replica->remote->xml), options,
+ replica->remote, only_node, only_rsc);
}
pcmk__output_xml_pop_parent(out); // replica
}
if (printed_header) {
pcmk__output_xml_pop_parent(out); // bundle
}
return rc;
}
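/* Illustrative only: with the filtering above, the XML emitted for an
 * unfiltered bundle has roughly this shape (attribute values hypothetical):
 *
 *   <bundle id="httpd-bundle" type="docker" image="pcmk:http"
 *           unique="false" managed="true" failed="false">
 *     <replica id="0">
 *       ...one entry per printed member resource...
 *     </replica>
 *   </bundle>
 */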
static void
pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
pe_node_t *node, long options)
{
pe_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
pe__common_output_html(out, rsc, buffer, node, options);
}
-PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
pe__bundle_variant_data_t *bundle_data = NULL;
char buffer[LINE_MAX];
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
- pcmk__output_create_xml_node(out, "br");
- out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s",
- (bundle_data->nreplicas > 1)? " set" : "",
- rsc->id, bundle_data->image,
- is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(only_rsc, rsc->id);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
+ gboolean print_ip, print_child, print_ctnr, print_remote;
CRM_ASSERT(replica);
if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
continue;
}
- pcmk__output_xml_create_parent(out, "li");
- if (is_set(options, pe_print_implicit)) {
+ print_ip = replica->ip != NULL &&
+ !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
+ print_child = replica->child != NULL &&
+ !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
+ print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
+ print_remote = replica->remote != NULL &&
+ !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
+
+ if (is_set(options, pe_print_implicit) ||
+ (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
+ /* The text output messages used below require pe_print_implicit to
+ * be set to do anything.
+ */
+ unsigned int new_options = is_set(options, pe_print_implicit) ? options : options | pe_print_implicit;
+
+ if (rc == pcmk_rc_no_output) {
+ pcmk__output_create_xml_node(out, "br");
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+
+ pcmk__output_xml_create_parent(out, "li");
+
if (pcmk__list_of_multiple(bundle_data->replicas)) {
snprintf(buffer, LINE_MAX, " Replica[%d]", replica->offset);
xmlNodeSetContent(pcmk__output_xml_peek_parent(out), (pcmkXmlStr) buffer);
}
pcmk__output_create_xml_node(out, "br");
out->begin_list(out, NULL, NULL, NULL);
- if (replica->ip != NULL) {
- out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_node);
+ if (print_ip) {
+ out->message(out, crm_map_element_name(replica->ip->xml),
+ new_options, replica->ip, only_node, only_rsc);
}
- if (replica->child != NULL) {
- out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_node);
+ if (print_child) {
+ out->message(out, crm_map_element_name(replica->child->xml),
+ new_options, replica->child, only_node, only_rsc);
}
- out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_node);
+ if (print_ctnr) {
+ out->message(out, crm_map_element_name(replica->container->xml),
+ new_options, replica->container, only_node, only_rsc);
+ }
- if (replica->remote != NULL) {
- out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_node);
+ if (print_remote) {
+ out->message(out, crm_map_element_name(replica->remote->xml),
+ new_options, replica->remote, only_node, only_rsc);
}
out->end_list(out);
+ } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
+ continue;
} else {
- if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
- continue;
- }
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
options);
}
pcmk__output_xml_pop_parent(out);
}
- out->end_list(out);
- return pcmk_rc_ok;
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
}
static void
pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
pe_node_t *node, long options)
{
pe_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
pe__common_output_text(out, rsc, buffer, node, options);
}
-PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("bundle", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
pe__bundle_variant_data_t *bundle_data = NULL;
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
+
    CRM_ASSERT(rsc != NULL);
    get_bundle_variant_data(bundle_data, rsc);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
- out->begin_list(out, NULL, NULL, "Container bundle%s: %s [%s]%s%s",
- (bundle_data->nreplicas > 1)? " set" : "",
- rsc->id, bundle_data->image,
- is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ print_everything = pcmk__str_in_list(only_rsc, rsc->id);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
+ gboolean print_ip, print_child, print_ctnr, print_remote;
CRM_ASSERT(replica);
if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
continue;
}
- if (is_set(options, pe_print_implicit)) {
+ print_ip = replica->ip != NULL &&
+ !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
+ print_child = replica->child != NULL &&
+ !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
+ print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
+ print_remote = replica->remote != NULL &&
+ !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
+
+ if (is_set(options, pe_print_implicit) ||
+ (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
+ /* The text output messages used below require pe_print_implicit to
+ * be set to do anything.
+ */
+ unsigned int new_options = is_set(options, pe_print_implicit) ? options : options | pe_print_implicit;
+
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+
if (pcmk__list_of_multiple(bundle_data->replicas)) {
out->list_item(out, NULL, "Replica[%d]", replica->offset);
}
out->begin_list(out, NULL, NULL, NULL);
- if (replica->ip != NULL) {
- out->message(out, crm_map_element_name(replica->ip->xml), options, replica->ip, only_node);
+ if (print_ip) {
+ out->message(out, crm_map_element_name(replica->ip->xml),
+ new_options, replica->ip, only_node, only_rsc);
}
- if (replica->child != NULL) {
- out->message(out, crm_map_element_name(replica->child->xml), options, replica->child, only_node);
+ if (print_child) {
+ out->message(out, crm_map_element_name(replica->child->xml),
+ new_options, replica->child, only_node, only_rsc);
}
- out->message(out, crm_map_element_name(replica->container->xml), options, replica->container, only_node);
+ if (print_ctnr) {
+ out->message(out, crm_map_element_name(replica->container->xml),
+ new_options, replica->container, only_node, only_rsc);
+ }
- if (replica->remote != NULL) {
- out->message(out, crm_map_element_name(replica->remote->xml), options, replica->remote, only_node);
+ if (print_remote) {
+ out->message(out, crm_map_element_name(replica->remote->xml),
+ new_options, replica->remote, only_node, only_rsc);
}
out->end_list(out);
+ } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
+ continue;
} else {
- if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
- continue;
- }
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s",
+ (bundle_data->nreplicas > 1)? " set" : "",
+ rsc->id, bundle_data->image,
+ is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
options);
}
}
- out->end_list(out);
- return pcmk_rc_ok;
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+ return rc;
}
static void
print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
long options, void *print_data)
{
pe_node_t *node = NULL;
pe_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
node = pe__current_node(replica->container);
common_print(rsc, pre_text, buffer, node, options, print_data);
}
void
pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
char *child_text = NULL;
CRM_CHECK(rsc != NULL, return);
if (options & pe_print_xml) {
bundle_print_xml(rsc, pre_text, options, print_data);
return;
}
get_bundle_variant_data(bundle_data, rsc);
if (pre_text == NULL) {
pre_text = " ";
}
status_print("%sContainer bundle%s: %s [%s]%s%s\n",
pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
rsc->id, bundle_data->image,
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("
\n\n");
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (options & pe_print_html) {
status_print("- ");
}
if (is_set(options, pe_print_implicit)) {
child_text = crm_strdup_printf(" %s", pre_text);
if (pcmk__list_of_multiple(bundle_data->replicas)) {
status_print(" %sReplica[%d]\n", pre_text, replica->offset);
}
if (options & pe_print_html) {
status_print("
\n\n");
}
print_rsc_in_list(replica->ip, child_text, options, print_data);
print_rsc_in_list(replica->container, child_text, options, print_data);
print_rsc_in_list(replica->remote, child_text, options, print_data);
print_rsc_in_list(replica->child, child_text, options, print_data);
if (options & pe_print_html) {
status_print("
\n");
}
} else {
child_text = crm_strdup_printf("%s ", pre_text);
print_bundle_replica(replica, child_text, options, print_data);
}
free(child_text);
if (options & pe_print_html) {
status_print(" \n");
}
}
if (options & pe_print_html) {
status_print("
\n");
}
}
static void
free_bundle_replica(pe__bundle_replica_t *replica)
{
if (replica == NULL) {
return;
}
if (replica->node) {
free(replica->node);
replica->node = NULL;
}
if (replica->ip) {
free_xml(replica->ip->xml);
replica->ip->xml = NULL;
replica->ip->fns->free(replica->ip);
replica->ip = NULL;
}
if (replica->container) {
free_xml(replica->container->xml);
replica->container->xml = NULL;
replica->container->fns->free(replica->container);
replica->container = NULL;
}
if (replica->remote) {
free_xml(replica->remote->xml);
replica->remote->xml = NULL;
replica->remote->fns->free(replica->remote);
replica->remote = NULL;
}
free(replica->ipaddr);
free(replica);
}
void
pe__free_bundle(pe_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
free(bundle_data->prefix);
free(bundle_data->image);
free(bundle_data->control_port);
free(bundle_data->host_network);
free(bundle_data->host_netmask);
free(bundle_data->ip_range_start);
free(bundle_data->container_network);
free(bundle_data->launcher_options);
free(bundle_data->container_command);
free(bundle_data->container_host_options);
g_list_free_full(bundle_data->replicas,
(GDestroyNotify) free_bundle_replica);
g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
g_list_free(rsc->children);
if(bundle_data->child) {
free_xml(bundle_data->child->xml);
bundle_data->child->xml = NULL;
bundle_data->child->fns->free(bundle_data->child);
}
common_free(rsc);
}
enum rsc_role_e
pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
{
enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
return container_role;
}
/*!
* \brief Get the number of configured replicas in a bundle
*
* \param[in] rsc Bundle resource
*
* \return Number of configured replicas, or 0 on error
*/
int
pe_bundle_replicas(const pe_resource_t *rsc)
{
if ((rsc == NULL) || (rsc->variant != pe_container)) {
return 0;
} else {
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
return bundle_data->nreplicas;
}
}
void
pe__count_bundle(pe_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
pe__bundle_replica_t *replica = item->data;
if (replica->ip) {
replica->ip->fns->count(replica->ip);
}
if (replica->child) {
replica->child->fns->count(replica->child);
}
if (replica->container) {
replica->container->fns->count(replica->container);
}
if (replica->remote) {
replica->remote->fns->count(replica->remote);
}
}
}
+
+gboolean
+pe__bundle_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
+{
+ gboolean passes = FALSE;
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
+ passes = TRUE;
+ } else {
+ get_bundle_variant_data(bundle_data, rsc);
+
+ for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
+ pe__bundle_replica_t *replica = gIter->data;
+
+ if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ }
+ }
+ }
+
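+    /* is_filtered() answers "should this resource be hidden?", so the
+     * negation is returned: the bundle passed if its own printable ID was
+     * requested, or if any member (ip, child, container, remote) of any
+     * replica passed.
+     */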
+ return !passes;
+}
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 2e7f990a16..2c81ca1943 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,1107 +1,1190 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
#include <crm/msg_xml.h>
#define VARIANT_CLONE 1
#include "./variant.h"
void
pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
"such as %s can be used only as anonymous clones",
rsc->id, standard, rid);
clone_data->clone_node_max = 1;
clone_data->clone_max = QB_MIN(clone_data->clone_max,
g_list_length(data_set->nodes));
}
}
pe_resource_t *
find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
{
char *child_id = NULL;
pe_resource_t *child = NULL;
const char *child_base = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
child_base = ID(clone_data->xml_obj_child);
child_id = crm_strdup_printf("%s:%s", child_base, sub_id);
child = pe_find_resource(rsc->children, child_id);
free(child_id);
return child;
}
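/* Clone instances are identified as <child-id>:<instance-number>, so a clone
 * wrapping a primitive "ping" has instances "ping:0", "ping:1", and so on
 * (the names here are illustrative).
 */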
pe_resource_t *
pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
{
gboolean as_orphan = FALSE;
char *inc_num = NULL;
char *inc_max = NULL;
pe_resource_t *child_rsc = NULL;
xmlNode *child_copy = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE);
if (clone_data->total_clones >= clone_data->clone_max) {
// If we've already used all available instances, this is an orphan
as_orphan = TRUE;
}
// Allocate instance numbers in numerical order (starting at 0)
inc_num = crm_itoa(clone_data->total_clones);
inc_max = crm_itoa(clone_data->clone_max);
child_copy = copy_xml(clone_data->xml_obj_child);
crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
child_rsc = NULL;
goto bail;
}
/* child_rsc->globally_unique = rsc->globally_unique; */
CRM_ASSERT(child_rsc);
clone_data->total_clones += 1;
pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
rsc->children = g_list_append(rsc->children, child_rsc);
if (as_orphan) {
set_bit_recursive(child_rsc, pe_rsc_orphan);
}
add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
bail:
free(inc_num);
free(inc_max);
return child_rsc;
}
gboolean
clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
int lpc = 0;
xmlNode *a_child = NULL;
xmlNode *xml_obj = rsc->xml;
clone_variant_data_t *clone_data = NULL;
const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
clone_data = calloc(1, sizeof(clone_variant_data_t));
rsc->variant_opaque = clone_data;
if (is_set(rsc->flags, pe_rsc_promotable)) {
const char *promoted_max = NULL;
const char *promoted_node_max = NULL;
promoted_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTED_MAX);
if (promoted_max == NULL) {
// @COMPAT deprecated since 2.0.0
promoted_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_MASTER_MAX);
}
promoted_node_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTED_NODEMAX);
if (promoted_node_max == NULL) {
// @COMPAT deprecated since 2.0.0
promoted_node_max = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_MASTER_NODEMAX);
}
clone_data->promoted_max = crm_parse_int(promoted_max, "1");
clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
}
// Implied by calloc()
/* clone_data->xml_obj_child = NULL; */
clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");
if (max_clones) {
clone_data->clone_max = crm_parse_int(max_clones, "1");
} else if (pcmk__list_of_multiple(data_set->nodes)) {
clone_data->clone_max = g_list_length(data_set->nodes);
} else {
clone_data->clone_max = 1; /* Handy during crm_verify */
}
clone_data->ordered = crm_is_true(ordered);
if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
"because anonymous clones support only one instance "
"per node", rsc->id);
clone_data->clone_node_max = 1;
}
pe_rsc_trace(rsc, "Options for %s", rsc->id);
pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
pe_rsc_trace(rsc, "\tClone is unique: %s",
is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
pe_rsc_trace(rsc, "\tClone is promotable: %s",
is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false");
// Clones may contain a single group or primitive
for (a_child = __xml_first_child_element(xml_obj); a_child != NULL;
a_child = __xml_next_element(a_child)) {
if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE)
|| crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) {
clone_data->xml_obj_child = a_child;
break;
}
}
if (clone_data->xml_obj_child == NULL) {
pcmk__config_err("%s has nothing to clone", rsc->id);
return FALSE;
}
/*
* Make clones ever so slightly sticky by default
*
* This helps ensure clone instances are not shuffled around the cluster
* for no benefit in situations when pre-allocation is not appropriate
*/
if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
}
/* This ensures that the globally-unique value always exists for children to
* inherit when being unpacked, as well as in resource agents' environment.
*/
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
if (clone_data->clone_max <= 0) {
/* Create one child instance so that unpack_find_resource() will hook up
* any orphans up to the parent correctly.
*/
if (pe__create_clone_child(rsc, data_set) == NULL) {
return FALSE;
}
} else {
// Create a child instance for each available instance number
for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
if (pe__create_clone_child(rsc, data_set) == NULL) {
return FALSE;
}
}
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
return TRUE;
}
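/* For illustration: a minimal clone definition of the kind unpacked above.
 * The IDs are hypothetical, while "clone-max" and "clone-node-max" are the
 * meta-attributes read through rsc->meta:
 *
 *   <clone id="ping-clone">
 *     <meta_attributes id="ping-clone-meta">
 *       <nvpair id="ping-clone-max" name="clone-max" value="2"/>
 *       <nvpair id="ping-clone-node-max" name="clone-node-max" value="1"/>
 *     </meta_attributes>
 *     <primitive id="ping" class="ocf" provider="pacemaker" type="ping"/>
 *   </clone>
 */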
gboolean
clone_active(pe_resource_t * rsc, gboolean all)
{
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
return TRUE;
} else if (all && child_active == FALSE) {
return FALSE;
}
}
if (all) {
return TRUE;
} else {
return FALSE;
}
}
static void
short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
{
if(suffix == NULL) {
suffix = "";
}
if (list) {
if (options & pe_print_html) {
status_print("");
}
status_print("%s%s: [%s ]%s", prefix, type, list, suffix);
if (options & pe_print_html) {
status_print("\n");
} else if (options & pe_print_suppres_nl) {
/* nothing */
} else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
status_print("\n");
}
}
}
static const char *
configured_role_str(pe_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
configured_role(pe_resource_t * rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
return RSC_ROLE_UNKNOWN;
}
static void
clone_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
const char *target_role = configured_role_str(rsc);
GListPtr gIter = rsc->children;
status_print("%sid);
status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false");
status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
status_print("failure_ignored=\"%s\" ",
is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s\n", pre_text);
free(child_text);
}
bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any)
{
GListPtr gIter;
bool all = !any;
if(is_set(rsc->flags, flag)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
if(is_set_recursive(gIter->data, flag, any)) {
if(any) {
return TRUE;
}
} else if(all) {
return FALSE;
}
}
if(all) {
return TRUE;
}
return FALSE;
}
void
clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *list_text = NULL;
char *child_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
clone_print_xml(rsc, pre_text, options, print_data);
return;
}
get_clone_variant_data(clone_data, rsc);
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (is_not_set(child_rsc->flags, pe_rsc_orphan)
&& is_not_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
if (options & pe_print_html) {
status_print("- \n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("
\n");
}
}
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
short_print(list_text, child_text, "Masters", NULL, options, print_data);
g_list_free(master_list);
free(list_text);
list_text = NULL;
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
} else {
short_print(list_text, child_text, "Slaves", NULL, options, print_data);
}
} else {
short_print(list_text, child_text, "Started", NULL, options, print_data);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
if (is_not_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (is_not_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list); stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
short_print(stopped_list, child_text, state, NULL, options, print_data);
free(stopped_list);
}
if (options & pe_print_html) {
status_print("
\n");
}
free(child_text);
}
-PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
GListPtr gIter = rsc->children;
-
int rc = pcmk_rc_no_output;
gboolean printed_header = FALSE;
+ gboolean print_everything = TRUE;
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
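+    /* Children are printed unconditionally when this clone itself was named
+     * in only_rsc, either by its printable ID or, when the ID carries an
+     * instance suffix (':'), by its exact ID.
+     */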
+ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
if (!printed_header) {
printed_header = TRUE;
rc = pe__name_and_nvpairs_xml(out, true, "clone", 7
, "id", rsc->id
, "multi_state", BOOL2STR(is_set(rsc->flags, pe_rsc_promotable))
, "unique", BOOL2STR(is_set(rsc->flags, pe_rsc_unique))
, "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
, "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
, "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
, "target_role", configured_role_str(rsc));
+
CRM_ASSERT(rc == pcmk_rc_ok);
}
- out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_node);
+ out->message(out, crm_map_element_name(child_rsc->xml), options,
+ child_rsc, only_node, only_rsc);
}
if (printed_header) {
pcmk__output_xml_pop_parent(out);
}
return rc;
}
-PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__clone_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
char *list_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
get_clone_variant_data(clone_data, rsc);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
+
out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
rsc->id, ID(clone_data->xml_obj_child),
is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ rc = pcmk_rc_ok;
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (is_not_set(child_rsc->flags, pe_rsc_orphan)
&& is_not_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
- out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_node);
+ GListPtr all = NULL;
+
+ /* Print every resource that's a child of this clone. */
+ all = g_list_prepend(all, strdup("*"));
+ out->message(out, crm_map_element_name(child_rsc->xml), options,
+ child_rsc, only_node, all);
+ g_list_free_full(all, free);
}
}
if (is_set(options, pe_print_clone_details)) {
free(stopped_list);
out->end_list(out);
return pcmk_rc_ok;
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
out->list_item(out, NULL, " Masters: [%s ]", list_text);
g_list_free(master_list);
free(list_text);
list_text = NULL;
}
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
if (is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
out->list_item(out, NULL, " Slaves (target-role): [%s ]", list_text);
} else {
out->list_item(out, NULL, " Slaves: [%s ]", list_text);
}
} else {
out->list_item(out, NULL, " Started: [%s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
}
if (is_not_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (is_not_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet
* If we've not probed for them yet, the Stopped list will be empty
*/
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(only_node, node->details->uname)) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
out->list_item(out, NULL, " %s: [%s ]", state, stopped_list);
free(stopped_list);
}
}
out->end_list(out);
- return pcmk_rc_ok;
+ return rc;
}
-PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("clone", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__clone_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
char *list_text = NULL;
char *stopped_list = NULL;
GListPtr master_list = NULL;
GListPtr started_list = NULL;
GListPtr gIter = rsc->children;
clone_variant_data_t *clone_data = NULL;
int active_instances = 0;
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
get_clone_variant_data(clone_data, rsc);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
+
out->begin_list(out, NULL, NULL, "Clone Set: %s [%s]%s%s%s",
rsc->id, ID(clone_data->xml_obj_child),
is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ rc = pcmk_rc_ok;
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
}
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
if (options & pe_print_clone_details) {
print_full = TRUE;
}
if (is_set(rsc->flags, pe_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
print_full = TRUE;
}
// Everything else in this block is for anonymous clones
} else if (is_set(options, pe_print_pending)
&& (child_rsc->pending_task != NULL)
&& strcmp(child_rsc->pending_task, "probe")) {
// Print individual instance when non-probe action is pending
print_full = TRUE;
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
if (is_not_set(child_rsc->flags, pe_rsc_orphan)
&& is_not_set(options, pe_print_clone_active)) {
stopped_list = pcmk__add_word(stopped_list, child_rsc->id);
}
} else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
|| is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
|| is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
} else if (a_role > RSC_ROLE_SLAVE) {
master_list = g_list_append(master_list, location);
} else {
started_list = g_list_append(started_list, location);
}
} else {
/* uncolocated group - bleh */
print_full = TRUE;
}
} else {
// Instance of partially active anonymous clone
print_full = TRUE;
}
if (print_full) {
- out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_node);
+ GListPtr all = NULL;
+
+ /* Print every resource that's a child of this clone. */
+ all = g_list_prepend(all, strdup("*"));
+ out->message(out, crm_map_element_name(child_rsc->xml), options,
+ child_rsc, only_node, all);
+ g_list_free_full(all, free);
}
}
if (is_set(options, pe_print_clone_details)) {
free(stopped_list);
out->end_list(out);
return pcmk_rc_ok;
}
/* Masters */
master_list = g_list_sort(master_list, sort_node_uname);
for (gIter = master_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
out->list_item(out, "Masters", "[%s ]", list_text);
g_list_free(master_list);
free(list_text);
list_text = NULL;
}
/* Started/Slaves */
started_list = g_list_sort(started_list, sort_node_uname);
for (gIter = started_list; gIter; gIter = gIter->next) {
pe_node_t *host = gIter->data;
if (!pcmk__str_in_list(only_node, host->details->uname)) {
continue;
}
list_text = pcmk__add_word(list_text, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
if (is_set(rsc->flags, pe_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
if(role == RSC_ROLE_SLAVE) {
out->list_item(out, "Slaves (target-role)", "[%s ]", list_text);
} else {
out->list_item(out, "Slaves", "[%s ]", list_text);
}
} else {
out->list_item(out, "Started", "[%s ]", list_text);
}
g_list_free(started_list);
free(list_text);
list_text = NULL;
}
if (is_not_set(options, pe_print_clone_active)) {
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
if (role == RSC_ROLE_STOPPED) {
state = "Stopped (disabled)";
}
if (is_not_set(rsc->flags, pe_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GListPtr nIter;
GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
/* Custom stopped list for non-unique clones */
free(stopped_list);
stopped_list = NULL;
if (list == NULL) {
/* Clusters with symmetrical=false haven't calculated allowed_nodes yet.
 * If we haven't probed for them yet, the Stopped list will be empty.
 */
list = g_hash_table_get_values(rsc->known_on);
}
list = g_list_sort(list, sort_node_uname);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = (pe_node_t *)nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(only_node, node->details->uname)) {
stopped_list = pcmk__add_word(stopped_list,
node->details->uname);
}
}
g_list_free(list);
}
if (stopped_list != NULL) {
out->list_item(out, state, "[%s ]", stopped_list);
free(stopped_list);
}
}
out->end_list(out);
- return pcmk_rc_ok;
+ return rc;
}
void
clone_free(pe_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
free_xml(child_rsc->xml);
child_rsc->xml = NULL;
/* There could be a saved unexpanded xml */
free_xml(child_rsc->orig_xml);
child_rsc->orig_xml = NULL;
child_rsc->fns->free(child_rsc);
}
g_list_free(rsc->children);
if (clone_data) {
CRM_ASSERT(clone_data->demote_notify == NULL);
CRM_ASSERT(clone_data->stop_notify == NULL);
CRM_ASSERT(clone_data->start_notify == NULL);
CRM_ASSERT(clone_data->promote_notify == NULL);
}
common_free(rsc);
}
enum rsc_role_e
clone_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
clone_role = a_role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
return clone_role;
}
/*!
* \internal
* \brief Check whether a clone has an instance for every node
*
* \param[in] rsc Clone to check
* \param[in] data_set Cluster state
*/
bool
pe__is_universal_clone(pe_resource_t *rsc,
pe_working_set_t *data_set)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (clone_data->clone_max == g_list_length(data_set->nodes)) {
return TRUE;
}
}
return FALSE;
}
+
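+/*!
+ * \internal
+ * \brief Check whether a clone should be filtered out of output
+ *
+ * The clone passes (is not filtered) if its printable ID, the ID of its
+ * child resource definition, or any of its instances matches an entry
+ * in \p only_rsc.
+ */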
+gboolean
+pe__clone_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
+{
+ gboolean passes = FALSE;
+ clone_variant_data_t *clone_data = NULL;
+
+ if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
+ passes = TRUE;
+ } else {
+ get_clone_variant_data(clone_data, rsc);
+ passes = pcmk__str_in_list(only_rsc, ID(clone_data->xml_obj_child));
+
+ if (!passes) {
+ for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ return !passes;
+}
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 1f063484a8..a7a995602c 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,993 +1,997 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
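// Variant method tables, indexed by enum pe_obj_types: native, group, clone, bundle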
resource_object_functions_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_print,
native_active,
native_resource_state,
native_location,
native_free,
pe__count_common,
+ pe__native_is_filtered,
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_print,
group_active,
group_resource_state,
native_location,
group_free,
pe__count_common,
+ pe__group_is_filtered,
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free,
pe__count_common,
+ pe__clone_is_filtered,
},
{
pe__unpack_bundle,
native_find_rsc,
native_parameter,
pe__print_bundle,
pe__bundle_active,
pe__bundle_resource_state,
native_location,
pe__free_bundle,
pe__count_bundle,
+ pe__bundle_is_filtered,
}
};
static enum pe_obj_types
get_resource_type(const char *name)
{
if (safe_str_eq(name, XML_CIB_TAG_RESOURCE)) {
return pe_native;
} else if (safe_str_eq(name, XML_CIB_TAG_GROUP)) {
return pe_group;
} else if (safe_str_eq(name, XML_CIB_TAG_INCARNATION)) {
return pe_clone;
} else if (safe_str_eq(name, XML_CIB_TAG_MASTER)) {
// @COMPAT deprecated since 2.0.0
return pe_clone;
} else if (safe_str_eq(name, XML_CIB_TAG_CONTAINER)) {
return pe_container;
}
return pe_unknown;
}
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
add_hash_param(user_data, key, value);
}
void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
if (rsc->xml) {
xmlAttrPtr xIter = NULL;
for (xIter = rsc->xml->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(rsc->xml, prop_name);
add_hash_param(meta_hash, prop_name, prop_value);
}
}
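/* Evaluate this resource's own meta_attributes blocks */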
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
}
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
}
void
get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
}
}
#if ENABLE_VERSIONED_ATTRS
void
pe_get_versioned_attributes(xmlNode * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
GHashTable *node_hash = NULL;
if (node) {
node_hash = node->details->attrs;
}
pe_unpack_versioned_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash,
meta_hash, data_set->now, NULL);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
pe_get_versioned_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe_unpack_versioned_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_ATTR_SETS,
node_hash, meta_hash, data_set->now, NULL);
}
}
#endif
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, "name");
const char *role = crm_element_value(op, "role");
char *key = NULL;
if (role == NULL || crm_str_eq(role, RSC_ROLE_STARTED_S, TRUE)
|| crm_str_eq(role, RSC_ROLE_SLAVE_S, TRUE)) {
role = RSC_ROLE_UNKNOWN_S;
}
key = crm_strdup_printf("%s-%s", name, role);
return key;
}
static gboolean
unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *clone = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have an id", crm_element_name(xml_obj));
return FALSE;
}
if (crm_str_eq(template_ref, id, TRUE)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
}
template = find_entity(cib_resources, XML_CIB_TAG_RSC_TEMPLATE, template_ref);
if (template == NULL) {
pe_err("No template named '%s'", template_ref);
return FALSE;
}
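/* Expand the template: copy its XML, then give the copy this resource's
 * element name and id so it unpacks like a directly defined resource */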
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_replace(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
}
template_ops = find_xml_node(new_xml, "operations", FALSE);
for (child_xml = __xml_first_child_element(xml_obj); child_xml != NULL;
child_xml = __xml_next_element(child_xml)) {
xmlNode *new_child = NULL;
new_child = add_node_copy(new_xml, child_xml);
if (crm_str_eq((const char *)new_child->name, "operations", TRUE)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
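/* Merge the template's operations into the resource's, keyed by
 * name+role; operations defined directly on the resource win */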
xmlNode *op = NULL;
GHashTable *rsc_ops_hash = g_hash_table_new_full(crm_str_hash,
g_str_equal, free,
NULL);
for (op = __xml_first_child_element(rsc_ops); op != NULL;
op = __xml_next_element(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = __xml_first_child_element(template_ops); op != NULL;
op = __xml_next_element(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
add_node_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
free_xml(template_ops);
}
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
/* Disable multi-level templates for now */
/*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
} */
return TRUE;
}
static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for processing resource list of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have an id", crm_element_name(xml_obj));
return FALSE;
}
if (crm_str_eq(template_ref, id, TRUE)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
static bool
detect_promotable(pe_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
if (crm_is_true(promotable)) {
return TRUE;
}
// @COMPAT deprecated since 2.0.0
if (safe_str_eq(crm_element_name(rsc->xml), XML_CIB_TAG_MASTER)) {
/* @TODO in some future version, pe_warn_once() here,
* then drop support in even later version
*/
g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
strdup(XML_BOOLEAN_TRUE));
return TRUE;
}
return FALSE;
}
gboolean
common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc,
pe_resource_t * parent, pe_working_set_t * data_set)
{
bool isdefault = FALSE;
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
const char *value = NULL;
const char *rclass = NULL; /* Look for this after any templates have been expanded */
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
bool guest_node = FALSE;
bool remote_node = FALSE;
bool has_versioned_params = FALSE;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
crm_log_xml_trace(xml_obj, "Processing resource input...");
if (id == NULL) {
pe_err("Must specify id tag in <resource>");
return FALSE;
} else if (rsc == NULL) {
pe_err("Nowhere to unpack resource into");
return FALSE;
}
if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
return FALSE;
}
*rsc = calloc(1, sizeof(pe_resource_t));
(*rsc)->cluster = data_set;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "Expanded resource...");
(*rsc)->xml = expanded_xml;
(*rsc)->orig_xml = xml_obj;
} else {
(*rsc)->xml = xml_obj;
(*rsc)->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
rclass = crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS);
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
(*rsc)->ops_xml = expand_idref(ops, data_set->input);
(*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
if ((*rsc)->variant == pe_unknown) {
pe_err("Unknown resource type: %s", crm_element_name((*rsc)->xml));
free(*rsc);
return FALSE;
}
(*rsc)->parameters = crm_str_table_new();
#if ENABLE_VERSIONED_ATTRS
(*rsc)->versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
#endif
(*rsc)->meta = crm_str_table_new();
(*rsc)->allowed_nodes =
g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free);
(*rsc)->known_on = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL,
free);
value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
if (value) {
(*rsc)->id = crm_strdup_printf("%s:%s", id, value);
add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
} else {
(*rsc)->id = strdup(id);
}
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
pe_rsc_trace((*rsc), "Unpacking resource...");
get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
get_rsc_attributes((*rsc)->parameters, *rsc, NULL, data_set);
#if ENABLE_VERSIONED_ATTRS
pe_get_versioned_attributes((*rsc)->versioned_parameters, *rsc, NULL, data_set);
#endif
(*rsc)->flags = 0;
set_bit((*rsc)->flags, pe_rsc_runnable);
set_bit((*rsc)->flags, pe_rsc_provisional);
if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) {
set_bit((*rsc)->flags, pe_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = RSC_ROLE_STOPPED;
(*rsc)->next_role = RSC_ROLE_UNKNOWN;
(*rsc)->recovery_type = recovery_stop_start;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
(*rsc)->priority = crm_parse_int(value, "0");
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
guest_node = TRUE;
} else {
remote_node = TRUE;
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
#if ENABLE_VERSIONED_ATTRS
has_versioned_params = xml_has_children((*rsc)->versioned_parameters);
#endif
if (crm_is_true(value) && has_versioned_params) {
pe_rsc_trace((*rsc), "Migration is disabled for resources with versioned parameters");
} else if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_allow_migrate);
} else if ((value == NULL) && remote_node && !has_versioned_params) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
* resources within the remote-node before moving. Allowing
* migration support enables this feature. If this ever causes
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
* We don't support migration for versioned resources, though. */
set_bit((*rsc)->flags, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && safe_str_neq("default", value)) {
if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_managed);
} else {
clear_bit((*rsc)->flags, pe_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
clear_bit((*rsc)->flags, pe_rsc_managed);
set_bit((*rsc)->flags, pe_rsc_maintenance);
}
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
clear_bit((*rsc)->flags, pe_rsc_managed);
set_bit((*rsc)->flags, pe_rsc_maintenance);
}
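/* Resources within a clone (including the clone itself) may be anonymous
 * or promotable; everything else is implicitly globally unique */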
if (pe_rsc_is_clone(uber_parent(*rsc))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_unique);
}
if (detect_promotable(*rsc)) {
set_bit((*rsc)->flags, pe_rsc_promotable);
}
} else {
set_bit((*rsc)->flags, pe_rsc_unique);
}
pe_rsc_trace((*rsc), "Options for %s", (*rsc)->id);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
if (safe_str_eq(value, "restart")) {
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "\tDependency restart handling: restart");
pe_warn_once(pe_wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pe_rsc_trace((*rsc), "\tDependency restart handling: ignore");
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (safe_str_eq(value, "stop_only")) {
(*rsc)->recovery_type = recovery_stop_only;
pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop only");
} else if (safe_str_eq(value, "block")) {
(*rsc)->recovery_type = recovery_block;
pe_rsc_trace((*rsc), "\tMultiple running resource recovery: block");
} else {
(*rsc)->recovery_type = recovery_stop_start;
pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop/start");
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
if (value != NULL && safe_str_neq("default", value)) {
(*rsc)->stickiness = char2score(value);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
if (value != NULL && safe_str_neq("default", value)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
/* @TODO We use 1 here to preserve previous behavior, but this
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
pe_warn_once(pe_wo_neg_threshold,
XML_RSC_ATTR_FAIL_STICKINESS
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
}
if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
set_bit(data_set->flags, pe_flag_have_stonith_resource);
set_bit((*rsc)->flags, pe_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
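/* Validate the "requires" meta-attribute; missing or invalid values are
 * replaced with a computed default, which jumps back to this label */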
handle_requires_pref:
if (safe_str_eq(value, "nothing")) {
} else if (safe_str_eq(value, "quorum")) {
set_bit((*rsc)->flags, pe_rsc_needs_quorum);
} else if (safe_str_eq(value, "unfencing")) {
if (is_set((*rsc)->flags, pe_rsc_fence_device)) {
pcmk__config_warn("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to 'quorum' because fencing devices cannot "
"require unfencing", (*rsc)->id);
value = "quorum";
isdefault = TRUE;
goto handle_requires_pref;
} else if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to 'quorum' because fencing is disabled",
(*rsc)->id);
value = "quorum";
isdefault = TRUE;
goto handle_requires_pref;
} else {
set_bit((*rsc)->flags, pe_rsc_needs_fencing);
set_bit((*rsc)->flags, pe_rsc_needs_unfencing);
}
} else if (safe_str_eq(value, "fencing")) {
set_bit((*rsc)->flags, pe_rsc_needs_fencing);
if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
(*rsc)->id);
}
} else {
const char *orig_value = value;
isdefault = TRUE;
if(is_set((*rsc)->flags, pe_rsc_fence_device)) {
value = "quorum";
} else if (((*rsc)->variant == pe_native)
&& safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_OCF)
&& safe_str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_PROVIDER), "pacemaker")
&& safe_str_eq(crm_element_value((*rsc)->xml, XML_ATTR_TYPE), "remote")
) {
value = "quorum";
} else if (is_set(data_set->flags, pe_flag_enable_unfencing)) {
value = "unfencing";
} else if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fencing";
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
value = "nothing";
} else {
value = "quorum";
}
if (orig_value != NULL) {
pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to '%s' because '%s' is not valid",
(*rsc)->id, value, orig_value);
}
goto handle_requires_pref;
}
pe_rsc_trace((*rsc), "\tRequired to start: %s%s", value, isdefault?" (default)":"");
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
value = g_hash_table_lookup((*rsc)->parameters, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
if (value) {
/* reconnect delay works by setting failure_timeout and preventing the
* connection from starting until the failure is cleared. */
(*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
/* we want to override any default failure_timeout in use when remote
* reconnect_interval is in use. */
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "\tDesired next state: %s",
(*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
return FALSE;
}
if (is_set(data_set->flags, pe_flag_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
}
pe_rsc_trace((*rsc), "\tAction notification: %s",
is_set((*rsc)->flags, pe_rsc_notify) ? "required" : "not required");
(*rsc)->utilization = crm_str_table_new();
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
(*rsc)->utilization, NULL, FALSE, data_set);
/* data_set->resources = g_list_append(data_set->resources, (*rsc)); */
if (expanded_xml) {
if (add_template_rsc(xml_obj, data_set) == FALSE) {
return FALSE;
}
}
return TRUE;
}
void
common_update_score(pe_resource_t * rsc, const char *id, int score)
{
pe_node_t *node = NULL;
node = pe_hash_table_lookup(rsc->allowed_nodes, id);
if (node != NULL) {
pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score);
node->weight = pe__add_scores(node->weight, score);
}
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
common_update_score(child_rsc, id, score);
}
}
}
gboolean
is_parent(pe_resource_t *child, pe_resource_t *rsc)
{
pe_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
while (parent->parent != NULL) {
if (parent->parent == rsc) {
return TRUE;
}
parent = parent->parent;
}
return FALSE;
}
pe_resource_t *
uber_parent(pe_resource_t * rsc)
{
pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL && parent->parent->variant != pe_container) {
parent = parent->parent;
}
return parent;
}
void
common_free(pe_resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameters != NULL) {
g_hash_table_destroy(rsc->parameters);
}
#if ENABLE_VERSIONED_ATTRS
if (rsc->versioned_parameters != NULL) {
free_xml(rsc->versioned_parameters);
}
#endif
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
if (rsc->parent == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
rsc->orig_xml = NULL;
/* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
} else if (rsc->orig_xml) {
free_xml(rsc->xml);
rsc->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
pe_rsc_trace(rsc, "Resource freed");
free(rsc->id);
free(rsc->clone_name);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc);
}
/*!
 * \internal
 * \brief Find a node (and optionally count all) where resource is active
*
* \param[in] rsc Resource to check
* \param[out] count_all If not NULL, will be set to count of active nodes
* \param[out] count_clean If not NULL, will be set to count of clean nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
* \note The order of preference is: an active node that is the resource's
* partial migration source; if the resource's "requires" is "quorum" or
* "nothing", the first active node in the list that is clean and online;
* the first active node in the list.
*/
pe_node_t *
pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
pe_node_t *active = NULL;
pe_node_t *node = NULL;
bool keep_looking = FALSE;
bool is_happy = FALSE;
if (count_all) {
*count_all = 0;
}
if (count_clean) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
for (GList *node_iter = rsc->running_on; node_iter != NULL;
node_iter = node_iter->next) {
node = node_iter->data;
keep_looking = FALSE;
is_happy = node->details->online && !node->details->unclean;
if (count_all) {
++*count_all;
}
if (count_clean && is_happy) {
++*count_clean;
}
if (count_all || count_clean) {
// If we're counting, we need to go through entire list
keep_looking = TRUE;
}
if (rsc->partial_migration_source != NULL) {
if (node->details == rsc->partial_migration_source->details) {
// This is the migration source
active = node;
} else {
keep_looking = TRUE;
}
} else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
if (is_happy && (!active || !active->details->online
|| active->details->unclean)) {
// This is the first clean node
active = node;
} else {
keep_looking = TRUE;
}
}
if (active == NULL) {
// This is first node in list
active = node;
}
if (keep_looking == FALSE) {
// Don't waste time iterating if we don't have to
break;
}
}
return active;
}
/*!
 * \internal
 * \brief Find and count active nodes according to "requires"
*
* \param[in] rsc Resource to check
* \param[out] count If not NULL, will be set to count of active nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
* \note This is a convenience wrapper for pe__find_active_on() where the count
* of all active nodes or only clean active nodes is desired according to
* the "requires" meta-attribute.
*/
pe_node_t *
pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
{
if (rsc && is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
return pe__find_active_on(rsc, NULL, count);
}
return pe__find_active_on(rsc, count, NULL);
}
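/*!
 * \internal
 * \brief Update the cluster's instance counters (total, disabled, blocked)
 *        for a resource, recursing into any children
 */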
void
pe__count_common(pe_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
((pe_resource_t *) item->data)->fns->count(item->data);
}
} else if (is_not_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role > RSC_ROLE_STOPPED)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
if (is_set(rsc->flags, pe_rsc_block)) {
rsc->cluster->blocked_resources++;
}
}
}
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index ded36791db..c8b677810f 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,305 +1,392 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/output.h>
#define VARIANT_GROUP 1
#include "./variant.h"
gboolean
group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
group_variant_data_t *group_data = NULL;
const char *group_ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
const char *group_colocated = g_hash_table_lookup(rsc->meta, "collocated");
const char *clone_id = NULL;
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
group_data = calloc(1, sizeof(group_variant_data_t));
group_data->num_children = 0;
group_data->first_child = NULL;
group_data->last_child = NULL;
rsc->variant_opaque = group_data;
// We don't actually need the null checks but it speeds up the common case
if ((group_ordered == NULL)
|| (crm_str_to_boolean(group_ordered, &(group_data->ordered)) < 0)) {
group_data->ordered = TRUE;
}
if ((group_colocated == NULL)
|| (crm_str_to_boolean(group_colocated, &(group_data->colocated)) < 0)) {
group_data->colocated = TRUE;
}
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
for (xml_native_rsc = __xml_first_child_element(xml_obj); xml_native_rsc != NULL;
xml_native_rsc = __xml_next_element(xml_native_rsc)) {
if (crm_str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, TRUE)) {
pe_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (common_unpack(xml_native_rsc, &new_rsc, rsc, data_set) == FALSE) {
pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID));
if (new_rsc != NULL && new_rsc->fns != NULL) {
new_rsc->fns->free(new_rsc);
}
continue;
}
group_data->num_children++;
rsc->children = g_list_append(rsc->children, new_rsc);
if (group_data->first_child == NULL) {
group_data->first_child = new_rsc;
}
group_data->last_child = new_rsc;
pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
}
}
if (group_data->num_children == 0) {
pcmk__config_warn("Group %s does not have any children", rsc->id);
return TRUE; // Allow empty groups, children can be added later
}
pe_rsc_trace(rsc, "Added %d children to resource %s...", group_data->num_children, rsc->id);
return TRUE;
}
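/* A group is active if any member is active; with all=TRUE, only if
 * every member is active */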
gboolean
group_active(pe_resource_t * rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
} else {
c_all = FALSE;
}
}
if (c_any == FALSE) {
return FALSE;
} else if (all && c_all == FALSE) {
return FALSE;
}
return TRUE;
}
static void
group_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
GListPtr gIter = rsc->children;
char *child_text = crm_strdup_printf("%s ", pre_text);
status_print("%s<group id=\"%s\" ", pre_text, rsc->id);
status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
status_print("%s</group>\n", pre_text);
free(child_text);
}
void
group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
char *child_text = NULL;
GListPtr gIter = rsc->children;
if (pre_text == NULL) {
pre_text = " ";
}
if (options & pe_print_xml) {
group_print_xml(rsc, pre_text, options, print_data);
return;
}
child_text = crm_strdup_printf("%s ", pre_text);
status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
if (options & pe_print_html) {
status_print("\n<ul>\n");
} else if ((options & pe_print_log) == 0) {
status_print("\n");
}
if (options & pe_print_brief) {
print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
} else {
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("<li>\n");
}
child_rsc->fns->print(child_rsc, child_text, options, print_data);
if (options & pe_print_html) {
status_print("</li>\n");
}
}
}
if (options & pe_print_html) {
status_print("</ul>\n");
}
free(child_text);
}
-PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
+ GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
GListPtr gIter = rsc->children;
char *count = crm_itoa(g_list_length(gIter));
int rc = pcmk_rc_no_output;
- gboolean printed_header = FALSE;
+ gboolean print_everything = TRUE;
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ free(count);
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- if (!printed_header) {
- printed_header = TRUE;
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+ if (rc == pcmk_rc_no_output) {
rc = pe__name_and_nvpairs_xml(out, true, "group", 2
, "id", rsc->id
, "number_resources", count);
free(count);
CRM_ASSERT(rc == pcmk_rc_ok);
}
- out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_node);
+ out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc,
+ only_node, only_rsc);
}
- if (printed_header) {
+ if (rc == pcmk_rc_ok) {
pcmk__output_xml_pop_parent(out);
}
return rc;
}
-PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__group_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
+ GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
- out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
+
+ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
if (options & pe_print_brief) {
- pe__rscs_brief_output(out, rsc->children, options, TRUE);
+ GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc);
+
+ if (rscs != NULL) {
+ out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
+ pe__rscs_brief_output(out, rscs, options, TRUE);
+
+ rc = pcmk_rc_ok;
+ g_list_free(rscs);
+ }
} else {
for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_node);
+
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s", rsc->id);
+
+ out->message(out, crm_map_element_name(child_rsc->xml), options,
+ child_rsc, only_node, only_rsc);
}
}
- out->end_list(out);
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
- return pcmk_rc_ok;
+ return rc;
}
-PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("group", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__group_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
+ GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
+
+ int rc = pcmk_rc_no_output;
+ gboolean print_everything = TRUE;
+
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return rc;
+ }
- out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
+ print_everything = pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id));
if (options & pe_print_brief) {
- pe__rscs_brief_output(out, rsc->children, options, TRUE);
+ GListPtr rscs = pe__filter_rsc_list(rsc->children, only_rsc);
+
+ if (rscs != NULL) {
+ out->begin_list(out, NULL, NULL, "Resource Group: %s", rsc->id);
+ pe__rscs_brief_output(out, rscs, options, TRUE);
+
+ rc = pcmk_rc_ok;
+ g_list_free(rscs);
+ }
} else {
for (GListPtr gIter = rsc->children; gIter; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- out->message(out, crm_map_element_name(child_rsc->xml), options, child_rsc, only_node);
- }
+ if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) {
+ continue;
+ }
+
+ PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Resource Group: %s", rsc->id);
+
+ out->message(out, crm_map_element_name(child_rsc->xml), options,
+ child_rsc, only_node, only_rsc);
+ }
}
- out->end_list(out);
- return pcmk_rc_ok;
+ PCMK__OUTPUT_LIST_FOOTER(out, rc);
+
+ return rc;
}
void
group_free(pe_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
child_rsc->fns->free(child_rsc);
}
pe_rsc_trace(rsc, "Freeing child list");
g_list_free(rsc->children);
common_free(rsc);
}
enum rsc_role_e
group_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
group_role = role;
}
}
pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
return group_role;
}
+
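+/*!
+ * \internal
+ * \brief Check whether a group should be filtered out of output
+ *
+ * The group passes if its printable ID, its clone-instance ID, or (when
+ * \p check_parent is set) its uber-parent's ID is in \p only_rsc, or if
+ * any of its members passes.
+ */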
+gboolean
+pe__group_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
+{
+ gboolean passes = FALSE;
+
+ if (check_parent && pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
+ passes = TRUE;
+ } else if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc))) {
+ passes = TRUE;
+ } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(only_rsc, rsc->id)) {
+ passes = TRUE;
+ } else {
+ for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
+ pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+
+ if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
+ passes = TRUE;
+ break;
+ }
+ }
+ }
+
+ return !passes;
+}
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 9906cd7d27..ce17d8ca4c 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1460 +1,1494 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/output.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/complex.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#define VARIANT_NATIVE 1
#include "./variant.h"
/*!
* \internal
* \brief Check whether a resource is active on multiple nodes
*/
static bool
is_multiply_active(pe_resource_t *rsc)
{
unsigned int count = 0;
if (rsc->variant == pe_native) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
static void
native_priority_to_node(pe_resource_t * rsc, pe_node_t * node)
{
int priority = 0;
if (rsc->priority == 0) {
return;
}
if (rsc->role == RSC_ROLE_MASTER) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
} else {
priority = rsc->priority;
}
node->details->priority += priority;
pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s)",
node->details->uname, node->details->priority,
rsc->role == RSC_ROLE_MASTER ? "promoted " : "",
rsc->id, rsc->priority,
rsc->role == RSC_ROLE_MASTER ? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
if (node->details->remote_rsc
&& node->details->remote_rsc->container) {
GListPtr gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pe_rsc_trace(rsc, "Node '%s' now has priority %d with %s'%s' (priority: %d%s) "
"from guest node '%s'",
a_node->details->uname, a_node->details->priority,
rsc->role == RSC_ROLE_MASTER ? "promoted " : "",
rsc->id, rsc->priority,
rsc->role == RSC_ROLE_MASTER ? " + 1" : "",
node->details->uname);
}
}
}
void
native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
{
GListPtr gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *a_node = (pe_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (safe_str_eq(a_node->details->id, node->details->id)) {
return;
}
}
pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, node->details->uname,
is_set(rsc->flags, pe_rsc_managed)?"":"(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
if (rsc->variant == pe_native) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node);
}
if (rsc->variant == pe_native && node->details->maintenance) {
clear_bit(rsc->flags, pe_rsc_managed);
}
if (is_not_set(rsc->flags, pe_rsc_managed)) {
pe_resource_t *p = rsc->parent;
pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
while(p && node->details->online) {
/* add without the additional location constraint */
p->running_on = g_list_append(p->running_on, node);
p = p->parent;
}
return;
}
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
case recovery_stop_only:
{
GHashTableIter gIter;
pe_node_t *local_node = NULL;
/* make sure it doesn't come up again */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -INFINITY;
}
}
break;
case recovery_stop_start:
break;
case recovery_block:
clear_bit(rsc->flags, pe_rsc_managed);
set_bit(rsc->flags, pe_rsc_block);
/* If the resource belongs to a group or bundle configured with
* multiple-active=block, block the entire entity.
*/
if (rsc->parent
&& (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
&& rsc->parent->recovery_type == recovery_block) {
GListPtr gIter = rsc->parent->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
clear_bit(child->flags, pe_rsc_managed);
set_bit(child->flags, pe_rsc_block);
}
}
break;
}
crm_debug("%s is active on multiple nodes including %s: %s",
rsc->id, node->details->uname,
recovery2text(rsc->recovery_type));
} else {
pe_rsc_trace(rsc, "Resource %s is active on: %s", rsc->id, node->details->uname);
}
if (rsc->parent != NULL) {
native_add_running(rsc->parent, node, data_set);
}
}
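/* Clear the globally-unique flag (and the corresponding meta-attribute)
 * on a resource and all of its descendants */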
static void
recursive_clear_unique(pe_resource_t *rsc)
{
clear_bit(rsc->flags, pe_rsc_unique);
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
for (GList *child = rsc->children; child != NULL; child = child->next) {
recursive_clear_unique((pe_resource_t *) child->data);
}
}
gboolean
native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_resource_t *parent = uber_parent(rsc);
native_variant_data_t *native_data = NULL;
const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
native_data = calloc(1, sizeof(native_variant_data_t));
rsc->variant_opaque = native_data;
// Only some agent standards support unique and promotable clones
if (is_not_set(ra_caps, pcmk_ra_cap_unique)
&& is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
/* @COMPAT We should probably reject this situation as an error (as we
* do for promotable below) rather than warn and convert, but that would
* be a backward-incompatible change that we should probably do with a
* transform at a schema major version bump.
*/
pe__force_anon(standard, parent, rsc->id, data_set);
/* Clear globally-unique on the parent and all its descendants unpacked
 * so far (clearing the parent should make any children unpacked later
* correct). We have to clear this resource explicitly because it isn't
* hooked into the parent's children yet.
*/
recursive_clear_unique(parent);
recursive_clear_unique(rsc);
}
if (is_not_set(ra_caps, pcmk_ra_cap_promotable)
&& is_set(parent->flags, pe_rsc_promotable)) {
pe_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
rsc->id, standard);
return FALSE;
}
return TRUE;
}
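/* Check whether a resource is on a given node: with pe_find_current, its
 * current location; with pe_find_inactive, whether it is inactive;
 * otherwise, the node it has been allocated to */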
static bool
rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
{
pe_rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, node->details->uname);
if (is_set(flags, pe_find_current) && rsc->running_on) {
for (GListPtr iter = rsc->running_on; iter; iter = iter->next) {
pe_node_t *loc = (pe_node_t *) iter->data;
if (loc->details == node->details) {
return TRUE;
}
}
} else if (is_set(flags, pe_find_inactive) && (rsc->running_on == NULL)) {
return TRUE;
} else if (is_not_set(flags, pe_find_current) && rsc->allocated_to
&& (rsc->allocated_to->details == node->details)) {
return TRUE;
}
return FALSE;
}
pe_resource_t *
native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
int flags)
{
bool match = FALSE;
pe_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
if (flags & pe_find_clone) {
const char *rid = ID(rsc->xml);
if (!pe_rsc_is_clone(uber_parent(rsc))) {
match = FALSE;
} else if (!strcmp(id, rsc->id) || safe_str_eq(id, rid)) {
match = TRUE;
}
} else if (!strcmp(id, rsc->id)) {
match = TRUE;
} else if (is_set(flags, pe_find_renamed)
&& rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = TRUE;
} else if (is_set(flags, pe_find_any)
|| (is_set(flags, pe_find_anon)
&& is_not_set(rsc->flags, pe_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
if (match && on_node) {
bool match_node = rsc_is_on_node(rsc, on_node, flags);
if (match_node == FALSE) {
match = FALSE;
}
}
if (match) {
return rsc;
}
for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
result = rsc->fns->find_rsc(child, id, on_node, flags);
if (result) {
return result;
}
}
return NULL;
}
char *
native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set)
{
char *value_copy = NULL;
const char *value = NULL;
GHashTable *hash = NULL;
GHashTable *local_hash = NULL;
CRM_CHECK(rsc != NULL, return NULL);
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
if (create || g_hash_table_size(rsc->parameters) == 0) {
if (node != NULL) {
pe_rsc_trace(rsc, "Creating hash with node %s", node->details->uname);
} else {
pe_rsc_trace(rsc, "Creating default hash");
}
local_hash = crm_str_table_new();
get_rsc_attributes(local_hash, rsc, node, data_set);
hash = local_hash;
} else {
hash = rsc->parameters;
}
value = g_hash_table_lookup(hash, name);
if (value == NULL) {
/* try meta attributes instead */
value = g_hash_table_lookup(rsc->meta, name);
}
if (value != NULL) {
value_copy = strdup(value);
}
if (local_hash != NULL) {
g_hash_table_destroy(local_hash);
}
return value_copy;
}
gboolean
native_active(pe_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *a_node = (pe_node_t *) gIter->data;
if (a_node->details->unclean) {
pe_rsc_trace(rsc, "Resource %s: node %s is unclean",
rsc->id, a_node->details->uname);
return TRUE;
} else if (a_node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s: node %s is offline",
rsc->id, a_node->details->uname);
} else {
pe_rsc_trace(rsc, "Resource %s active on %s",
rsc->id, a_node->details->uname);
return TRUE;
}
}
return FALSE;
}
struct print_data_s {
long options;
void *print_data;
};
static void
native_print_attr(gpointer key, gpointer value, gpointer user_data)
{
long options = ((struct print_data_s *)user_data)->options;
void *print_data = ((struct print_data_s *)user_data)->print_data;
status_print("Option: %s = %s\n", (char *)key, (char *)value);
}
static const char *
native_pending_state(pe_resource_t * rsc)
{
const char *pending_state = NULL;
if (safe_str_eq(rsc->pending_task, CRMD_ACTION_START)) {
pending_state = "Starting";
} else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STOP)) {
pending_state = "Stopping";
} else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE)) {
pending_state = "Migrating";
} else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED)) {
/* Work might be done in here. */
pending_state = "Migrating";
} else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE)) {
pending_state = "Promoting";
} else if (safe_str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE)) {
pending_state = "Demoting";
}
return pending_state;
}
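/* Unlike native_pending_state(), which maps a pending lifecycle action to
 * a displayable state, this reports a pending operation (currently only
 * monitors) that is shown as an extra flag */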
static const char *
native_pending_task(pe_resource_t * rsc)
{
const char *pending_task = NULL;
if (safe_str_eq(rsc->pending_task, CRMD_ACTION_STATUS)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
* operations are requested. If someone ever requests that
* behavior, uncomment this and the corresponding part of
* unpack.c:unpack_rsc_op().
*/
/*
} else if (safe_str_eq(rsc->pending_task, "probe")) {
pending_task = "Checking";
*/
}
return pending_task;
}
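/* An instance of a promotable clone that is merely "Started" is displayed
 * in the unpromoted (slave) role */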
static enum rsc_role_e
native_displayable_role(pe_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
if ((role == RSC_ROLE_STARTED)
&& is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
role = RSC_ROLE_SLAVE;
}
return role;
}
static const char *
native_displayable_state(pe_resource_t *rsc, long options)
{
const char *rsc_state = NULL;
if (options & pe_print_pending) {
rsc_state = native_pending_state(rsc);
}
if (rsc_state == NULL) {
rsc_state = role2text(native_displayable_role(rsc));
}
return rsc_state;
}
static void
native_print_xml(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
const char *rsc_state = native_displayable_state(rsc, options);
const char *target_role = NULL;
/* resource information. */
status_print("%s<resource ", pre_text);
status_print("id=\"%s\" ", rsc_printable_id(rsc));
status_print("resource_agent=\"%s%s%s:%s\" ", class,
             prov ? "::" : "", prov ? prov : "",
             crm_element_value(rsc->xml, XML_ATTR_TYPE));
status_print("role=\"%s\" ", rsc_state);
if (rsc->meta) {
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print("active=\"%s\" ", rsc->fns->active(rsc, TRUE) ? "true" : "false");
status_print("orphaned=\"%s\" ", is_set(rsc->flags, pe_rsc_orphan) ? "true" : "false");
status_print("blocked=\"%s\" ", is_set(rsc->flags, pe_rsc_block) ? "true" : "false");
status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
status_print("failure_ignored=\"%s\" ",
is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
if (options & pe_print_pending) {
const char *pending_task = native_pending_task(rsc);
if (pending_task) {
status_print("pending=\"%s\" ", pending_task);
}
}
if (options & pe_print_dev) {
status_print("provisional=\"%s\" ",
is_set(rsc->flags, pe_rsc_provisional) ? "true" : "false");
status_print("runnable=\"%s\" ", is_set(rsc->flags, pe_rsc_runnable) ? "true" : "false");
status_print("priority=\"%f\" ", (double)rsc->priority);
status_print("variant=\"%s\" ", crm_element_name(rsc->xml));
}
/* print out the nodes this resource is running on */
if (options & pe_print_rsconly) {
status_print("/>\n");
/* do nothing */
} else if (rsc->running_on != NULL) {
GListPtr gIter = rsc->running_on;
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
status_print("%s    <node name=\"%s\" id=\"%s\" cached=\"%s\"/>\n", pre_text,
node->details->uname, node->details->id,
node->details->online ? "false" : "true");
}
status_print("%s</resource>\n", pre_text);
} else {
status_print("/>\n");
}
}
// Append a flag to resource description string's flags list
static bool
add_output_flag(GString *s, const char *flag_desc, bool have_flags)
{
g_string_append(s, (have_flags? ", " : " ("));
g_string_append(s, flag_desc);
return true;
}
// Append a node name to resource description string's node list
static bool
add_output_node(GString *s, const char *node, bool have_nodes)
{
g_string_append(s, (have_nodes? " " : " [ "));
g_string_append(s, node);
return true;
}
/*!
* \internal
* \brief Create a string description of a resource
*
* \param[in] rsc Resource to describe
* \param[in] name Desired identifier for the resource
* \param[in] node If not NULL, node that resource is "on"
* \param[in] options Bitmask of pe_print_*
* \param[in] target_role Resource's target role
* \param[in] show_nodes Whether to display nodes when multiply active
*
* \return Newly allocated string description of resource
* \note Caller must free the result with g_free().
*/
static gchar *
native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node,
long options, const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *provider = NULL;
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
char *retval = NULL;
GString *outstr = NULL;
bool have_flags = false;
CRM_CHECK(name != NULL, name = "unknown");
CRM_CHECK(kind != NULL, kind = "unknown");
CRM_CHECK(class != NULL, class = "unknown");
if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
}
if ((node == NULL) && (rsc->lock_node != NULL)) {
node = rsc->lock_node;
}
if (is_set(options, pe_print_rsconly)
|| pcmk__list_of_multiple(rsc->running_on)) {
node = NULL;
}
// We need a string of at least this size
outstr = g_string_sized_new(strlen(name) + strlen(class) + strlen(kind)
+ (provider? (strlen(provider) + 2) : 0)
+ (node? strlen(node->details->uname) + 1 : 0)
+ 11);
// Resource name and agent
g_string_printf(outstr, "%s\t(%s%s%s:%s):\t", name, class,
/* @COMPAT This should be a single ':' (see CLBZ#5395) but
* to avoid breaking anything relying on it, we're keeping
* it like this until the next minor version bump.
*/
(provider? "::" : ""), (provider? provider : ""), kind);
// State on node
if (is_set(rsc->flags, pe_rsc_orphan)) {
g_string_append(outstr, " ORPHANED");
}
if (is_set(rsc->flags, pe_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
if (role > RSC_ROLE_SLAVE) {
g_string_append_printf(outstr, " FAILED %s", role2text(role));
} else {
g_string_append(outstr, " FAILED");
}
} else {
g_string_append_printf(outstr, " %s", native_displayable_state(rsc, options));
}
if (node) {
g_string_append_printf(outstr, " %s", node->details->uname);
}
// Flags, as: ( [...])
if (node && !(node->details->online) && node->details->unclean) {
have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
}
if (node && (node == rsc->lock_node)) {
have_flags = add_output_flag(outstr, "LOCKED", have_flags);
}
if (is_set(options, pe_print_pending)) {
const char *pending_task = native_pending_task(rsc);
if (pending_task) {
have_flags = add_output_flag(outstr, pending_task, have_flags);
}
}
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
        /* Only show target role if it limits our abilities (i.e., ignore
         * Started, since it is the default anyway and does not prevent the
         * resource from becoming Master).
         */
if (target_role_e == RSC_ROLE_STOPPED) {
have_flags = add_output_flag(outstr, "disabled", have_flags);
} else if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)
&& target_role_e == RSC_ROLE_SLAVE) {
have_flags = add_output_flag(outstr, "target-role:", have_flags);
g_string_append(outstr, target_role);
}
}
if (is_set(rsc->flags, pe_rsc_block)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
} else if (is_not_set(rsc->flags, pe_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
if (is_set(options, pe_print_dev)) {
        if (is_set(rsc->flags, pe_rsc_provisional)) {
            have_flags = add_output_flag(outstr, "provisional", have_flags);
        }
        if (is_not_set(rsc->flags, pe_rsc_runnable)) {
            have_flags = add_output_flag(outstr, "non-startable", have_flags);
        }
have_flags = add_output_flag(outstr, "variant:", have_flags);
g_string_append_printf(outstr, "%s priority:%f",
crm_element_name(rsc->xml),
(double) (rsc->priority));
}
if (have_flags) {
g_string_append(outstr, ")");
}
// User-supplied description
if (is_set(options, pe_print_rsconly)
|| pcmk__list_of_multiple(rsc->running_on)) {
const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
if (desc) {
g_string_append_printf(outstr, " %s", desc);
}
}
if (show_nodes && is_not_set(options, pe_print_rsconly)
&& pcmk__list_of_multiple(rsc->running_on)) {
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *n = (pe_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
if (have_nodes) {
g_string_append(outstr, " ]");
}
}
retval = outstr->str;
g_string_free(outstr, FALSE);
return retval;
}
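/* The resulting description looks roughly like (tabs shown as spaces;
 * exact values depend on the resource and options):
 *
 *     dummy   (ocf::pacemaker:Dummy):  Stopped (disabled)
 *
 * or, for a multiply-active resource with show_nodes:
 *
 *     dummy   (ocf::pacemaker:Dummy):  Started (unmanaged) [ cluster01 cluster02 ]
 */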
int
pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc,
const char *name, pe_node_t *node, long options)
{
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
const char *target_role = NULL;
xmlNodePtr list_node = NULL;
const char *cl = NULL;
CRM_ASSERT(rsc->variant == pe_native);
CRM_ASSERT(kind != NULL);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
if (is_not_set(rsc->flags, pe_rsc_managed)) {
cl = "rsc-managed";
} else if (is_set(rsc->flags, pe_rsc_failed)) {
cl = "rsc-failed";
} else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
cl = "rsc-failed";
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = "rsc-multiple";
} else if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
cl = "rsc-failure-ignored";
} else {
cl = "rsc-ok";
}
{
gchar *s = native_output_string(rsc, name, node, options, target_role,
true);
list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
pcmk_create_html_node(list_node, "span", NULL, cl, s);
g_free(s);
}
if (is_set(options, pe_print_details)) {
GHashTableIter iter;
gpointer key, value;
out->begin_list(out, NULL, NULL, "Options");
g_hash_table_iter_init(&iter, rsc->parameters);
while (g_hash_table_iter_next(&iter, &key, &value)) {
out->list_item(out, NULL, "Option: %s = %s", (char *) key, (char *) value);
}
out->end_list(out);
}
if (is_set(options, pe_print_dev)) {
GHashTableIter iter;
pe_node_t *n = NULL;
out->begin_list(out, NULL, NULL, "Allowed Nodes");
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
out->list_item(out, NULL, "%s %d", n->details->uname, n->weight);
}
out->end_list(out);
}
if (is_set(options, pe_print_max_details)) {
GHashTableIter iter;
pe_node_t *n = NULL;
out->begin_list(out, NULL, NULL, "=== Allowed Nodes");
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
pe__output_node(n, FALSE, out);
}
out->end_list(out);
}
return pcmk_rc_ok;
}
int
pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc,
const char *name, pe_node_t *node, long options)
{
const char *target_role = NULL;
CRM_ASSERT(rsc->variant == pe_native);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
{
gchar *s = native_output_string(rsc, name, node, options, target_role,
true);
out->list_item(out, NULL, "%s", s);
g_free(s);
}
if (is_set(options, pe_print_details)) {
GHashTableIter iter;
gpointer key, value;
out->begin_list(out, NULL, NULL, "Options");
g_hash_table_iter_init(&iter, rsc->parameters);
while (g_hash_table_iter_next(&iter, &key, &value)) {
out->list_item(out, NULL, "Option: %s = %s", (char *) key, (char *) value);
}
out->end_list(out);
}
if (is_set(options, pe_print_dev)) {
GHashTableIter iter;
pe_node_t *n = NULL;
out->begin_list(out, NULL, NULL, "Allowed Nodes");
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
out->list_item(out, NULL, "%s %d", n->details->uname, n->weight);
}
out->end_list(out);
}
if (is_set(options, pe_print_max_details)) {
GHashTableIter iter;
pe_node_t *n = NULL;
out->begin_list(out, NULL, NULL, "=== Allowed Nodes");
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
pe__output_node(n, FALSE, out);
}
out->end_list(out);
}
return pcmk_rc_ok;
}
void
common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data)
{
const char *target_role = NULL;
CRM_ASSERT(rsc->variant == pe_native);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_INTERNAL_RSC);
if (crm_is_true(is_internal) && is_not_set(options, pe_print_implicit)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return;
}
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
}
if ((pre_text == NULL) && (options & pe_print_printf)) {
pre_text = " ";
}
if (options & pe_print_html) {
        if (is_not_set(rsc->flags, pe_rsc_managed)) {
            status_print("<font color=\"yellow\">");
        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            status_print("<font color=\"red\">");
        } else if (rsc->running_on == NULL) {
            status_print("<font color=\"red\">");
        } else if (pcmk__list_of_multiple(rsc->running_on)) {
            status_print("<font color=\"orange\">");
        } else if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
            status_print("<font color=\"yellow\">");
        } else {
            status_print("<font color=\"green\">");
        }
}
{
gchar *resource_s = native_output_string(rsc, name, node, options,
target_role, false);
status_print("%s%s", (pre_text? pre_text : ""), resource_s);
g_free(resource_s);
}
#if CURSES_ENABLED
if (is_set(options, pe_print_ncurses)
&& is_not_set(options, pe_print_rsconly)
&& !pcmk__list_of_multiple(rsc->running_on)) {
/* coverity[negative_returns] False positive */
move(-1, 0);
}
#endif
if (is_set(options, pe_print_html)) {
status_print(" ");
}
if (is_not_set(options, pe_print_rsconly)
&& pcmk__list_of_multiple(rsc->running_on)) {
GListPtr gIter = rsc->running_on;
int counter = 0;
if (options & pe_print_html) {
status_print("\n");
} else if ((options & pe_print_printf)
|| (options & pe_print_ncurses)) {
status_print("[");
}
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = (pe_node_t *) gIter->data;
counter++;
if (options & pe_print_html) {
status_print("- \n%s", n->details->uname);
} else if ((options & pe_print_printf)
|| (options & pe_print_ncurses)) {
status_print(" %s", n->details->uname);
} else if ((options & pe_print_log)) {
status_print("\t%d : %s", counter, n->details->uname);
} else {
status_print("%s", n->details->uname);
}
            if (options & pe_print_html) {
                status_print("</li>\n");
            }
}
        if (options & pe_print_html) {
            status_print("</ul>\n");
} else if ((options & pe_print_printf)
|| (options & pe_print_ncurses)) {
status_print(" ]");
}
}
    if (options & pe_print_html) {
        status_print("<br/>\n");
} else if (options & pe_print_suppres_nl) {
/* nothing */
} else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
status_print("\n");
}
if (options & pe_print_details) {
struct print_data_s pdata;
pdata.options = options;
pdata.print_data = print_data;
g_hash_table_foreach(rsc->parameters, native_print_attr, &pdata);
}
if (options & pe_print_dev) {
GHashTableIter iter;
pe_node_t *n = NULL;
status_print("%s\tAllowed Nodes", pre_text);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
status_print("%s\t * %s %d", pre_text, n->details->uname, n->weight);
}
}
if (options & pe_print_max_details) {
GHashTableIter iter;
pe_node_t *n = NULL;
status_print("%s\t=== Allowed Nodes\n", pre_text);
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&n)) {
print_node("\t", n, FALSE);
}
}
}
void
native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data)
{
pe_node_t *node = NULL;
CRM_ASSERT(rsc->variant == pe_native);
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
}
node = pe__current_node(rsc);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
}
-PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
const char *rsc_state = native_displayable_state(rsc, options);
long is_print_pending = options & pe_print_pending;
long is_print_dev = options & pe_print_dev;
char ra_name[LINE_MAX];
char *nodes_running_on = NULL;
char *priority = NULL;
int rc = pcmk_rc_no_output;
CRM_ASSERT(rsc->variant == pe_native);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return pcmk_rc_no_output;
+ }
+
/* resource information. */
sprintf(ra_name, "%s%s%s:%s", class, prov ? "::" : "", prov ? prov : ""
, crm_element_value(rsc->xml, XML_ATTR_TYPE));
nodes_running_on = crm_itoa(g_list_length(rsc->running_on));
priority = crm_ftoa(rsc->priority);
rc = pe__name_and_nvpairs_xml(out, true, "resource", 16
, "id", rsc_printable_id(rsc)
, "resource_agent", ra_name
, "role", rsc_state
, "target_role", (rsc->meta ? g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE) : NULL)
, "active", BOOL2STR(rsc->fns->active(rsc, TRUE))
, "orphaned", BOOL2STR(is_set(rsc->flags, pe_rsc_orphan))
, "blocked", BOOL2STR(is_set(rsc->flags, pe_rsc_block))
, "managed", BOOL2STR(is_set(rsc->flags, pe_rsc_managed))
, "failed", BOOL2STR(is_set(rsc->flags, pe_rsc_failed))
, "failure_ignored", BOOL2STR(is_set(rsc->flags, pe_rsc_failure_ignored))
, "nodes_running_on", nodes_running_on
, "pending", (is_print_pending ? native_pending_task(rsc) : NULL)
, "provisional", (is_print_dev ? BOOL2STR(is_set(rsc->flags, pe_rsc_provisional)) : NULL)
, "runnable", (is_print_dev ? BOOL2STR(is_set(rsc->flags, pe_rsc_runnable)) : NULL)
, "priority", (is_print_dev ? priority : NULL)
, "variant", (is_print_dev ? crm_element_name(rsc->xml) : NULL));
free(priority);
free(nodes_running_on);
CRM_ASSERT(rc == pcmk_rc_ok);
if (rsc->running_on != NULL) {
GListPtr gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
rc = pe__name_and_nvpairs_xml(out, false, "node", 3
, "name", node->details->uname
, "id", node->details->id
, "cached", BOOL2STR(node->details->online));
CRM_ASSERT(rc == pcmk_rc_ok);
}
}
pcmk__output_xml_pop_parent(out);
return rc;
}
-PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
pe_node_t *node = pe__current_node(rsc);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return pcmk_rc_no_output;
+ }
+
CRM_ASSERT(rsc->variant == pe_native);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, options);
}
-PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr")
+PCMK__OUTPUT_ARGS("primitive", "unsigned int", "pe_resource_t *", "GListPtr", "GListPtr")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
unsigned int options = va_arg(args, unsigned int);
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
GListPtr only_node G_GNUC_UNUSED = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
pe_node_t *node = pe__current_node(rsc);
CRM_ASSERT(rsc->variant == pe_native);
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ return pcmk_rc_no_output;
+ }
+
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, options);
}
void
native_free(pe_resource_t * rsc)
{
pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
native_resource_state(const pe_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
if (current) {
role = rsc->role;
}
pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
return role;
}
/*!
* \internal
* \brief List nodes where a resource (or any of its children) is located
*
* \param[in] rsc Resource to check
* \param[out] list List to add result to
* \param[in] current 0 = where allocated, 1 = where running, 2 = where running or pending
*
* \return If list contains only one node, that node
*/
pe_node_t *
native_location(const pe_resource_t *rsc, GList **list, int current)
{
pe_node_t *one = NULL;
GListPtr result = NULL;
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
child->fns->location(child, &result, current);
}
} else if (current) {
if (rsc->running_on) {
result = g_list_copy(rsc->running_on);
}
if ((current == 2) && rsc->pending_node
&& !pe_find_node_id(result, rsc->pending_node->details->id)) {
result = g_list_append(result, rsc->pending_node);
}
} else if (current == FALSE && rsc->allocated_to) {
result = g_list_append(NULL, rsc->allocated_to);
}
if (result && (result->next == NULL)) {
one = result->data;
}
if (list) {
GListPtr gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
}
}
}
g_list_free(result);
return one;
}
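/* Usage sketch (illustrative): find the single node where a resource is
 * known to be running.  The returned node is only meaningful when the
 * resource is active on exactly one node; the nodes in the output list
 * are borrowed, so only the list itself is freed.
 *
 *     GList *nodes = NULL;
 *     pe_node_t *only = native_location(rsc, &nodes, 1); // 1 = running
 *
 *     if (only != NULL) {
 *         crm_trace("%s is active only on %s", rsc->id, only->details->uname);
 *     }
 *     g_list_free(nodes);
 */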
static void
get_rscs_brief(GListPtr rsc_list, GHashTable * rsc_table, GHashTable * active_table)
{
GListPtr gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
int offset = 0;
char buffer[LINE_MAX];
int *rsc_counter = NULL;
int *active_counter = NULL;
if (rsc->variant != pe_native) {
continue;
}
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
if (is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
offset += snprintf(buffer + offset, LINE_MAX - offset, "::%s", prov);
}
offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
CRM_LOG_ASSERT(offset > 0);
if (rsc_table) {
rsc_counter = g_hash_table_lookup(rsc_table, buffer);
if (rsc_counter == NULL) {
rsc_counter = calloc(1, sizeof(int));
*rsc_counter = 0;
g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
}
(*rsc_counter)++;
}
if (active_table) {
GListPtr gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pe_node_t *node = (pe_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE) {
continue;
}
node_table = g_hash_table_lookup(active_table, node->details->uname);
if (node_table == NULL) {
node_table = crm_str_table_new();
g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
}
active_counter = g_hash_table_lookup(node_table, buffer);
if (active_counter == NULL) {
active_counter = calloc(1, sizeof(int));
*active_counter = 0;
g_hash_table_insert(node_table, strdup(buffer), active_counter);
}
(*active_counter)++;
}
}
}
}
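/* The tables built above are keyed by agent specification, in the form
 * "class[::provider]:type".  rsc_table maps each key to its configured
 * count, and active_table maps node name -> (key -> active count).  For
 * example (illustrative), two nodes each running one IPaddr:
 *
 *     rsc_table:    "ocf::heartbeat:IPaddr" -> 2
 *     active_table: "cluster01" -> { "ocf::heartbeat:IPaddr" -> 1 }
 *                   "cluster02" -> { "ocf::heartbeat:IPaddr" -> 1 }
 */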
static void
destroy_node_table(gpointer data)
{
GHashTable *node_table = data;
if (node_table) {
g_hash_table_destroy(node_table);
}
}
void
print_rscs_brief(GListPtr rsc_list, const char *pre_text, long options,
void *print_data, gboolean print_all)
{
GHashTable *rsc_table = crm_str_table_new();
GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
free, destroy_node_table);
GHashTableIter hash_iter;
char *type = NULL;
int *rsc_counter = NULL;
get_rscs_brief(rsc_list, rsc_table, active_table);
g_hash_table_iter_init(&hash_iter, rsc_table);
while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
GHashTableIter hash_iter2;
char *node_name = NULL;
GHashTable *node_table = NULL;
int active_counter_all = 0;
g_hash_table_iter_init(&hash_iter2, active_table);
while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
int *active_counter = g_hash_table_lookup(node_table, type);
if (active_counter == NULL || *active_counter == 0) {
continue;
} else {
active_counter_all += *active_counter;
}
if (options & pe_print_rsconly) {
node_name = NULL;
}
if (options & pe_print_html) {
status_print("\n");
}
if (print_all) {
status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
active_counter ? *active_counter : 0,
rsc_counter ? *rsc_counter : 0, type,
active_counter && (*active_counter > 0) && node_name ? node_name : "");
} else {
status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
active_counter ? *active_counter : 0, type,
active_counter && (*active_counter > 0) && node_name ? node_name : "");
}
if (options & pe_print_html) {
status_print("\n");
}
}
if (print_all && active_counter_all == 0) {
if (options & pe_print_html) {
status_print("\n");
}
status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
active_counter_all,
rsc_counter ? *rsc_counter : 0, type);
if (options & pe_print_html) {
status_print("\n");
}
}
}
if (rsc_table) {
g_hash_table_destroy(rsc_table);
rsc_table = NULL;
}
if (active_table) {
g_hash_table_destroy(active_table);
active_table = NULL;
}
}
int
pe__rscs_brief_output(pcmk__output_t *out, GListPtr rsc_list, long options, gboolean print_all)
{
GHashTable *rsc_table = crm_str_table_new();
GHashTable *active_table = g_hash_table_new_full(crm_str_hash, g_str_equal,
free, destroy_node_table);
GListPtr sorted_rscs;
int rc = pcmk_rc_no_output;
get_rscs_brief(rsc_list, rsc_table, active_table);
    /* Make a list of the rsc_table keys so that they can be sorted, which
     * keeps output order consistent between systems.
     */
sorted_rscs = g_hash_table_get_keys(rsc_table);
sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
for (GListPtr gIter = sorted_rscs; gIter; gIter = gIter->next) {
char *type = (char *) gIter->data;
int *rsc_counter = g_hash_table_lookup(rsc_table, type);
GHashTableIter hash_iter2;
char *node_name = NULL;
GHashTable *node_table = NULL;
int active_counter_all = 0;
g_hash_table_iter_init(&hash_iter2, active_table);
while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
int *active_counter = g_hash_table_lookup(node_table, type);
if (active_counter == NULL || *active_counter == 0) {
continue;
} else {
active_counter_all += *active_counter;
}
if (options & pe_print_rsconly) {
node_name = NULL;
}
if (print_all) {
out->list_item(out, NULL, " %d/%d\t(%s):\tActive %s",
*active_counter,
rsc_counter ? *rsc_counter : 0, type,
(*active_counter > 0) && node_name ? node_name : "");
} else {
out->list_item(out, NULL, " %d\t(%s):\tActive %s",
*active_counter, type,
(*active_counter > 0) && node_name ? node_name : "");
}
rc = pcmk_rc_ok;
}
if (print_all && active_counter_all == 0) {
out->list_item(out, NULL, " %d/%d\t(%s):\tActive",
active_counter_all,
rsc_counter ? *rsc_counter : 0, type);
rc = pcmk_rc_ok;
}
}
if (rsc_table) {
g_hash_table_destroy(rsc_table);
rsc_table = NULL;
}
if (active_table) {
g_hash_table_destroy(active_table);
active_table = NULL;
}
if (sorted_rscs) {
g_list_free(sorted_rscs);
}
return rc;
}
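/* With print_all, each brief list item has the form
 * "active/configured (agent-spec): Active [node]", e.g. (illustrative):
 *
 *      1/1     (stonith:fence_xvm):    Active cluster01
 *      2/2     (ocf::pacemaker:ping):  Active
 */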
+
+gboolean
+pe__native_is_filtered(pe_resource_t *rsc, GListPtr only_rsc, gboolean check_parent)
+{
+ if (pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) ||
+ pcmk__str_in_list(only_rsc, rsc->id)) {
+ return FALSE;
+ } else if (check_parent) {
+ pe_resource_t *up = uber_parent(rsc);
+
+ if (pe_rsc_is_bundled(rsc)) {
+ return up->parent->fns->is_filtered(up->parent, only_rsc, FALSE);
+ } else {
+ return up->fns->is_filtered(up, only_rsc, FALSE);
+ }
+ }
+
+ return TRUE;
+}
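+
+/* Semantics sketch (illustrative): is_filtered() returns TRUE when the
+ * resource should be hidden.  A primitive stays visible if its printable
+ * ID or raw ID is in only_rsc, or, with check_parent, if an ancestor
+ * matches:
+ *
+ *     GListPtr only_rsc = g_list_append(NULL, (gpointer) "mysql-clone-group");
+ *
+ *     // FALSE (shown): the primitive inherits its ancestor's match
+ *     rsc->fns->is_filtered(rsc, only_rsc, TRUE);
+ */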
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index 12646e6598..ad1f44c7bd 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,1782 +1,1807 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/iso8601_internal.h>
#include <crm/common/xml_internal.h>
#include <crm/pengine/internal.h>
static char *
failed_action_string(xmlNodePtr xml_op) {
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
time_t last_change = 0;
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_change) == pcmk_ok) {
crm_time_t *crm_when = crm_time_new(NULL);
char *time_s = NULL;
char *buf = NULL;
crm_time_set_timet(crm_when, &last_change);
time_s = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
buf = crm_strdup_printf("%s on %s '%s' (%d): call=%s, status='%s', "
"exitreason='%s', " XML_RSC_OP_LAST_CHANGE
"='%s', queued=%sms, exec=%sms",
op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
services_ocf_exitcode_str(rc), rc,
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
services_lrm_status_str(status),
exit_reason ? exit_reason : "none",
time_s,
crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
crm_time_free(crm_when);
free(time_s);
return buf;
} else {
return crm_strdup_printf("%s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'",
op_key ? op_key : ID(xml_op),
crm_element_value(xml_op, XML_ATTR_UNAME),
services_ocf_exitcode_str(rc), rc,
crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
services_lrm_status_str(status),
exit_reason ? exit_reason : "none");
}
}
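/* A sketch of the line this builds when timing is available (values
 * illustrative):
 *
 *     dummy_monitor_10000 on cluster02 'not running' (7): call=21,
 *     status='complete', exitreason='none',
 *     last-rc-change='Wed Jan  1 12:00:00 2020', queued=0ms, exec=33ms
 */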
static const char *
get_cluster_stack(pe_working_set_t *data_set)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
data_set->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
static char *
last_changed_string(const char *last_written, const char *user,
const char *client, const char *origin) {
if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
return crm_strdup_printf("%s%s%s%s%s%s%s",
last_written ? last_written : "",
user ? " by " : "",
user ? user : "",
client ? " via " : "",
client ? client : "",
origin ? " on " : "",
origin ? origin : "");
} else {
return strdup("");
}
}
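/* For example (illustrative values), a fully-populated CIB header yields:
 *
 *     last_changed_string("Wed Jan  1 12:00:00 2020", "root",
 *                         "cibadmin", "cluster01");
 *     // -> "Wed Jan  1 12:00:00 2020 by root via cibadmin on cluster01"
 */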
static char *
op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
int rc, gboolean print_timing) {
const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
char *interval_str = NULL;
char *buf = NULL;
if (interval_ms_s && safe_str_neq(interval_ms_s, "0")) {
char *pair = pcmk_format_nvpair("interval", interval_ms_s, "ms");
interval_str = crm_strdup_printf(" %s", pair);
free(pair);
}
if (print_timing) {
char *last_change_str = NULL;
char *last_run_str = NULL;
char *exec_str = NULL;
char *queue_str = NULL;
const char *value = NULL;
time_t epoch = 0;
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk_format_named_time(XML_RSC_OP_LAST_CHANGE, epoch);
last_change_str = crm_strdup_printf(" %s", time);
free(time);
}
if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_RUN, &epoch) == pcmk_ok)
&& (epoch > 0)) {
char *time = pcmk_format_named_time(XML_RSC_OP_LAST_RUN, epoch);
last_run_str = crm_strdup_printf(" %s", time);
free(time);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *pair = pcmk_format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
exec_str = crm_strdup_printf(" %s", pair);
free(pair);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *pair = pcmk_format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
queue_str = crm_strdup_printf(" %s", pair);
free(pair);
}
buf = crm_strdup_printf("(%s) %s:%s%s%s%s%s rc=%d (%s)", call, task,
interval_str ? interval_str : "",
last_change_str ? last_change_str : "",
last_run_str ? last_run_str : "",
exec_str ? exec_str : "",
queue_str ? queue_str : "",
rc, services_ocf_exitcode_str(rc));
if (last_change_str) {
free(last_change_str);
}
if (last_run_str) {
free(last_run_str);
}
if (exec_str) {
free(exec_str);
}
if (queue_str) {
free(queue_str);
}
} else {
buf = crm_strdup_printf("(%s) %s%s%s", call, task,
interval_str ? ":" : "",
interval_str ? interval_str : "");
}
if (interval_str) {
free(interval_str);
}
return buf;
}
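/* The two shapes this produces (values illustrative; the timing field
 * names come from the XML_RSC_OP_* macros used above):
 *
 *   with print_timing:
 *     (11) start: last-rc-change='...' last-run='...' exec-time=2ms queue-time=0ms rc=0 (ok)
 *   otherwise:
 *     (12) monitor: interval=10000ms
 */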
static char *
resource_history_string(pe_resource_t *rsc, const char *rsc_id, gboolean all,
int failcount, time_t last_failure) {
char *buf = NULL;
if (rsc == NULL) {
buf = crm_strdup_printf("%s: orphan", rsc_id);
} else if (all || failcount || last_failure > 0) {
char *failcount_s = NULL;
char *lastfail_s = NULL;
if (failcount > 0) {
failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
failcount);
} else {
failcount_s = strdup("");
}
if (last_failure > 0) {
lastfail_s = crm_strdup_printf(" %s='%s'",
PCMK__LAST_FAILURE_PREFIX,
pcmk__epoch2str(&last_failure));
}
buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
rsc_id, rsc->migration_threshold, failcount_s,
lastfail_s? lastfail_s : "");
free(failcount_s);
free(lastfail_s);
} else {
buf = crm_strdup_printf("%s:", rsc_id);
}
return buf;
}
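/* Sketch (illustrative): a resource that has failed once yields
 *
 *     "dummy: migration-threshold=1000000 fail-count=1 last-failure='...'"
 *
 * while a clean resource with all=FALSE collapses to just "dummy:".
 */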
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
"gboolean", "gboolean", "gboolean")
int
pe__cluster_summary(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean show_stack = va_arg(args, gboolean);
gboolean show_dc = va_arg(args, gboolean);
gboolean show_times = va_arg(args, gboolean);
gboolean show_counts = va_arg(args, gboolean);
gboolean show_options = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (show_stack) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
    /* Always print the DC section when there is no DC, even if not requested */
if (data_set->dc_node == NULL || show_dc) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
free(dc_name);
}
if (show_times) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (show_counts) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (show_options) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
out->message(out, "maint-mode");
rc = pcmk_rc_ok;
}
return rc;
}
PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *", "gboolean", "gboolean", "gboolean",
"gboolean", "gboolean", "gboolean")
int
pe__cluster_summary_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean show_stack = va_arg(args, gboolean);
gboolean show_dc = va_arg(args, gboolean);
gboolean show_times = va_arg(args, gboolean);
gboolean show_counts = va_arg(args, gboolean);
gboolean show_options = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
const char *stack_s = get_cluster_stack(data_set);
if (show_stack) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-stack", stack_s);
}
    /* Always print the DC section when there is no DC, even if not requested */
if (data_set->dc_node == NULL || show_dc) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
data_set->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, print_clone_detail) : NULL;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-dc", data_set->dc_node, quorum, dc_version_s, dc_name);
free(dc_name);
}
if (show_times) {
const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-times", last_written, user, client, origin);
}
if (show_counts) {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Cluster Summary");
out->message(out, "cluster-counts", g_list_length(data_set->nodes),
data_set->ninstances, data_set->disabled_resources,
data_set->blocked_resources);
}
if (show_options) {
/* Kind of a hack - close the list we may have opened earlier in this
* function so we can put all the options into their own list. We
* only want to do this on HTML output, though.
*/
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
out->message(out, "cluster-options", data_set);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
out->message(out, "maint-mode");
rc = pcmk_rc_ok;
}
return rc;
}
char *
pe__node_display_name(pe_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
const char *node_id = NULL;
int name_len;
CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
/* Host is displayed only if this is a guest node */
if (pe__is_guest_node(node)) {
pe_node_t *host_node = pe__current_node(node->details->remote_rsc);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
}
if (node_host == NULL) {
node_host = ""; /* so we at least get "uname@" to indicate guest */
}
}
/* Node ID is displayed if different from uname and detail is requested */
if (print_detail && safe_str_neq(node->details->uname, node->details->id)) {
node_id = node->details->id;
}
/* Determine name length */
name_len = strlen(node->details->uname) + 1;
if (node_host) {
name_len += strlen(node_host) + 1; /* "@node_host" */
}
if (node_id) {
name_len += strlen(node_id) + 3; /* + " (node_id)" */
}
/* Allocate and populate display name */
node_name = malloc(name_len);
CRM_ASSERT(node_name != NULL);
strcpy(node_name, node->details->uname);
if (node_host) {
strcat(node_name, "@");
strcat(node_name, node_host);
}
if (node_id) {
strcat(node_name, " (");
strcat(node_name, node_id);
strcat(node_name, ")");
}
return node_name;
}
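/* Examples (illustrative): a guest node is shown as "guest1@cluster01";
 * with print_detail and an ID differing from the uname, a node is shown
 * as "cluster01 (101)".
 */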
int
pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...)
{
xmlNodePtr xml_node = NULL;
va_list args;
CRM_ASSERT(tag_name != NULL);
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
xml_node = is_list
? create_xml_node(xml_node, tag_name)
: xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
va_start(args, pairs_count);
while(pairs_count--) {
const char *param_name = va_arg(args, const char *);
const char *param_value = va_arg(args, const char *);
if (param_name && param_value) {
xmlSetProp(xml_node, (pcmkXmlStr)param_name, (pcmkXmlStr)param_value);
}
    }
va_end(args);
if (is_list) {
pcmk__output_xml_push_parent(out, xml_node);
}
return pcmk_rc_ok;
}
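/* Usage sketch (mirroring pe__resource_xml): emit a <node .../> child of
 * the current XML parent; pairs whose value is NULL are skipped.
 *
 *     pe__name_and_nvpairs_xml(out, false, "node", 3
 *                              , "name", node->details->uname
 *                              , "id", node->details->id
 *                              , "cached", BOOL2STR(node->details->online));
 */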
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_html(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
char *node_name = pe__node_display_name(pe_node, print_clone_detail);
char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
node_name);
pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
free(node_name);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_text(pcmk__output_t *out, va_list args) {
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail = va_arg(args, gboolean);
char *node_name = pe__node_display_name(pe_node, print_clone_detail);
out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
location->id, location->rsc_lh->id,
location->role_filter == RSC_ROLE_MASTER ? "as Master " : "",
node_name);
free(node_name);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "gboolean")
int
pe__ban_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "ban");
pe_node_t *pe_node = va_arg(args, pe_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
char *weight_s = crm_itoa(pe_node->weight);
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) location->id);
xmlSetProp(node, (pcmkXmlStr) "resource", (pcmkXmlStr) location->rsc_lh->id);
xmlSetProp(node, (pcmkXmlStr) "node", (pcmkXmlStr) pe_node->details->uname);
xmlSetProp(node, (pcmkXmlStr) "weight", (pcmkXmlStr) weight_s);
xmlSetProp(node, (pcmkXmlStr) "master_only",
(pcmkXmlStr) (location->role_filter == RSC_ROLE_MASTER ? "true" : "false"));
free(weight_s);
return pcmk_rc_ok;
}
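/* The three "ban" formatters render the same data; the XML form, for
 * example (illustrative values):
 *
 *     <ban id="ban-dummy" resource="dummy" node="cluster01"
 *          weight="-1000000" master_only="false"/>
 */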
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_html(pcmk__output_t *out, va_list args) {
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li");
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li");
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
char *nnodes_str = crm_strdup_printf("%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
free(nnodes_str);
if (ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
s = crm_strdup_printf(", %d ", nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else if (ndisabled && !nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
ndisabled);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
} else if (!ndisabled && nblocked) {
char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
nresources, pcmk__plural_s(nresources),
nblocked);
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
pcmk_create_html_node(resources_node, "span", NULL, NULL,
" from further action due to failure)");
} else {
char *s = crm_strdup_printf("%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
free(s);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_text(pcmk__output_t *out, va_list args) {
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
out->list_item(out, NULL, "%d node%s configured",
nnodes, pcmk__plural_s(nnodes));
if (ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED, %d BLOCKED from "
"further action due to failure)",
nresources, pcmk__plural_s(nresources), ndisabled,
nblocked);
} else if (ndisabled && !nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d DISABLED)",
nresources, pcmk__plural_s(nresources), ndisabled);
} else if (!ndisabled && nblocked) {
out->list_item(out, NULL, "%d resource instance%s configured "
"(%d BLOCKED from further action "
"due to failure)",
nresources, pcmk__plural_s(nresources), nblocked);
} else {
out->list_item(out, NULL, "%d resource instance%s configured",
nresources, pcmk__plural_s(nresources));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
int
pe__cluster_counts_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured");
xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured");
unsigned int nnodes = va_arg(args, unsigned int);
int nresources = va_arg(args, int);
int ndisabled = va_arg(args, int);
int nblocked = va_arg(args, int);
char *s = crm_itoa(nnodes);
xmlSetProp(nodes_node, (pcmkXmlStr) "number", (pcmkXmlStr) s);
free(s);
s = crm_itoa(nresources);
xmlSetProp(resources_node, (pcmkXmlStr) "number", (pcmkXmlStr) s);
free(s);
s = crm_itoa(ndisabled);
xmlSetProp(resources_node, (pcmkXmlStr) "disabled", (pcmkXmlStr) s);
free(s);
s = crm_itoa(nblocked);
xmlSetProp(resources_node, (pcmkXmlStr) "blocked", (pcmkXmlStr) s);
free(s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_html(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
if (dc) {
if (crm_is_true(quorum)) {
char *buf = crm_strdup_printf("%s (version %s) - partition with quorum",
dc_name, dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
} else {
char *buf = crm_strdup_printf("%s (version %s) - partition",
dc_name, dc_version_s ? dc_version_s : "unknown");
pcmk_create_html_node(node, "span", NULL, NULL, buf);
free(buf);
pcmk_create_html_node(node, "span", NULL, "warning", "WITHOUT");
pcmk_create_html_node(node, "span", NULL, NULL, "quorum");
}
} else {
pcmk_create_html_node(node ,"span", NULL, "warning", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_text(pcmk__output_t *out, va_list args) {
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
if (dc) {
out->list_item(out, "Current DC", "%s (version %s) - partition %s quorum",
dc_name, dc_version_s ? dc_version_s : "unknown",
crm_is_true(quorum) ? "with" : "WITHOUT");
} else {
out->list_item(out, "Current DC", "NONE");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *", "char *")
int
pe__cluster_dc_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "current_dc");
pe_node_t *dc = va_arg(args, pe_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
if (dc) {
xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "true");
xmlSetProp(node, (pcmkXmlStr) "version", (pcmkXmlStr) (dc_version_s ? dc_version_s : ""));
xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) dc->details->uname);
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) dc->details->id);
xmlSetProp(node, (pcmkXmlStr) "with_quorum", (pcmkXmlStr) (crm_is_true(quorum) ? "true" : "false"));
} else {
xmlSetProp(node, (pcmkXmlStr) "present", (pcmkXmlStr) "false");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("maint-mode")
int
pe__cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
fprintf(out->dest, "\n *** Resource management is DISABLED ***");
fprintf(out->dest, "\n The cluster will not attempt to start, stop or recover services");
fprintf(out->dest, "\n");
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_html(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
} else {
out->list_item(out, NULL, "Resource management: enabled");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_log(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
return pcmk_rc_ok;
} else {
return pcmk_rc_no_output;
}
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_text(pcmk__output_t *out, va_list args) {
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
out->list_item(out, NULL, "STONITH of failed nodes %s",
is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
out->list_item(out, NULL, "Cluster is %s",
is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
case no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
case no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
case no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
case no_quorum_suicide:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
int
pe__cluster_options_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "cluster_options");
pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
xmlSetProp(node, (pcmkXmlStr) "stonith-enabled",
(pcmkXmlStr) (is_set(data_set->flags, pe_flag_stonith_enabled) ? "true" : "false"));
xmlSetProp(node, (pcmkXmlStr) "symmetric-cluster",
(pcmkXmlStr) (is_set(data_set->flags, pe_flag_symmetric_cluster) ? "true" : "false"));
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "freeze");
break;
case no_quorum_stop:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop");
break;
case no_quorum_demote:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote");
break;
case no_quorum_ignore:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore");
break;
case no_quorum_suicide:
xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "suicide");
break;
}
xmlSetProp(node, (pcmkXmlStr) "maintenance-mode",
(pcmkXmlStr) (is_set(data_set->flags, pe_flag_maintenance_mode) ? "true" : "false"));
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_html(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li");
const char *stack_s = va_arg(args, const char *);
pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_text(pcmk__output_t *out, va_list args) {
const char *stack_s = va_arg(args, const char *);
out->list_item(out, "Stack", "%s", stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-stack", "const char *")
int
pe__cluster_stack_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "stack");
const char *stack_s = va_arg(args, const char *);
xmlSetProp(node, (pcmkXmlStr) "type", (pcmkXmlStr) stack_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_html(pcmk__output_t *out, va_list args) {
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li");
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li");
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
pcmk_create_html_node(updated_node, "span", NULL, NULL,
pcmk__epoch2str(NULL));
pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
pcmk_create_html_node(changed_node, "span", NULL, NULL, buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "last_update");
xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "last_change");
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
xmlSetProp(updated_node, (pcmkXmlStr) "time",
(pcmkXmlStr) pcmk__epoch2str(NULL));
xmlSetProp(changed_node, (pcmkXmlStr) "time", (pcmkXmlStr) (last_written ? last_written : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "user", (pcmkXmlStr) (user ? user : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "client", (pcmkXmlStr) (client ? client : ""));
xmlSetProp(changed_node, (pcmkXmlStr) "origin", (pcmkXmlStr) (origin ? origin : ""));
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *")
int
pe__cluster_times_text(pcmk__output_t *out, va_list args) {
const char *last_written = va_arg(args, const char *);
const char *user = va_arg(args, const char *);
const char *client = va_arg(args, const char *);
const char *origin = va_arg(args, const char *);
char *buf = last_changed_string(last_written, user, client, origin);
out->list_item(out, "Last updated", "%s", pcmk__epoch2str(NULL));
out->list_item(out, "Last change", " %s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
int
pe__failed_action_text(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
char *s = failed_action_string(xml_op);
out->list_item(out, NULL, "%s", s);
free(s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr")
int
pe__failed_action_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0");
int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0");
const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
char *rc_s = crm_itoa(rc);
char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
xmlNodePtr node = pcmk__output_create_xml_node(out, "failure");
xmlSetProp(node, (pcmkXmlStr) (op_key ? "op_key" : "id"),
(pcmkXmlStr) (op_key ? op_key : "id"));
xmlSetProp(node, (pcmkXmlStr) "node",
(pcmkXmlStr) crm_element_value(xml_op, XML_ATTR_UNAME));
xmlSetProp(node, (pcmkXmlStr) "exitstatus",
(pcmkXmlStr) services_ocf_exitcode_str(rc));
xmlSetProp(node, (pcmkXmlStr) "exitreason", (pcmkXmlStr) reason_s);
xmlSetProp(node, (pcmkXmlStr) "exitcode", (pcmkXmlStr) rc_s);
xmlSetProp(node, (pcmkXmlStr) "call",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID));
xmlSetProp(node, (pcmkXmlStr) "status",
(pcmkXmlStr) services_lrm_status_str(status));
if (last) {
guint interval_ms = 0;
char *s = NULL;
time_t when = crm_parse_int(last, "0");
crm_time_t *crm_when = crm_time_new(NULL);
char *rc_change = NULL;
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
s = crm_itoa(interval_ms);
crm_time_set_timet(crm_when, &when);
rc_change = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE, (pcmkXmlStr) rc_change);
xmlSetProp(node, (pcmkXmlStr) "queued",
(pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_QUEUE));
xmlSetProp(node, (pcmkXmlStr) "exec",
(pcmkXmlStr) crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s);
xmlSetProp(node, (pcmkXmlStr) "task",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_TASK));
free(s);
free(rc_change);
crm_time_free(crm_when);
}
free(reason_s);
free(rc_s);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *", "gboolean", "gboolean", "gboolean", "GListPtr")
+PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
+ "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_html(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
char *node_name = pe__node_display_name(node, print_clone_detail);
char *buf = crm_strdup_printf("Node: %s", node_name);
if (full) {
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(item_node, "span", NULL, NULL, buf);
if (node->details->standby_onfail && node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "standby", " standby (on-fail)");
} else if (node->details->standby && node->details->online) {
char *s = crm_strdup_printf(" standby%s", node->details->running_rsc ? " (with active resources)" : "");
pcmk_create_html_node(item_node, "span", NULL, " standby", s);
free(s);
} else if (node->details->standby) {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (standby)");
} else if (node->details->maintenance && node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "maint", " maintenance");
} else if (node->details->maintenance) {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE (maintenance)");
} else if (node->details->online) {
pcmk_create_html_node(item_node, "span", NULL, "online", " online");
} else {
pcmk_create_html_node(item_node, "span", NULL, "offline", " OFFLINE");
}
if (print_brief && group_by_node) {
- out->begin_list(out, NULL, NULL, NULL);
- pe__rscs_brief_output(out, node->details->running_rsc, print_opts | pe_print_rsconly,
- FALSE);
- out->end_list(out);
+ GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
+
+ if (rscs != NULL) {
+ out->begin_list(out, NULL, NULL, NULL);
+ pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
+ out->end_list(out);
+ }
} else if (group_by_node) {
GListPtr lpc2 = NULL;
out->begin_list(out, NULL, NULL, NULL);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
- out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly, rsc, only_node);
+ out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
+ rsc, only_node, only_rsc);
}
out->end_list(out);
}
} else {
out->begin_list(out, NULL, NULL, "%s", buf);
}
free(buf);
free(node_name);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *", "gboolean", "gboolean", "gboolean", "GListPtr")
+PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
+ "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_text(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode = va_arg(args, const char *);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
if (full) {
char *node_name = pe__node_display_name(node, print_clone_detail);
char *buf = NULL;
/* Print the node name and status */
if (pe__is_guest_node(node)) {
buf = crm_strdup_printf("GuestNode %s: %s", node_name, node_mode);
} else if (pe__is_remote_node(node)) {
buf = crm_strdup_printf("RemoteNode %s: %s", node_name, node_mode);
} else {
buf = crm_strdup_printf("Node %s: %s", node_name, node_mode);
}
/* If we're grouping by node, print its resources */
if (group_by_node) {
- out->begin_list(out, NULL, NULL, "%s", buf);
- out->begin_list(out, NULL, NULL, "Resources");
-
if (print_brief) {
- pe__rscs_brief_output(out, node->details->running_rsc,
- print_opts | pe_print_rsconly, FALSE);
+ GListPtr rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
+
+ if (rscs != NULL) {
+ out->begin_list(out, NULL, NULL, "%s", buf);
+ out->begin_list(out, NULL, NULL, "Resources");
+
+ pe__rscs_brief_output(out, rscs, print_opts | pe_print_rsconly, FALSE);
+
+ out->end_list(out);
+ out->end_list(out);
+ }
+
} else {
GListPtr gIter2 = NULL;
+ out->begin_list(out, NULL, NULL, "%s", buf);
+ out->begin_list(out, NULL, NULL, "Resources");
+
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
- out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly, rsc, only_node);
+ out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
+ rsc, only_node, only_rsc);
}
- }
- out->end_list(out);
- out->end_list(out);
+ out->end_list(out);
+ out->end_list(out);
+ }
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
free(node_name);
} else {
out->begin_list(out, NULL, NULL, "Node: %s", pe__node_display_name(node, print_clone_detail));
}
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *", "gboolean", "gboolean", "gboolean", "GListPtr")
+PCMK__OUTPUT_ARGS("node", "pe_node_t *", "unsigned int", "gboolean", "const char *",
+ "gboolean", "gboolean", "gboolean", "GListPtr", "GListPtr")
int
pe__node_xml(pcmk__output_t *out, va_list args) {
pe_node_t *node = va_arg(args, pe_node_t *);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean full = va_arg(args, gboolean);
const char *node_mode G_GNUC_UNUSED = va_arg(args, const char *);
gboolean print_clone_detail G_GNUC_UNUSED = va_arg(args, gboolean);
gboolean print_brief G_GNUC_UNUSED = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
if (full) {
const char *node_type = "unknown";
char *length_s = crm_itoa(g_list_length(node->details->running_rsc));
switch (node->details->type) {
case node_member:
node_type = "member";
break;
case node_remote:
node_type = "remote";
break;
case node_ping:
node_type = "ping";
break;
}
pe__name_and_nvpairs_xml(out, true, "node", 13,
"name", node->details->uname,
"id", node->details->id,
"online", node->details->online ? "true" : "false",
"standby", node->details->standby ? "true" : "false",
"standby_onfail", node->details->standby_onfail ? "true" : "false",
"maintenance", node->details->maintenance ? "true" : "false",
"pending", node->details->pending ? "true" : "false",
"unclean", node->details->unclean ? "true" : "false",
"shutdown", node->details->shutdown ? "true" : "false",
"expected_up", node->details->expected_up ? "true" : "false",
"is_dc", node->details->is_dc ? "true" : "false",
"resources_running", length_s,
"type", node_type);
if (pe__is_guest_node(node)) {
xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
xmlSetProp(xml_node, (pcmkXmlStr) "id_as_resource",
(pcmkXmlStr) node->details->remote_rsc->container->id);
}
if (group_by_node) {
GListPtr lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
pe_resource_t *rsc = (pe_resource_t *) lpc->data;
- out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly, rsc, only_node);
+ out->message(out, crm_map_element_name(rsc->xml), print_opts | pe_print_rsconly,
+ rsc, only_node, only_rsc);
}
}
free(length_s);
out->end_list(out);
} else {
xmlNodePtr parent = pcmk__output_xml_create_parent(out, "node");
xmlSetProp(parent, (pcmkXmlStr) "name", (pcmkXmlStr) node->details->uname);
}
return pcmk_rc_ok;
}
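/* Illustrative sketch of the kind of name-based filtering that
 * pe__filter_rsc_list() (added by this patch) performs above. The exact
 * matching rules are assumed, not taken from the library: keep only
 * entries whose name appears in the filter list, with "*" matching all.
 */
#include <glib.h>
#include <stdio.h>
#include <string.h>

static gboolean
name_in_list(GList *names, const char *name)
{
    for (GList *iter = names; iter != NULL; iter = iter->next) {
        if (strcmp(iter->data, "*") == 0 || strcmp(iter->data, name) == 0) {
            return TRUE;
        }
    }
    return FALSE;
}

static GList *
filter_by_name(GList *items, GList *only)
{
    GList *result = NULL;

    for (GList *iter = items; iter != NULL; iter = iter->next) {
        if (name_in_list(only, iter->data)) {
            result = g_list_prepend(result, iter->data);
        }
    }
    return g_list_reverse(result);
}

int
main(void)
{
    GList *items = NULL, *only = NULL, *kept = NULL;

    items = g_list_append(items, "Fencing");
    items = g_list_append(items, "dummy");
    only = g_list_append(only, "dummy");

    kept = filter_by_name(items, only);
    printf("%s\n", (char *) kept->data); /* dummy */
    g_list_free(items);
    g_list_free(only);
    g_list_free(kept);
    return 0;
}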
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_text(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
if (add_extra) {
int v = crm_parse_int(value, "0");
if (v <= 0) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
} else if (v < expected_score) {
out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
} else {
out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
}
return pcmk_rc_ok;
}
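/* Standalone sketch of the connectivity classification used by the
 * node-attribute handlers above (plain C, illustrative values): a ping
 * score of zero or less means connectivity is lost, a score below the
 * expected value means it is degraded, anything else is healthy.
 */
#include <stdio.h>

static const char *
classify_connectivity(int value, int expected_score)
{
    if (value <= 0) {
        return "Connectivity is lost";
    } else if (value < expected_score) {
        return "Connectivity is degraded";
    }
    return "Connectivity is healthy";
}

int
main(void)
{
    /* expected score of 1000, e.g. one reachable ping target */
    printf("%s\n", classify_connectivity(0, 1000));    /* lost */
    printf("%s\n", classify_connectivity(500, 1000));  /* degraded */
    printf("%s\n", classify_connectivity(1000, 1000)); /* healthy */
    return 0;
}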
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_html(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
if (add_extra) {
int v = crm_parse_int(value, "0");
char *s = crm_strdup_printf("%s: %s", name, value);
xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li");
pcmk_create_html_node(item_node, "span", NULL, NULL, s);
free(s);
if (v <= 0) {
pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
} else if (v < expected_score) {
char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
free(buf);
}
} else {
out->list_item(out, NULL, "%s: %s", name, value);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "gboolean", "int")
int
pe__node_attribute_xml(pcmk__output_t *out, va_list args) {
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
gboolean add_extra = va_arg(args, gboolean);
int expected_score = va_arg(args, int);
xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute");
xmlSetProp(node, (pcmkXmlStr) "name", (pcmkXmlStr) name);
xmlSetProp(node, (pcmkXmlStr) "value", (pcmkXmlStr) value);
if (add_extra) {
char *buf = crm_itoa(expected_score);
xmlSetProp(node, (pcmkXmlStr) "expected", (pcmkXmlStr) buf);
free(buf);
}
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_html(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
int rc = pcmk_rc_no_output;
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
- print_brief, group_by_node, only_node);
+ print_brief, group_by_node, only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
-PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_text(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
/* space-separated lists of node names */
char *online_nodes = NULL;
char *online_remote_nodes = NULL;
char *online_guest_nodes = NULL;
char *offline_nodes = NULL;
char *offline_remote_nodes = NULL;
int rc = pcmk_rc_no_output;
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
const char *node_mode = NULL;
char *node_name = pe__node_display_name(node, print_clone_detail);
if (!pcmk__str_in_list(only_node, node->details->uname)) {
free(node_name);
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Node List");
/* Get node mode */
if (node->details->unclean) {
if (node->details->online) {
node_mode = "UNCLEAN (online)";
} else if (node->details->pending) {
node_mode = "UNCLEAN (pending)";
} else {
node_mode = "UNCLEAN (offline)";
}
} else if (node->details->pending) {
node_mode = "pending";
} else if (node->details->standby_onfail && node->details->online) {
node_mode = "standby (on-fail)";
} else if (node->details->standby) {
if (node->details->online) {
if (node->details->running_rsc) {
node_mode = "standby (with active resources)";
} else {
node_mode = "standby";
}
} else {
node_mode = "OFFLINE (standby)";
}
} else if (node->details->maintenance) {
if (node->details->online) {
node_mode = "maintenance";
} else {
node_mode = "OFFLINE (maintenance)";
}
} else if (node->details->online) {
node_mode = "online";
if (group_by_node == FALSE) {
if (pe__is_guest_node(node)) {
online_guest_nodes = pcmk__add_word(online_guest_nodes,
node_name);
} else if (pe__is_remote_node(node)) {
online_remote_nodes = pcmk__add_word(online_remote_nodes,
node_name);
} else {
online_nodes = pcmk__add_word(online_nodes, node_name);
}
free(node_name);
continue;
}
} else {
node_mode = "OFFLINE";
if (group_by_node == FALSE) {
if (pe__is_remote_node(node)) {
offline_remote_nodes = pcmk__add_word(offline_remote_nodes,
node_name);
} else if (pe__is_guest_node(node)) {
/* ignore offline guest nodes */
} else {
offline_nodes = pcmk__add_word(offline_nodes, node_name);
}
free(node_name);
continue;
}
}
/* If we get here, node is in bad state, or we're grouping by node */
out->message(out, "node", node, print_opts, TRUE, node_mode, print_clone_detail,
- print_brief, group_by_node, only_node);
+ print_brief, group_by_node, only_node, only_rsc);
free(node_name);
}
/* If we're not grouping by node, summarize nodes by status */
if (online_nodes) {
out->list_item(out, "Online", "[%s ]", online_nodes);
free(online_nodes);
}
if (offline_nodes) {
out->list_item(out, "OFFLINE", "[%s ]", offline_nodes);
free(offline_nodes);
}
if (online_remote_nodes) {
out->list_item(out, "RemoteOnline", "[%s ]", online_remote_nodes);
free(online_remote_nodes);
}
if (offline_remote_nodes) {
out->list_item(out, "RemoteOFFLINE", "[%s ]", offline_remote_nodes);
free(offline_remote_nodes);
}
if (online_guest_nodes) {
out->list_item(out, "GuestOnline", "[%s ]", online_guest_nodes);
free(online_guest_nodes);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
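/* Minimal sketch of the space-separated word-list accumulation used for
 * the node summaries above (plain C, not pcmk__add_word): each name is
 * appended with a leading space and the buffer grows as needed, which
 * is why the list is printed with the "[%s ]" format.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
add_word(char *list, const char *word)
{
    size_t old_len = (list == NULL) ? 0 : strlen(list);
    char *new_list = realloc(list, old_len + strlen(word) + 2);

    if (new_list == NULL) {
        free(list);
        return NULL;
    }
    new_list[old_len] = '\0';
    strcat(new_list, " ");
    strcat(new_list, word);
    return new_list;
}

int
main(void)
{
    char *online = NULL;

    online = add_word(online, "cluster01");
    online = add_word(online, "cluster02");
    printf("Online: [%s ]\n", online); /* Online: [ cluster01 cluster02 ] */
    free(online);
    return 0;
}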
-PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
+PCMK__OUTPUT_ARGS("node-list", "GListPtr", "GListPtr", "GListPtr", "unsigned int", "gboolean", "gboolean", "gboolean")
int
pe__node_list_xml(pcmk__output_t *out, va_list args) {
GListPtr nodes = va_arg(args, GListPtr);
GListPtr only_node = va_arg(args, GListPtr);
+ GListPtr only_rsc = va_arg(args, GListPtr);
unsigned int print_opts = va_arg(args, unsigned int);
gboolean print_clone_detail = va_arg(args, gboolean);
gboolean print_brief = va_arg(args, gboolean);
gboolean group_by_node = va_arg(args, gboolean);
out->begin_list(out, NULL, NULL, "nodes");
for (GListPtr gIter = nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
out->message(out, "node", node, print_opts, TRUE, NULL, print_clone_detail,
- print_brief, group_by_node, only_node);
+ print_brief, group_by_node, only_node, only_rsc);
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
int
pe__op_history_text(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = va_arg(args, xmlNode *);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
gboolean print_timing = va_arg(args, gboolean);
char *buf = op_history_string(xml_op, task, interval_ms_s, rc, print_timing);
out->list_item(out, NULL, "%s", buf);
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("op-history", "struct xmlNode *", "const char *", "const char *", "int", "gboolean")
int
pe__op_history_xml(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = va_arg(args, xmlNode *);
const char *task = va_arg(args, const char *);
const char *interval_ms_s = va_arg(args, const char *);
int rc = va_arg(args, int);
gboolean print_timing = va_arg(args, gboolean);
char *rc_s = NULL;
xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history");
xmlSetProp(node, (pcmkXmlStr) "call",
(pcmkXmlStr) crm_element_value(xml_op, XML_LRM_ATTR_CALLID));
xmlSetProp(node, (pcmkXmlStr) "task", (pcmkXmlStr) task);
if (interval_ms_s && safe_str_neq(interval_ms_s, "0")) {
char *s = crm_strdup_printf("%sms", interval_ms_s);
xmlSetProp(node, (pcmkXmlStr) "interval", (pcmkXmlStr) s);
free(s);
}
if (print_timing) {
const char *value = NULL;
value = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE);
if (value) {
time_t int_value = (time_t) crm_parse_int(value, NULL);
if (int_value > 0) {
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_CHANGE,
(pcmkXmlStr) pcmk__epoch2str(&int_value));
}
}
value = crm_element_value(xml_op, XML_RSC_OP_LAST_RUN);
if (value) {
time_t int_value = (time_t) crm_parse_int(value, NULL);
if (int_value > 0) {
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_LAST_RUN,
(pcmkXmlStr) pcmk__epoch2str(&int_value));
}
}
value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
if (value) {
char *s = crm_strdup_printf("%sms", value);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_EXEC, (pcmkXmlStr) s);
free(s);
}
value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
if (value) {
char *s = crm_strdup_printf("%sms", value);
xmlSetProp(node, (pcmkXmlStr) XML_RSC_OP_T_QUEUE, (pcmkXmlStr) s);
free(s);
}
}
rc_s = crm_itoa(rc);
xmlSetProp(node, (pcmkXmlStr) "rc", (pcmkXmlStr) rc_s);
xmlSetProp(node, (pcmkXmlStr) "rc_text", (pcmkXmlStr) services_ocf_exitcode_str(rc));
free(rc_s);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
int
pe__resource_history_text(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
gboolean all = va_arg(args, gboolean);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, time_t);
gboolean as_header = va_arg(args, gboolean);
char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
if (as_header) {
out->begin_list(out, NULL, NULL, "%s", buf);
} else {
out->list_item(out, NULL, "%s", buf);
}
free(buf);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "gboolean", "int", "time_t", "gboolean")
int
pe__resource_history_xml(pcmk__output_t *out, va_list args) {
pe_resource_t *rsc = va_arg(args, pe_resource_t *);
const char *rsc_id = va_arg(args, const char *);
gboolean all = va_arg(args, gboolean);
int failcount = va_arg(args, int);
time_t last_failure = va_arg(args, time_t);
gboolean as_header = va_arg(args, gboolean);
xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history");
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) rsc_id);
if (rsc == NULL) {
xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "true");
} else if (all || failcount || last_failure > 0) {
char *migration_s = crm_itoa(rsc->migration_threshold);
xmlSetProp(node, (pcmkXmlStr) "orphan", (pcmkXmlStr) "false");
xmlSetProp(node, (pcmkXmlStr) "migration-threshold",
(pcmkXmlStr) migration_s);
free(migration_s);
if (failcount > 0) {
char *s = crm_itoa(failcount);
xmlSetProp(node, (pcmkXmlStr) PCMK__FAIL_COUNT_PREFIX,
(pcmkXmlStr) s);
free(s);
}
if (last_failure > 0) {
xmlSetProp(node, (pcmkXmlStr) PCMK__LAST_FAILURE_PREFIX,
(pcmkXmlStr) pcmk__epoch2str(&last_failure));
}
}
if (as_header == FALSE) {
pcmk__output_xml_pop_parent(out);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_html(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
out->list_item(out, NULL, "%s:\t%s%s %s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_text(pcmk__output_t *out, va_list args) {
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
if (ticket->last_granted > -1) {
char *time = pcmk_format_named_time("last-granted", ticket->last_granted);
out->list_item(out, ticket->id, "\t%s%s %s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "",
time);
free(time);
} else {
out->list_item(out, ticket->id, "\t%s%s",
ticket->granted ? "granted" : "revoked",
ticket->standby ? " [standby]" : "");
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
int
pe__ticket_xml(pcmk__output_t *out, va_list args) {
xmlNodePtr node = NULL;
pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
node = pcmk__output_create_xml_node(out, "ticket");
xmlSetProp(node, (pcmkXmlStr) "id", (pcmkXmlStr) ticket->id);
xmlSetProp(node, (pcmkXmlStr) "status", (pcmkXmlStr) (ticket->granted ? "granted" : "revoked"));
xmlSetProp(node, (pcmkXmlStr) "standby", (pcmkXmlStr) (ticket->standby ? "true" : "false"));
if (ticket->last_granted > -1) {
xmlSetProp(node, (pcmkXmlStr) "last-granted",
(pcmkXmlStr) pcmk__epoch2str(&ticket->last_granted));
}
return pcmk_rc_ok;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "ban", "html", pe__ban_html },
{ "ban", "log", pe__ban_text },
{ "ban", "text", pe__ban_text },
{ "ban", "xml", pe__ban_xml },
{ "bundle", "xml", pe__bundle_xml },
{ "bundle", "html", pe__bundle_html },
{ "bundle", "text", pe__bundle_text },
{ "bundle", "log", pe__bundle_text },
{ "clone", "xml", pe__clone_xml },
{ "clone", "html", pe__clone_html },
{ "clone", "text", pe__clone_text },
{ "clone", "log", pe__clone_text },
{ "cluster-counts", "html", pe__cluster_counts_html },
{ "cluster-counts", "log", pe__cluster_counts_text },
{ "cluster-counts", "text", pe__cluster_counts_text },
{ "cluster-counts", "xml", pe__cluster_counts_xml },
{ "cluster-dc", "html", pe__cluster_dc_html },
{ "cluster-dc", "log", pe__cluster_dc_text },
{ "cluster-dc", "text", pe__cluster_dc_text },
{ "cluster-dc", "xml", pe__cluster_dc_xml },
{ "cluster-options", "html", pe__cluster_options_html },
{ "cluster-options", "log", pe__cluster_options_log },
{ "cluster-options", "text", pe__cluster_options_text },
{ "cluster-options", "xml", pe__cluster_options_xml },
{ "cluster-summary", "html", pe__cluster_summary_html },
{ "cluster-summary", "log", pe__cluster_summary },
{ "cluster-summary", "text", pe__cluster_summary },
{ "cluster-summary", "xml", pe__cluster_summary },
{ "cluster-stack", "html", pe__cluster_stack_html },
{ "cluster-stack", "log", pe__cluster_stack_text },
{ "cluster-stack", "text", pe__cluster_stack_text },
{ "cluster-stack", "xml", pe__cluster_stack_xml },
{ "cluster-times", "html", pe__cluster_times_html },
{ "cluster-times", "log", pe__cluster_times_text },
{ "cluster-times", "text", pe__cluster_times_text },
{ "cluster-times", "xml", pe__cluster_times_xml },
{ "failed-action", "html", pe__failed_action_text },
{ "failed-action", "log", pe__failed_action_text },
{ "failed-action", "text", pe__failed_action_text },
{ "failed-action", "xml", pe__failed_action_xml },
{ "group", "xml", pe__group_xml },
{ "group", "html", pe__group_html },
{ "group", "text", pe__group_text },
{ "group", "log", pe__group_text },
/* maint-mode only exists for text and log. Other formatters output it as
* part of the cluster-options handler.
*/
{ "maint-mode", "log", pe__cluster_maint_mode_text },
{ "maint-mode", "text", pe__cluster_maint_mode_text },
{ "node", "html", pe__node_html },
{ "node", "log", pe__node_text },
{ "node", "text", pe__node_text },
{ "node", "xml", pe__node_xml },
{ "node-list", "html", pe__node_list_html },
{ "node-list", "log", pe__node_list_text },
{ "node-list", "text", pe__node_list_text },
{ "node-list", "xml", pe__node_list_xml },
{ "node-attribute", "html", pe__node_attribute_html },
{ "node-attribute", "log", pe__node_attribute_text },
{ "node-attribute", "text", pe__node_attribute_text },
{ "node-attribute", "xml", pe__node_attribute_xml },
{ "op-history", "html", pe__op_history_text },
{ "op-history", "log", pe__op_history_text },
{ "op-history", "text", pe__op_history_text },
{ "op-history", "xml", pe__op_history_xml },
{ "primitive", "xml", pe__resource_xml },
{ "primitive", "html", pe__resource_html },
{ "primitive", "text", pe__resource_text },
{ "primitive", "log", pe__resource_text },
{ "resource-history", "html", pe__resource_history_text },
{ "resource-history", "log", pe__resource_history_text },
{ "resource-history", "text", pe__resource_history_text },
{ "resource-history", "xml", pe__resource_history_xml },
{ "ticket", "html", pe__ticket_html },
{ "ticket", "log", pe__ticket_text },
{ "ticket", "text", pe__ticket_text },
{ "ticket", "xml", pe__ticket_xml },
{ NULL, NULL, NULL }
};
void
pe__register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
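/* Generic sketch of the (message, format) dispatch pattern behind
 * fmt_functions above (local types, not the pcmk__output API): handlers
 * are matched by message name plus format name, and the table ends with
 * a NULL sentinel entry.
 */
#include <stdio.h>
#include <string.h>

typedef int (*handler_fn)(void);

static int ticket_text(void) { printf("ticket as text\n"); return 0; }
static int ticket_xml(void)  { printf("ticket as xml\n");  return 0; }

struct entry {
    const char *message;
    const char *format;
    handler_fn fn;
};

static struct entry table[] = {
    { "ticket", "text", ticket_text },
    { "ticket", "xml",  ticket_xml  },
    { NULL, NULL, NULL }
};

static handler_fn
lookup(const char *message, const char *format)
{
    for (struct entry *e = table; e->message != NULL; e++) {
        if (strcmp(e->message, message) == 0
            && strcmp(e->format, format) == 0) {
            return e->fn;
        }
    }
    return NULL;
}

int
main(void)
{
    handler_fn fn = lookup("ticket", "xml");

    return (fn != NULL) ? fn() : 1;
}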
void
pe__output_node(pe_node_t *node, gboolean details, pcmk__output_t *out)
{
if (node == NULL) {
crm_trace("");
return;
}
CRM_ASSERT(node->details);
crm_trace("%sNode %s: (weight=%d, fixed=%s)",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
- GListPtr unames = NULL;
+ GListPtr all = NULL;
- unames = g_list_prepend(unames, strdup("*"));
+ all = g_list_prepend(all, strdup("*"));
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
out->message(out, crm_map_element_name(rsc->xml),
- pe_print_pending, rsc, unames);
+ pe_print_pending, rsc, all, all);
}
- g_list_free_full(unames, free);
+ g_list_free_full(all, free);
}
}
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 0755b60ddd..4eebe33699 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2812 +1,2832 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set, guint interval_ms);
static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
gboolean include_disabled);
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *
pe_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
CRM_CHECK(action != NULL, return NULL);
if (action->action_details == NULL) {
action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
CRM_CHECK(action->action_details != NULL, return NULL);
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters == NULL) {
details->versioned_parameters = create_xml_node(NULL,
XML_TAG_OP_VER_ATTRS);
}
if (details->versioned_meta == NULL) {
details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
}
return details;
}
static void
pe_free_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
if ((action == NULL) || (action->action_details == NULL)) {
return;
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters) {
free_xml(details->versioned_parameters);
}
if (details->versioned_meta) {
free_xml(details->versioned_meta);
}
action->action_details = NULL;
}
#endif
/*!
* \internal
* \brief Check whether we can fence a particular node
*
* \param[in] data_set Working set for cluster
* \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
pe_can_fence(pe_working_set_t *data_set, pe_node_t *node)
{
if (pe__is_guest_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
pe_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
pe_node_t *container_node = n->data;
if (!container_node->details->online
&& !pe_can_fence(data_set, container_node)) {
return false;
}
}
return true;
} else if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
return false; /* Turned off */
} else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) {
return false; /* No devices */
} else if (is_set(data_set->flags, pe_flag_have_quorum)) {
return true;
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
return true;
} else if(node == NULL) {
return false;
} else if(node->details->online) {
crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
return true;
}
crm_trace("Cannot fence %s", node->details->uname);
return false;
}
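/* Self-contained sketch of the guest-node rule above (local struct,
 * illustrative only): a guest node is fenceable when every host its
 * container runs on is either online or itself fenceable.
 */
#include <stdbool.h>
#include <stdio.h>

struct host {
    bool online;
    bool fenceable;
};

static bool
guest_can_be_fenced(const struct host *hosts, int n_hosts)
{
    for (int i = 0; i < n_hosts; i++) {
        if (!hosts[i].online && !hosts[i].fenceable) {
            /* one offline, unfenceable host blocks stopping the container */
            return false;
        }
    }
    return true;
}

int
main(void)
{
    struct host hosts[] = { { true, false }, { false, true } };

    printf("%s\n", guest_can_be_fenced(hosts, 2) ? "true" : "false");
    return 0;
}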
/*!
* \internal
* \brief Copy a node object
*
* \param[in] this_node Node object to copy
*
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
pe_node_t *
pe__copy_node(const pe_node_t *this_node)
{
pe_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
new_node = calloc(1, sizeof(pe_node_t));
CRM_ASSERT(new_node != NULL);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed;
new_node->details = this_node->details;
return new_node;
}
/* any node in the hash table or the node list, but not in both, gets a score of -INFINITY */
void
node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
{
GHashTable *result = hash;
pe_node_t *other_node = NULL;
GListPtr gIter = list;
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
other_node = pe_find_node_id(list, node->details->id);
if (other_node == NULL) {
node->weight = -INFINITY;
} else if (merge_scores) {
node->weight = pe__add_scores(node->weight, other_node->weight);
}
}
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
other_node = pe_hash_table_lookup(result, node->details->id);
if (other_node == NULL) {
pe_node_t *new_node = pe__copy_node(node);
new_node->weight = -INFINITY;
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
}
}
/*!
* \internal
* \brief Create a node hash table from a node list
*
* \param[in] list Node list
*
* \return Hash table equivalent of node list
*/
GHashTable *
pe__node_list2table(GList *list)
{
GHashTable *result = NULL;
result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free);
for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
}
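/* Minimal GLib sketch of the list-to-table pattern above, using plain
 * strings instead of pe_node_t (illustrative only; build with
 * pkg-config glib-2.0): each element is copied and inserted with the
 * key pointing into the copy, much as the node table keys on the copied
 * node's details->id.
 */
#include <glib.h>
#include <stdio.h>

int
main(void)
{
    GList *list = NULL;
    GHashTable *table = g_hash_table_new_full(g_str_hash, g_str_equal,
                                              NULL, g_free);

    list = g_list_prepend(list, "cluster01");
    list = g_list_prepend(list, "cluster02");

    for (GList *iter = list; iter != NULL; iter = iter->next) {
        char *copy = g_strdup(iter->data);

        /* key destroy is NULL above because the key aliases the value */
        g_hash_table_insert(table, copy, copy);
    }
    printf("%u entries\n", g_hash_table_size(table));
    g_hash_table_destroy(table);
    g_list_free(list);
    return 0;
}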
gint
sort_node_uname(gconstpointer a, gconstpointer b)
{
return pcmk_numeric_strcasecmp(((const pe_node_t *) a)->details->uname,
((const pe_node_t *) b)->details->uname);
}
/*!
* \internal
* \brief Output node weights to stdout
*
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
static void
pe__output_node_weights(pe_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
char score[128]; // Stack-allocated since this is called frequently
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname);
for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
score2char_stack(node->weight, score, sizeof(score));
if (rsc) {
printf("%s: %s allocation score on %s: %s\n",
comment, rsc->id, node->details->uname, score);
} else {
printf("%s: %s = %s\n", comment, node->details->uname, score);
}
}
g_list_free(list);
}
/*!
* \internal
* \brief Log node weights at trace level
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
pe_resource_t *rsc, const char *comment, GHashTable *nodes)
{
GHashTableIter iter;
pe_node_t *node = NULL;
char score[128]; // Stack-allocated since this is called frequently
// Don't waste time if we're not tracing at this point
pcmk__log_else(LOG_TRACE, return);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
score2char_stack(node->weight, score, sizeof(score));
if (rsc) {
qb_log_from_external_source(function, file,
"%s: %s allocation score on %s: %s",
LOG_TRACE, line, 0,
comment, rsc->id,
node->details->uname, score);
} else {
qb_log_from_external_source(function, file, "%s: %s = %s",
LOG_TRACE, line, 0,
comment, node->details->uname,
score);
}
}
}
/*!
* \internal
* \brief Log or output node weights
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] to_log Log if true, otherwise output
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
void
pe__show_node_weights_as(const char *file, const char *function, int line,
bool to_log, pe_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
if (rsc != NULL) {
if (is_set(rsc->flags, pe_rsc_orphan)) {
// Don't show allocation scores for orphans
return;
}
nodes = rsc->allowed_nodes;
}
if (nodes == NULL) {
// Nothing to show
return;
}
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
pe__output_node_weights(rsc, comment, nodes);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe__show_node_weights_as(file, function, line, to_log, child,
comment, nodes);
}
}
}
static void
append_dump_text(gpointer key, gpointer value, gpointer user_data)
{
char **dump_text = user_data;
char *new_text = crm_strdup_printf("%s %s=%s",
*dump_text, (char *)key, (char *)value);
free(*dump_text);
*dump_text = new_text;
}
void
dump_node_capacity(int level, const char *comment, pe_node_t * node)
{
char *dump_text = crm_strdup_printf("%s: %s capacity:",
comment, node->details->uname);
g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
if (level == LOG_STDOUT) {
fprintf(stdout, "%s\n", dump_text);
} else {
crm_trace("%s", dump_text);
}
free(dump_text);
}
void
dump_rsc_utilization(int level, const char *comment, pe_resource_t * rsc, pe_node_t * node)
{
char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
comment, rsc->id, node->details->uname);
g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
switch (level) {
case LOG_STDOUT:
fprintf(stdout, "%s\n", dump_text);
break;
case LOG_NEVER:
break;
default:
crm_trace("%s", dump_text);
}
free(dump_text);
}
gint
sort_rsc_index(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->sort_index > resource2->sort_index) {
return -1;
}
if (resource1->sort_index < resource2->sort_index) {
return 1;
}
return 0;
}
gint
sort_rsc_priority(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->priority > resource2->priority) {
return -1;
}
if (resource1->priority < resource2->priority) {
return 1;
}
return 0;
}
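/* Standalone sketch of the descending comparator pattern shared by
 * sort_rsc_index() and sort_rsc_priority() above: NULL entries sort
 * last, and larger values sort first (a negative return means "a
 * before b"). Shown here with qsort() over plain int pointers.
 */
#include <stdio.h>
#include <stdlib.h>

static int
compare_desc(const void *a, const void *b)
{
    const int *x = *(const int *const *) a;
    const int *y = *(const int *const *) b;

    if (x == NULL && y == NULL) {
        return 0;
    }
    if (x == NULL) {
        return 1;   /* NULLs sink to the end */
    }
    if (y == NULL) {
        return -1;
    }
    if (*x > *y) {
        return -1;  /* higher value sorts first */
    }
    if (*x < *y) {
        return 1;
    }
    return 0;
}

int
main(void)
{
    int one = 1, five = 5;
    const int *items[] = { &one, NULL, &five };

    qsort(items, 3, sizeof(items[0]), compare_desc);
    printf("%d %d %s\n", *items[0], *items[1],
           (items[2] == NULL) ? "NULL" : "?"); /* 5 1 NULL */
    return 0;
}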
static enum pe_quorum_policy
effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
{
enum pe_quorum_policy policy = data_set->no_quorum_policy;
if (is_set(data_set->flags, pe_flag_have_quorum)) {
policy = no_quorum_ignore;
} else if (data_set->no_quorum_policy == no_quorum_demote) {
switch (rsc->role) {
case RSC_ROLE_MASTER:
case RSC_ROLE_SLAVE:
if (rsc->next_role > RSC_ROLE_SLAVE) {
rsc->next_role = RSC_ROLE_SLAVE;
}
policy = no_quorum_ignore;
break;
default:
policy = no_quorum_stop;
break;
}
}
return policy;
}
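/* Standalone sketch of the no-quorum-policy=demote handling above
 * (local enum mirroring the role ordering, not the library's): promoted
 * or unpromoted instances are capped at the unpromoted role and left
 * running, while instances in any other role fall back to "stop".
 */
#include <stdio.h>

enum role { ROLE_STOPPED, ROLE_STARTED, ROLE_SLAVE, ROLE_MASTER };

static enum role
cap_role_without_quorum(enum role current, enum role next)
{
    if (current == ROLE_MASTER || current == ROLE_SLAVE) {
        /* demote if promoted, otherwise keep the planned role */
        return (next > ROLE_SLAVE) ? ROLE_SLAVE : next;
    }
    return ROLE_STOPPED; /* everything else is stopped */
}

int
main(void)
{
    /* a promoted instance is demoted to ROLE_SLAVE (2) */
    printf("%d\n", cap_role_without_quorum(ROLE_MASTER, ROLE_MASTER));
    return 0;
}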
pe_action_t *
custom_action(pe_resource_t * rsc, char *key, const char *task,
pe_node_t * on_node, gboolean optional, gboolean save_action,
pe_working_set_t * data_set)
{
pe_action_t *action = NULL;
GListPtr possible_matches = NULL;
CRM_CHECK(key != NULL, return NULL);
CRM_CHECK(task != NULL, free(key); return NULL);
if (save_action && rsc != NULL) {
possible_matches = find_actions(rsc->actions, key, on_node);
} else if(save_action) {
#if 0
action = g_hash_table_lookup(data_set->singletons, key);
#else
/* More expensive but takes 'node' into account */
possible_matches = find_actions(data_set->actions, key, on_node);
#endif
}
if(data_set->singletons == NULL) {
data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
}
if (possible_matches != NULL) {
if (pcmk__list_of_multiple(possible_matches)) {
pe_warn("Action %s for %s on %s exists %d times",
task, rsc ? rsc->id : "",
on_node ? on_node->details->uname : "", g_list_length(possible_matches));
}
action = g_list_nth_data(possible_matches, 0);
pe_rsc_trace(rsc, "Found existing action %d (%s) for %s (%s) on %s",
action->id, action->uuid,
(rsc? rsc->id : "no resource"), task,
(on_node? on_node->details->uname : "no node"));
g_list_free(possible_matches);
}
if (action == NULL) {
if (save_action) {
pe_rsc_trace(rsc, "Creating %s action %d: %s for %s (%s) on %s",
(optional? "optional" : "mandatory"),
data_set->action_id, key,
(rsc? rsc->id : "no resource"), task,
(on_node? on_node->details->uname : "no node"));
}
action = calloc(1, sizeof(pe_action_t));
if (save_action) {
action->id = data_set->action_id++;
} else {
action->id = 0;
}
action->rsc = rsc;
CRM_ASSERT(task != NULL);
action->task = strdup(task);
if (on_node) {
action->node = pe__copy_node(on_node);
}
action->uuid = strdup(key);
if (safe_str_eq(task, CRM_OP_LRM_DELETE)) {
// Resource history deletion for a node can be done on the DC
pe_set_action_bit(action, pe_action_dc);
}
pe_set_action_bit(action, pe_action_runnable);
if (optional) {
pe_set_action_bit(action, pe_action_optional);
} else {
pe_clear_action_bit(action, pe_action_optional);
}
action->extra = crm_str_table_new();
action->meta = crm_str_table_new();
if (save_action) {
data_set->actions = g_list_prepend(data_set->actions, action);
if(rsc == NULL) {
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
}
if (rsc != NULL) {
guint interval_ms = 0;
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
unpack_operation(action, action->op_entry, rsc->container, data_set,
interval_ms);
if (save_action) {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
if (save_action) {
pe_rsc_trace(rsc, "Action %d created", action->id);
}
}
if (!optional && is_set(action->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "Unset optional on action %d", action->id);
pe_clear_action_bit(action, pe_action_optional);
}
if (rsc != NULL) {
enum action_tasks a_task = text2task(action->task);
enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set);
int warn_level = LOG_TRACE;
if (save_action) {
warn_level = LOG_WARNING;
}
if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
&& action->node != NULL && action->op_entry != NULL) {
pe_rule_eval_data_t rule_data = {
.node_hash = action->node->details->attrs,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe_set_action_bit(action, pe_action_have_node_attrs);
pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
&rule_data, action->extra, NULL,
FALSE, data_set);
}
if (is_set(action->flags, pe_action_pseudo)) {
/* leave untouched */
} else if (action->node == NULL) {
pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
pe_clear_action_bit(action, pe_action_runnable);
} else if (is_not_set(rsc->flags, pe_rsc_managed)
&& g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL) {
crm_debug("Action %s (unmanaged)", action->uuid);
pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
pe_set_action_bit(action, pe_action_optional);
/* action->runnable = FALSE; */
} else if (is_not_set(action->flags, pe_action_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
action->uuid, action->node->details->uname);
if (is_set(action->rsc->flags, pe_rsc_managed)
&& save_action && a_task == stop_rsc
&& action->node->details->unclean == FALSE) {
pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE);
}
} else if (is_not_set(action->flags, pe_action_dc)
&& action->node->details->pending) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
action->uuid, action->node->details->uname);
} else if (action->needs == rsc_req_nothing) {
pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
&& !pe_can_fence(data_set, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an
* exception: an action cannot be completed if it is on a guest
* node whose host is unclean and cannot be fenced.
*/
pe_clear_action_bit(action, pe_action_runnable);
crm_debug("%s\t%s (cancelled : host cannot be fenced)",
action->node->details->uname, action->uuid);
} else {
pe_set_action_bit(action, pe_action_runnable);
}
#if 0
/*
* No point checking this
* - if we don't have quorum we can't stonith anyway
*/
} else if (action->needs == rsc_req_stonith) {
crm_trace("Action %s requires only stonith", action->uuid);
action->runnable = TRUE;
#endif
} else if (quorum_policy == no_quorum_stop) {
pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE);
crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);
} else if (quorum_policy == no_quorum_freeze) {
pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE);
pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
action->node->details->uname, action->uuid);
}
} else if(is_not_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
//pe_action_set_reason(action, NULL, TRUE);
pe_set_action_bit(action, pe_action_runnable);
}
if (save_action) {
switch (a_task) {
case stop_rsc:
set_bit(rsc->flags, pe_rsc_stopping);
break;
case start_rsc:
clear_bit(rsc->flags, pe_rsc_starting);
if (is_set(action->flags, pe_action_runnable)) {
set_bit(rsc->flags, pe_rsc_starting);
}
break;
default:
break;
}
}
}
free(key);
return action;
}
static bool
valid_stop_on_fail(const char *value)
{
return pcmk__str_none_of(value, "standby", "demote", "stop", NULL);
}
static const char *
unpack_operation_on_fail(pe_action_t * action)
{
const char *name = NULL;
const char *role = NULL;
const char *on_fail = NULL;
const char *interval_spec = NULL;
const char *enabled = NULL;
const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (safe_str_eq(action->task, CRMD_ACTION_STOP)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
"allowed for stop", action->rsc->id, value);
return NULL;
} else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) {
/* demote on_fail defaults to master monitor value if present */
xmlNode *operation = NULL;
CRM_CHECK(action->rsc != NULL, return NULL);
for (operation = __xml_first_child_element(action->rsc->ops_xml);
operation && !value; operation = __xml_next_element(operation)) {
if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
continue;
}
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
enabled = crm_element_value(operation, "enabled");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!on_fail) {
continue;
} else if (enabled && !crm_is_true(enabled)) {
continue;
} else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) {
continue;
} else if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
} else if (safe_str_eq(on_fail, "demote")) {
continue;
}
value = on_fail;
}
} else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) {
value = "ignore";
} else if (safe_str_eq(value, "demote")) {
name = crm_element_value(action->op_entry, "name");
role = crm_element_value(action->op_entry, "role");
on_fail = crm_element_value(action->op_entry, XML_OP_ATTR_ON_FAIL);
interval_spec = crm_element_value(action->op_entry,
XML_LRM_ATTR_INTERVAL);
if (safe_str_neq(name, CRMD_ACTION_PROMOTE)
&& (safe_str_neq(name, CRMD_ACTION_STATUS)
|| safe_str_neq(role, "Master")
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
"allowed for it", action->rsc->id, name);
return NULL;
}
}
return value;
}
static xmlNode *
find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
{
guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
const char *name = NULL;
const char *value = NULL;
const char *interval_spec = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
if (safe_str_neq(name, RSC_STATUS)) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms && (interval_ms < min_interval_ms)) {
min_interval_ms = interval_ms;
op = operation;
}
}
}
return op;
}
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay));
}
}
return start_delay;
}
// true if value contains valid, non-NULL interval origin for recurring op
static bool
unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms,
crm_time_t *now, long long *start_delay)
{
long long result = 0;
guint interval_sec = interval_ms / 1000;
crm_time_t *origin = NULL;
// Ignore unspecified values and non-recurring operations
if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
return false;
}
// Parse interval origin from text
origin = crm_time_new(value);
if (origin == NULL) {
pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
"'%s' because '%s' is not valid",
(ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
return false;
}
// Get seconds since origin (negative if origin is in the future)
result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
crm_time_free(origin);
// Calculate seconds from closest interval to now
result = result % interval_sec;
// Calculate seconds remaining until next interval
result = ((result <= 0)? 0 : interval_sec) - result;
crm_info("Calculated a start delay of %llds for operation '%s'",
result,
(ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
if (start_delay != NULL) {
*start_delay = result * 1000; // milliseconds
}
return true;
}
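/* A minimal standalone rendering of the start-delay arithmetic above,
 * using plain integers instead of crm_time_t (values are hypothetical):
 * with a 40s interval and an origin 130s in the past, we are
 * 130 % 40 = 10s into the current interval, so the next one starts in
 * 40 - 10 = 30s.
 */
#include <stdio.h>

int
main(void)
{
    long long since_origin = 130; /* now - origin, in seconds */
    long long interval_sec = 40;  /* recurring operation interval */
    long long result = since_origin % interval_sec;

    /* seconds remaining until the next interval boundary */
    result = ((result <= 0) ? 0 : interval_sec) - result;
    printf("start delay: %llds (%lldms)\n", result, result * 1000);
    return 0;
}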
static int
unpack_timeout(const char *value)
{
int timeout = crm_get_msec(value);
if (timeout < 0) {
timeout = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
return timeout;
}
int
pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
{
xmlNode *child = NULL;
const char *timeout = NULL;
GHashTable *action_meta = NULL;
int timeout_ms = 0;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) {
timeout = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout == NULL && data_set->op_defaults) {
action_meta = crm_str_table_new();
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
&rule_data, action_meta, NULL, FALSE, data_set);
timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
// @TODO check meta-attributes (including versioned meta-attributes)
// @TODO maybe use min-interval monitor timeout as default for monitors
timeout_ms = crm_get_msec(timeout);
if (timeout_ms < 0) {
timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
if (action_meta != NULL) {
g_hash_table_destroy(action_meta);
}
return timeout_ms;
}
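/* Condensed sketch of the timeout lookup order above (hypothetical
 * millisecond values, negative meaning "not configured"): the
 * operation's own timeout wins, then the op_defaults value, then the
 * cluster-wide default.
 */
#include <stdio.h>

static int
effective_timeout_ms(int op_ms, int op_defaults_ms, int cluster_default_ms)
{
    if (op_ms >= 0) {
        return op_ms;              /* explicit timeout on the operation */
    }
    if (op_defaults_ms >= 0) {
        return op_defaults_ms;     /* value from op_defaults */
    }
    return cluster_default_ms;     /* built-in default */
}

int
main(void)
{
    printf("%dms\n", effective_timeout_ms(-1, 60000, 20000)); /* 60000ms */
    return 0;
}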
#if ENABLE_VERSIONED_ATTRS
static void
unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
guint interval_ms, crm_time_t *now)
{
xmlNode *attrs = NULL;
xmlNode *attr = NULL;
for (attrs = __xml_first_child_element(versioned_meta); attrs != NULL;
attrs = __xml_next_element(attrs)) {
for (attr = __xml_first_child_element(attrs); attr != NULL;
attr = __xml_next_element(attr)) {
const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) {
int start_delay = unpack_start_delay(value, NULL);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
} else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) {
long long start_delay = 0;
if (unpack_interval_origin(value, xml_obj, interval_ms, now,
&start_delay)) {
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME,
XML_OP_ATTR_START_DELAY);
crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
}
} else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) {
int timeout = unpack_timeout(value);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout);
}
}
}
}
#endif
/*!
* \brief Unpack operation XML into an action structure
*
* Unpack an operation's meta-attributes (normalizing the interval, timeout,
* and start delay values as integer milliseconds), requirements, and
* failure policy.
*
* \param[in,out] action Action to unpack into
* \param[in] xml_obj Operation XML (or NULL if all defaults)
* \param[in] container Resource that contains affected resource, if any
* \param[in] data_set Cluster state
* \param[in] interval_ms How frequently to perform the operation
*/
static void
unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set, guint interval_ms)
{
int timeout = 0;
char *value_ms = NULL;
const char *value = NULL;
const char *field = XML_LRM_ATTR_INTERVAL;
char *default_timeout = NULL;
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *rsc_details = NULL;
#endif
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_op_eval_data_t op_rule_data = {
.op_name = action->task,
.interval = interval_ms
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = &op_rule_data
};
CRM_CHECK(action && action->rsc, return);
// Cluster-wide
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, FALSE, data_set);
// Probe timeouts default differently, so handle timeout default later
default_timeout = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
if (default_timeout) {
default_timeout = strdup(default_timeout);
g_hash_table_remove(action->meta, XML_ATTR_TIMEOUT);
}
if (xml_obj) {
xmlAttrPtr xIter = NULL;
// take precedence over defaults
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, TRUE, data_set);
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
pe_eval_versioned_attributes(data_set->input, xml_obj,
XML_TAG_ATTR_SETS, &rule_data,
rsc_details->versioned_parameters,
NULL);
pe_eval_versioned_attributes(data_set->input, xml_obj,
XML_TAG_META_SETS, &rule_data,
rsc_details->versioned_meta,
NULL);
#endif
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
*/
for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_obj, prop_name);
g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
}
}
g_hash_table_remove(action->meta, "id");
// Normalize interval to milliseconds
if (interval_ms > 0) {
value_ms = crm_strdup_printf("%u", interval_ms);
g_hash_table_replace(action->meta, strdup(field), value_ms);
} else if (g_hash_table_lookup(action->meta, field) != NULL) {
g_hash_table_remove(action->meta, field);
}
// Handle timeout default, now that we know the interval
if (g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT)) {
free(default_timeout);
} else {
// Probe timeouts default to minimum-interval monitor's
if (safe_str_eq(action->task, RSC_STATUS) && (interval_ms == 0)) {
xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
if (min_interval_mon) {
value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
if (value) {
crm_trace("\t%s defaults to minimum-interval monitor's timeout '%s'",
action->uuid, value);
free(default_timeout);
default_timeout = strdup(value);
}
}
}
if (default_timeout) {
g_hash_table_insert(action->meta, strdup(XML_ATTR_TIMEOUT),
default_timeout);
}
}
if (pcmk__str_none_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
action->needs = rsc_req_nothing;
value = "nothing (not start/promote)";
} else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
action->needs = rsc_req_stonith;
value = "fencing (resource)";
} else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
action->needs = rsc_req_quorum;
value = "quorum (resource)";
} else {
action->needs = rsc_req_nothing;
value = "nothing (resource)";
}
pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->uuid, value);
value = unpack_operation_on_fail(action);
if (value == NULL) {
} else if (safe_str_eq(value, "block")) {
action->on_fail = action_fail_block;
g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
value = "block"; // The above could destroy the original string
} else if (safe_str_eq(value, "fence")) {
action->on_fail = action_fail_fence;
value = "node fencing";
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
"operation '%s' to 'stop' because 'fence' is not "
"valid when fencing is disabled", action->uuid);
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
}
} else if (safe_str_eq(value, "standby")) {
action->on_fail = action_fail_standby;
value = "node standby";
} else if (pcmk__str_any_of(value, "ignore", "nothing", NULL)) {
action->on_fail = action_fail_ignore;
value = "ignore";
} else if (safe_str_eq(value, "migrate")) {
action->on_fail = action_fail_migrate;
value = "force migration";
} else if (safe_str_eq(value, "stop")) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
} else if (safe_str_eq(value, "restart")) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate)";
} else if (safe_str_eq(value, "restart-container")) {
if (container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else if (safe_str_eq(value, "demote")) {
action->on_fail = action_fail_demote;
value = "demote instance";
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* For remote nodes, ensure that any failure that results in dropping an
* active connection to the node results in fencing of the node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) &&
(pe__resource_is_remote_conn(action->rsc, data_set) &&
!(safe_str_eq(action->task, CRMD_ACTION_STATUS) && (interval_ms == 0)) &&
(safe_str_neq(action->task, CRMD_ACTION_START)))) {
if (!is_set(action->rsc->flags, pe_rsc_managed)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop unmanaged remote node (enforcing default)";
} else {
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fence remote node (default)";
} else {
value = "recover remote node connection (default)";
}
if (action->rsc->remote_reconnect_ms) {
action->fail_role = RSC_ROLE_STOPPED;
}
action->on_fail = action_fail_reset_remote;
}
} else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) {
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
action->on_fail = action_fail_fence;
value = "resource fence (default)";
} else {
action->on_fail = action_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
if (value) {
pe_warn_once(pe_wo_role_after,
"Support for role_after_failure is deprecated and will be removed in a future release");
}
}
if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == RSC_ROLE_UNKNOWN) {
if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) {
action->fail_role = RSC_ROLE_SLAVE;
} else {
action->fail_role = RSC_ROLE_STARTED;
}
}
pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task,
role2text(action->fail_role));
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
if (value) {
unpack_start_delay(value, action->meta);
} else {
long long start_delay = 0;
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
&start_delay)) {
g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
crm_strdup_printf("%lld", start_delay));
}
}
value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
timeout = unpack_timeout(value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout));
#if ENABLE_VERSIONED_ATTRS
unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms,
data_set->now);
#endif
}
static xmlNode *
find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled)
{
guint interval_ms = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *value = NULL;
const char *interval_spec = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
match_key = pcmk__op_key(rsc->id, name, interval_ms);
if (safe_str_eq(key, match_key)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
if (safe_str_eq(key, match_key)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
local_key = pcmk__op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = pcmk__op_key(rsc->id, "notify", 0);
key = local_key;
goto retry;
}
return NULL;
}
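/*!
 * \brief Find the enabled operation definition XML matching an action key
 *
 * Migration and notify keys fall back to matching the resource's generic
 * "migrate" or "notify" operation if no exact match is found.
 *
 * \param[in] rsc  Resource whose configured operations should be searched
 * \param[in] key  Action key (as created by pcmk__op_key()) to match
 *
 * \return Matching operation XML, or NULL if none
 */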
xmlNode *
find_rsc_op_entry(pe_resource_t * rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
void
print_node(const char *pre_text, pe_node_t * node, gboolean details)
{
if (node == NULL) {
crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
CRM_ASSERT(node->details);
crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
pre_text == NULL ? "" : pre_text,
pre_text == NULL ? "" : ": ",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
int log_level = LOG_TRACE;
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->fns->print(rsc, "\t\t", pe_print_log|pe_print_pending,
&log_level);
}
}
}
/*
* Used by the HashTable for-loop
*/
void
print_str_str(gpointer key, gpointer value, gpointer user_data)
{
crm_trace("%s%s %s ==> %s",
user_data == NULL ? "" : (char *)user_data,
user_data == NULL ? "" : ": ", (char *)key, (char *)value);
}
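/*!
 * \brief Free an action object, including its ordering wrappers and tables
 *
 * \param[in] action  Action to free (do nothing if NULL)
 */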
void
pe_free_action(pe_action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
#if ENABLE_VERSIONED_ATTRS
if (action->rsc) {
pe_free_rsc_action_details(action);
}
#endif
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
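/*!
 * \brief Find all recurring actions in a list
 *
 * Actions with no interval, a zero interval, or a cancel task are skipped.
 *
 * \param[in] input        List of actions to search
 * \param[in] not_on_node  If not NULL, also skip actions without a node or
 *                         scheduled on this node
 *
 * \return List of matching actions (or NULL if none)
 */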
GListPtr
find_recurring_actions(GListPtr input, pe_node_t * not_on_node)
{
const char *value = NULL;
GListPtr result = NULL;
GListPtr gIter = input;
CRM_CHECK(input != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS);
if (value == NULL) {
/* skip */
} else if (safe_str_eq(value, "0")) {
/* skip */
} else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) {
/* skip */
} else if (not_on_node == NULL) {
crm_trace("(null) Found: %s", action->uuid);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
/* skip */
} else if (action->node->details != not_on_node->details) {
crm_trace("Found: %s", action->uuid);
result = g_list_prepend(result, action);
}
}
return result;
}
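/*!
 * \brief Parse an action name into a task enum
 *
 * For primitives (or when allow_non_atomic is FALSE), collective tasks such
 * as "stopped" or "promoted" are folded back into their atomic counterparts.
 *
 * \return Task enum corresponding to name
 */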
enum action_tasks
get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic)
{
enum action_tasks task = text2task(name);
if (rsc == NULL) {
return task;
} else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
switch (task) {
case stopped_rsc:
case started_rsc:
case action_demoted:
case action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
return task - 1;
default:
break;
}
}
return task;
}
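/*!
 * \brief Find the first action in a list that matches the given criteria
 *
 * \param[in] input    List of actions to search
 * \param[in] uuid     If not NULL, action UUID that must match
 * \param[in] task     If not NULL, action task that must match
 * \param[in] on_node  If not NULL, node that the action must be scheduled on
 *
 * \return First matching action, or NULL if none
 */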
pe_action_t *
find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t * on_node)
{
GListPtr gIter = NULL;
CRM_CHECK(uuid || task, return NULL);
for (gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (uuid != NULL && safe_str_neq(uuid, action->uuid)) {
continue;
} else if (task != NULL && safe_str_neq(task, action->task)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
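/*!
 * \brief Find all actions in a list matching a key
 *
 * \param[in] input    List of actions to search
 * \param[in] key      Action key (UUID) to match
 * \param[in] on_node  If not NULL, match only actions on this node (actions
 *                     without a node are assigned to it and matched)
 *
 * \return List of matching actions (or NULL if none)
 */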
GListPtr
find_actions(GListPtr input, const char *key, const pe_node_t *on_node)
{
GListPtr gIter = input;
GListPtr result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (safe_str_neq(key, action->uuid)) {
crm_trace("%s does not match action %s", key, action->uuid);
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, on_node->details->uname);
action->node = pe__copy_node(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, on_node->details->uname);
result = g_list_prepend(result, action);
} else {
crm_trace("Action %s on node %s does not match requested node %s",
key, action->node->details->uname,
on_node->details->uname);
}
}
return result;
}
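/*!
 * \brief Find all actions in a list matching a key on a specific node
 *
 * Unlike find_actions(), a node is required, and actions without an assigned
 * node never match.
 *
 * \return List of matching actions (or NULL if none)
 */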
GList *
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
if (on_node == NULL) {
crm_trace("Not searching for action %s because node not specified",
key);
return NULL;
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (action->node == NULL) {
crm_trace("Skipping comparison of %s vs action %s without node",
key, action->uuid);
} else if (safe_str_neq(key, action->uuid)) {
crm_trace("Desired action %s doesn't match %s", key, action->uuid);
} else if (safe_str_neq(on_node->details->id,
action->node->details->id)) {
crm_trace("Action %s desired node ID %s doesn't match %s",
key, on_node->details->id, action->node->details->id);
} else {
crm_trace("Action %s matches", key);
result = g_list_prepend(result, action);
}
}
return result;
}
/*!
* \brief Find all actions of given type for a resource
*
* \param[in] rsc Resource to search
* \param[in] node Find only actions scheduled on this node
* \param[in] task Action name to search for
* \param[in] require_node If TRUE, NULL node or action node will not match
*
* \return List of actions found (or NULL if none)
* \note If node is not NULL and require_node is FALSE, matching actions
* without a node will be assigned to node.
*/
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
char *key = pcmk__op_key(rsc->id, task, 0);
if (require_node) {
result = find_actions_exact(rsc->actions, key, node);
} else {
result = find_actions(rsc->actions, key, node);
}
free(key);
return result;
}
static void
resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
{
pe_node_t *match = NULL;
if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
&& safe_str_eq(tag, "symmetric_default")) {
        /* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
* applied to them.
*/
return;
} else if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
}
match->weight = pe__add_scores(match->weight, score);
}
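/*!
 * \brief Apply a location score for a resource to one node or to all nodes
 *
 * \param[in] rsc       Resource to apply the score to (and to its children)
 * \param[in] node      Node to apply the score to, or NULL for all nodes
 * \param[in] score     Score to add
 * \param[in] tag       Tag to use in log messages
 * \param[in] data_set  If node is NULL, apply to all nodes in this working
 *                      set (if this is NULL too, use the resource's
 *                      currently allowed nodes)
 *
 * \note A score of -INFINITY with a NULL node also deallocates the resource.
 */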
void
resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
pe_working_set_t * data_set)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
} else if (data_set != NULL) {
GListPtr gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node_iter = (pe_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
pe_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
resource_node_score(rsc, node_iter, score, tag);
}
}
if (node == NULL && score == -INFINITY) {
if (rsc->allocated_to) {
crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
free(rsc->allocated_to);
rsc->allocated_to = NULL;
}
}
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
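/*!
 * \brief Compare two lrm_rsc_op status entries by age (as a GCompareFunc)
 *
 * Entries are ordered by call ID when both are known; entries with equal call
 * IDs are ordered by last-rc-change time, and pending operations are ordered
 * relative to the other entry by decoding their transition magic.
 *
 * \return Negative if a is older than b, positive if b is older, else 0
 */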
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
if (safe_str_eq(a_xml_id, b_xml_id)) {
        /* We have duplicate lrm_rsc_op entries in the status section, which is
         * unlikely to be a good thing. We can handle it easily enough, but we
         * need to get to the bottom of why it's happening.
         */
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
        /* Both are pending ops, so the order doesn't matter,
         * since stops are never pending
         */
sort_return(0, "pending");
} else if (a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (b_call_id >= 0 && a_call_id == b_call_id) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
time_t last_a = -1;
time_t last_b = -1;
crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %lld vs %lld",
(long long) last_a, (long long) last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic a");
}
if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) {
            /* Some of the logic in here may be redundant...
             *
             * If the UUID from the TE doesn't match, then one of the
             * operations had better be pending; pending operations don't
             * survive between elections and joins because we query the LRM
             * directly.
             */
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
time_t
get_effective_time(pe_working_set_t * data_set)
{
if(data_set) {
if (data_set->now == NULL) {
crm_trace("Recording a new 'now'");
data_set->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(data_set->now);
}
crm_trace("Defaulting to 'now'");
return time(NULL);
}
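/*!
 * \brief Get a resource's configured target role, if set to a non-default value
 *
 * \param[in]  rsc   Resource to check the target-role meta-attribute of
 * \param[out] role  Where to store the parsed role
 *
 * \return TRUE if *role was set, or FALSE if the default behavior applies
 */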
gboolean
get_target_role(pe_resource_t * rsc, enum rsc_role_e * role)
{
enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
if (value == NULL || safe_str_eq("started", value)
|| safe_str_eq("default", value)) {
return FALSE;
}
local_role = text2role(value);
if (local_role == RSC_ROLE_UNKNOWN) {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
} else if (local_role > RSC_ROLE_STARTED) {
if (is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
if (local_role > RSC_ROLE_SLAVE) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
} else {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' only makes sense for promotable "
"clones", rsc->id, value);
return FALSE;
}
}
*role = local_role;
return TRUE;
}
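/*!
 * \brief Create an ordering between two actions
 *
 * The ordering is recorded in both directions: as an actions_after entry of
 * lh_action and an actions_before entry of rh_action.
 *
 * \return TRUE if a new ordering was added, or FALSE if it was a no-op order,
 *         a NULL action, or a duplicate
 */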
gboolean
order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
{
GListPtr gIter = NULL;
pe_action_wrapper_t *wrapper = NULL;
GListPtr list = NULL;
if (order == pe_order_none) {
return FALSE;
}
if (lh_action == NULL || rh_action == NULL) {
return FALSE;
}
crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);
/* Ensure we never create a dependency on ourselves... it's happened */
CRM_ASSERT(lh_action != rh_action);
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
if (after->action == rh_action && (after->type & order)) {
return FALSE;
}
}
wrapper = calloc(1, sizeof(pe_action_wrapper_t));
wrapper->action = rh_action;
wrapper->type = order;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
wrapper = NULL;
/* order |= pe_order_implies_then; */
/* order ^= pe_order_implies_then; */
wrapper = calloc(1, sizeof(pe_action_wrapper_t));
wrapper->action = lh_action;
wrapper->type = order;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
return TRUE;
}
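/*!
 * \brief Get the singleton pseudo-action with a given name, creating it if needed
 *
 * \param[in] name      Action name
 * \param[in] data_set  Cluster working set
 *
 * \return Existing singleton action with that name, or a newly created action
 *         marked pseudo and runnable
 */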
pe_action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
pe_action_t *op = NULL;
if(data_set->singletons) {
op = g_hash_table_lookup(data_set->singletons, name);
}
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
set_bit(op->flags, pe_action_pseudo);
set_bit(op->flags, pe_action_runnable);
}
return op;
}
void
destroy_ticket(gpointer data)
{
pe_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
}
free(ticket->id);
free(ticket);
}
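/*!
 * \brief Get the ticket entry with a given ID, creating it if needed
 *
 * \param[in] ticket_id  Ticket ID to look up
 * \param[in] data_set   Cluster working set (its ticket table is created on
 *                       demand)
 *
 * \return Ticket entry, or NULL on allocation failure or empty ID
 */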
pe_ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
pe_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
if (data_set->tickets == NULL) {
data_set->tickets =
g_hash_table_new_full(crm_str_hash, g_str_equal, free,
destroy_ticket);
}
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = calloc(1, sizeof(pe_ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
}
crm_trace("Creaing ticket entry for %s", ticket_id);
ticket->id = strdup(ticket_id);
ticket->granted = FALSE;
ticket->last_granted = -1;
ticket->standby = FALSE;
ticket->state = crm_str_table_new();
g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
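/*!
 * \internal
 * \brief Remove XML attributes depending on a space-delimited list of names
 *
 * \param[in] param_set     XML with attributes to filter
 * \param[in] param_string  Space-delimited list of attribute names
 * \param[in] need_present  If TRUE, remove attributes not in the list;
 *                          if FALSE, remove attributes that are in the list
 */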
static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
if (param_set && param_string) {
xmlAttrPtr xIter = param_set->properties;
while (xIter) {
const char *prop_name = (const char *)xIter->name;
char *name = crm_strdup_printf(" %s ", prop_name);
char *match = strstr(param_string, name);
free(name);
            // Advance now, because the current entry might get removed below
xIter = xIter->next;
if (need_present && match == NULL) {
crm_trace("%s not found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
} else if (need_present == FALSE && match) {
crm_trace("%s found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
}
}
}
}
#if ENABLE_VERSIONED_ATTRS
static void
append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
{
GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
crm_xml_add(params, key, value);
}
g_hash_table_destroy(hash);
}
#endif
/*!
* \internal
* \brief Calculate action digests and store in node's digest cache
*
* \param[in] rsc Resource that action was for
* \param[in] task Name of action performed
* \param[in] key Action's task key
* \param[in] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] calc_secure Whether to calculate secure digest
* \param[in] data_set Cluster working set
*
* \return Pointer to node's digest cache entry
*/
static op_digest_cache_t *
rsc_action_digest(pe_resource_t *rsc, const char *task, const char *key,
pe_node_t *node, xmlNode *xml_op, bool calc_secure,
pe_working_set_t *data_set)
{
op_digest_cache_t *data = NULL;
data = g_hash_table_lookup(node->details->digest_cache, key);
if (data == NULL) {
GHashTable *local_rsc_params = crm_str_table_new();
pe_action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set);
#if ENABLE_VERSIONED_ATTRS
xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
const char *ra_version = NULL;
#endif
const char *op_version;
const char *restart_list = NULL;
const char *secure_list = " passwd password ";
data = calloc(1, sizeof(op_digest_cache_t));
CRM_ASSERT(data != NULL);
get_rsc_attributes(local_rsc_params, rsc, node, data_set);
#if ENABLE_VERSIONED_ATTRS
pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
#endif
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
if (pe__add_bundle_remote_name(rsc, data->params_all,
XML_RSC_ATTR_REMOTE_RA_ADDR)) {
crm_trace("Set address for bundle connection %s (on %s)",
rsc->id, node->details->uname);
}
g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
g_hash_table_foreach(action->extra, hash2field, data->params_all);
g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
if(xml_op) {
secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
#if ENABLE_VERSIONED_ATTRS
ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION);
#endif
} else {
op_version = CRM_FEATURE_SET;
}
#if ENABLE_VERSIONED_ATTRS
append_versioned_params(local_versioned_params, ra_version, data->params_all);
append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all);
{
pe_rsc_action_details_t *details = pe_rsc_action_details(action);
append_versioned_params(details->versioned_parameters, ra_version, data->params_all);
}
#endif
pcmk__filter_op_for_digest(data->params_all);
g_hash_table_destroy(local_rsc_params);
pe_free_action(action);
data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);
if (calc_secure) {
data->params_secure = copy_xml(data->params_all);
if(secure_list) {
filter_parameters(data->params_secure, secure_list, FALSE);
}
data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
}
if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
data->params_restart = copy_xml(data->params_all);
if (restart_list) {
filter_parameters(data->params_restart, restart_list, TRUE);
}
data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
}
g_hash_table_insert(node->details->digest_cache, strdup(key), data);
}
return data;
}
op_digest_cache_t *
rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node,
pe_working_set_t * data_set)
{
op_digest_cache_t *data = NULL;
char *key = NULL;
guint interval_ms = 0;
const char *op_version;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *digest_all;
const char *digest_restart;
CRM_ASSERT(node != NULL);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
key = pcmk__op_key(rsc->id, task, interval_ms);
data = rsc_action_digest(rsc, task, key, node, xml_op,
is_set(data_set->flags, pe_flag_sanitized),
data_set);
data->rc = RSC_DIGEST_MATCH;
if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
key, node->details->uname,
crm_str(digest_restart), data->digest_restart_calc,
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
data->rc = RSC_DIGEST_RESTART;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
data->rc = RSC_DIGEST_UNKNOWN;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s",
key, node->details->uname,
crm_str(digest_all), data->digest_all_calc,
(interval_ms > 0)? "reschedule" : "reload",
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
data->rc = RSC_DIGEST_ALL;
}
free(key);
return data;
}
/*!
* \internal
* \brief Create an unfencing summary for use in special node attribute
*
* Create a string combining a fence device's resource ID, agent type, and
* parameter digest (whether for all parameters or just non-private parameters).
* This can be stored in a special node attribute, allowing us to detect changes
* in either the agent type or parameters, to know whether unfencing must be
* redone or can be safely skipped when the device's history is cleaned.
*
* \param[in] rsc_id Fence device resource ID
* \param[in] agent_type Fence device agent
* \param[in] param_digest Fence device parameter digest
*
* \return Newly allocated string with unfencing digest
* \note The caller is responsible for freeing the result.
*/
static inline char *
create_unfencing_summary(const char *rsc_id, const char *agent_type,
const char *param_digest)
{
return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
}
/*!
* \internal
* \brief Check whether a node can skip unfencing
*
* Check whether a fence device's current definition matches a node's
* stored summary of when it was last unfenced by the device.
*
* \param[in] rsc_id Fence device's resource ID
* \param[in] agent Fence device's agent type
* \param[in] digest_calc Fence device's current parameter digest
* \param[in] node_summary Value of node's special unfencing node attribute
* (a comma-separated list of unfencing summaries for
* all devices that have unfenced this node)
*
* \return TRUE if digest matches, FALSE otherwise
*/
static bool
unfencing_digest_matches(const char *rsc_id, const char *agent,
const char *digest_calc, const char *node_summary)
{
bool matches = FALSE;
if (rsc_id && agent && digest_calc && node_summary) {
char *search_secure = create_unfencing_summary(rsc_id, agent,
digest_calc);
/* The digest was calculated including the device ID and agent,
* so there is no risk of collision using strstr().
*/
matches = (strstr(node_summary, search_secure) != NULL);
crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
search_secure, matches? "" : "not ", node_summary);
free(search_secure);
}
return matches;
}
/* Magic string to use as action name for digest cache entries used for
 * unfencing checks. It is not a real action name (the real one is "on"), so
* check_action_definition() won't confuse these entries with real actions.
*/
#define STONITH_DIGEST_TASK "stonith-on"
/*!
* \internal
* \brief Calculate fence device digests and digest comparison result
*
* \param[in] rsc Fence device resource
* \param[in] agent Fence device's agent type
* \param[in] node Node with digest cache to use
* \param[in] data_set Cluster working set
*
* \return Node's digest cache entry
*/
static op_digest_cache_t *
fencing_action_digest_cmp(pe_resource_t *rsc, const char *agent,
pe_node_t *node, pe_working_set_t *data_set)
{
const char *node_summary = NULL;
// Calculate device's current parameter digests
char *key = pcmk__op_key(rsc->id, STONITH_DIGEST_TASK, 0);
op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key,
node, NULL, TRUE, data_set);
free(key);
// Check whether node has special unfencing summary node attribute
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
if (node_summary == NULL) {
data->rc = RSC_DIGEST_UNKNOWN;
return data;
}
// Check whether full parameter digest matches
if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
node_summary)) {
data->rc = RSC_DIGEST_MATCH;
return data;
}
// Check whether secure parameter digest matches
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
node_summary)) {
data->rc = RSC_DIGEST_MATCH;
if (is_set(data_set->flags, pe_flag_stdout)) {
printf("Only 'private' parameters to %s for unfencing %s changed\n",
rsc->id, node->details->uname);
}
return data;
}
// Parameters don't match
data->rc = RSC_DIGEST_ALL;
if (is_set(data_set->flags, (pe_flag_sanitized|pe_flag_stdout))
&& data->digest_secure_calc) {
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
printf("Parameters to %s for unfencing %s changed, try '%s'\n",
rsc->id, node->details->uname, digest);
free(digest);
}
return data;
}
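/*!
 * \brief Get a resource's ID as it should be displayed
 *
 * \param[in] rsc  Resource to get the displayed ID of
 *
 * \return The XML ID (without any clone instance suffix) for anonymous clone
 *         instances, or the resource's full ID otherwise
 */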
const char *rsc_printable_id(pe_resource_t *rsc)
{
if (is_not_set(rsc->flags, pe_rsc_unique)) {
return ID(rsc->xml);
}
return rsc->id;
}
void
clear_bit_recursive(pe_resource_t * rsc, unsigned long long flag)
{
GListPtr gIter = rsc->children;
clear_bit(rsc->flags, flag);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
clear_bit_recursive(child_rsc, flag);
}
}
void
set_bit_recursive(pe_resource_t * rsc, unsigned long long flag)
{
GListPtr gIter = rsc->children;
set_bit(rsc->flags, flag);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
set_bit_recursive(child_rsc, flag);
}
}
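/*!
 * \internal
 * \brief Recursively find fence devices that provide or require unfencing
 *
 * \param[in] candidates  List of resources to check
 * \param[in] matches     List to add matching devices to
 *
 * \return Updated list of matching fence devices
 */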
static GListPtr
find_unfencing_devices(GListPtr candidates, GListPtr matches)
{
for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
pe_resource_t *candidate = gIter->data;
const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES);
const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);
if(candidate->children) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (is_not_set(candidate->flags, pe_rsc_fence_device)) {
continue;
} else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
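/*!
 * \internal
 * \brief Get the priority fencing delay (if any) that applies to fencing a node
 *
 * \param[in] node      Node being fenced
 * \param[in] data_set  Cluster working set
 *
 * \return Configured priority-fencing-delay if it applies to this target,
 *         otherwise 0
 */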
static int
node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set)
{
int member_count = 0;
int online_count = 0;
int top_priority = 0;
int lowest_priority = 0;
GListPtr gIter = NULL;
// `priority-fencing-delay` is disabled
if (data_set->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
if (node->details->type != node_member) {
return 0;
}
// No need to request a delay if the fencing target is in our partition
if (node->details->online) {
return 0;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = gIter->data;
if (n->details->type != node_member) {
continue;
}
member_count ++;
if (n->details->online) {
online_count++;
}
if (member_count == 1
|| n->details->priority > top_priority) {
top_priority = n->details->priority;
}
if (member_count == 1
|| n->details->priority < lowest_priority) {
lowest_priority = n->details->priority;
}
}
// No need to delay if we have more than half of the cluster members
if (online_count > member_count / 2) {
return 0;
}
    /* All the nodes have equal priority.
     * Any configured `pcmk_delay_base/max` will be applied. */
if (lowest_priority == top_priority) {
return 0;
}
if (node->details->priority < top_priority) {
return 0;
}
return data_set->priority_fencing_delay;
}
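/*!
 * \brief Get (creating if needed) the fencing action for a node
 *
 * \param[in] node            Node to fence
 * \param[in] op              Fence action name (NULL for the configured default)
 * \param[in] optional        Whether fencing is currently optional
 * \param[in] reason          If not NULL, why fencing is needed
 * \param[in] priority_delay  Whether priority-fencing-delay should be considered
 * \param[in] data_set        Cluster working set
 *
 * \return Fencing action for the node
 */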
pe_action_t *
pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason,
bool priority_delay, pe_working_set_t * data_set)
{
char *op_key = NULL;
pe_action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
if(data_set->singletons) {
stonith_op = g_hash_table_lookup(data_set->singletons, op_key);
}
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if (pe__is_guest_or_remote_node(node)
&& is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* Extra work to detect device changes on remotes
*
* We may do this for all nodes in the future, but for now
             * the check_action_definition()-based logic works fine.
*/
long max = 1024;
long digests_all_offset = 0;
long digests_secure_offset = 0;
char *digests_all = calloc(max, sizeof(char));
char *digests_secure = calloc(max, sizeof(char));
GListPtr matches = find_unfencing_devices(data_set->resources, NULL);
for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
pe_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
data = fencing_action_digest_cmp(match, agent, node, data_set);
if(data->rc == RSC_DIGEST_ALL) {
optional = FALSE;
crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id);
if (is_set(data_set->flags, pe_flag_stdout)) {
fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id);
}
}
digests_all_offset += snprintf(
digests_all+digests_all_offset, max-digests_all_offset,
"%s:%s:%s,", match->id, agent, data->digest_all_calc);
digests_secure_offset += snprintf(
digests_secure+digests_secure_offset, max-digests_secure_offset,
"%s:%s:%s,", match->id, agent, data->digest_secure_calc);
}
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_ALL),
digests_all);
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_SECURE),
digests_secure);
}
} else {
free(op_key);
}
if (data_set->priority_fencing_delay > 0
        /* This is a case where `priority-fencing-delay` applies, so at least
         * add the `priority-fencing-delay` field as an indicator. */
&& (priority_delay
            /* Recalculate the priority delay when pe_fence_op() is called
             * again by stage6(), after node priority has actually been
             * calculated with native_add_running() */
|| g_hash_table_lookup(stonith_op->meta,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
        /* Add `priority-fencing-delay` to the fencing op even if it's 0 for
         * the target node, so that it takes precedence over any possible
         * `pcmk_delay_base/max`.
         */
char *delay_s = crm_itoa(node_priority_fencing_delay(node, data_set));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
if(optional == FALSE && pe_can_fence(data_set, node)) {
pe_action_required(stonith_op, NULL, reason);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
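/*!
 * \brief Schedule unfencing of a node, or of all nodes a fence device may run on
 *
 * \param[in] rsc         If not NULL, do nothing unless this is a fence device
 * \param[in] node        If not NULL, unfence this node (when online, clean,
 *                        and not shutting down)
 * \param[in] reason      Why unfencing is needed
 * \param[in] dependency  If not NULL, order it after the unfencing
 * \param[in] data_set    Cluster working set
 */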
void
trigger_unfencing(
pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set)
{
if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
/* No resources require it */
return;
} else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
} else if(node
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
if(dependency) {
order_actions(unfence, dependency, pe_order_optional);
}
} else if(rsc) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
trigger_unfencing(rsc, node, reason, dependency, data_set);
}
}
}
}
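/*!
 * \brief Add an object reference to a tag, creating the tag entry if needed
 *
 * \param[in] tags      Tag table, indexed by tag name
 * \param[in] tag_name  Name of the tag to add the reference to
 * \param[in] obj_ref   Object ID to add as a reference
 *
 * \return TRUE on success (including if the reference already exists),
 *         FALSE on error
 */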
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
pe_tag_t *tag = NULL;
GListPtr gIter = NULL;
gboolean is_existing = FALSE;
CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
tag = calloc(1, sizeof(pe_tag_t));
if (tag == NULL) {
return FALSE;
}
tag->id = strdup(tag_name);
tag->refs = NULL;
g_hash_table_insert(tags, strdup(tag_name), tag);
}
for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
const char *existing_ref = (const char *) gIter->data;
if (crm_str_eq(existing_ref, obj_ref, TRUE)){
is_existing = TRUE;
break;
}
}
if (is_existing == FALSE) {
tag->refs = g_list_append(tag->refs, strdup(obj_ref));
crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
}
return TRUE;
}
void pe_action_set_flag_reason(const char *function, long line,
pe_action_t *action, pe_action_t *reason, const char *text,
enum pe_action_flags flags, bool overwrite)
{
bool unset = FALSE;
bool update = FALSE;
const char *change = NULL;
if(is_set(flags, pe_action_runnable)) {
unset = TRUE;
change = "unrunnable";
} else if(is_set(flags, pe_action_optional)) {
unset = TRUE;
change = "required";
} else if(is_set(flags, pe_action_migrate_runnable)) {
unset = TRUE;
overwrite = TRUE;
change = "unrunnable";
} else if(is_set(flags, pe_action_dangle)) {
change = "dangling";
} else if(is_set(flags, pe_action_requires_any)) {
change = "required";
} else {
crm_err("Unknown flag change to %x by %s: 0x%s",
flags, action->uuid, (reason? reason->uuid : "0"));
}
if(unset) {
if(is_set(action->flags, flags)) {
action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags);
update = TRUE;
}
} else {
if(is_not_set(action->flags, flags)) {
action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags);
update = TRUE;
}
}
if((change && update) || text) {
char *reason_text = NULL;
if(reason == NULL) {
pe_action_set_reason(action, text, overwrite);
} else if(reason->rsc == NULL) {
reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:"");
} else {
reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA");
}
if(reason_text && action->rsc != reason->rsc) {
pe_action_set_reason(action, reason_text, overwrite);
}
free(reason_text);
}
}
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
action->uuid, action->reason, crm_str(reason));
free(action->reason);
} else if (action->reason == NULL) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
action->uuid, crm_str(reason));
} else {
// crm_assert(action->reason != NULL && !overwrite);
return;
}
if (reason != NULL) {
action->reason = strdup(reason);
} else {
action->reason = NULL;
}
}
/*!
* \internal
* \brief Check whether shutdown has been requested for a node
*
* \param[in] node Node to check
*
* \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
* \note This differs from simply using node->details->shutdown in that it can
* be used before that has been determined (and in fact to determine it),
* and it can also be used to distinguish requested shutdown from implicit
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
pe__shutdown_requested(pe_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
return shutdown && strcmp(shutdown, "0");
}
/*!
* \internal
* \brief Update a data set's "recheck by" time
*
* \param[in] recheck Epoch time when recheck should happen
* \param[in,out] data_set Current working set
*/
void
pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
{
if ((recheck > get_effective_time(data_set))
&& ((data_set->recheck_by == 0)
|| (data_set->recheck_by > recheck))) {
data_set->recheck_by = recheck;
}
}
/*!
* \internal
* \brief Wrapper for pe_unpack_nvpairs() using a cluster working set
*/
void
pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set)
{
crm_time_t *next_change = crm_time_new_undefined();
pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
pe__update_recheck_time(recheck, data_set);
}
crm_time_free(next_change);
}
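/*!
 * \brief Check whether a resource is disabled via its target-role
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if target-role is Stopped, or Slave for an instance of a
 *         promotable clone, otherwise false
 */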
bool
pe__resource_is_disabled(pe_resource_t *rsc)
{
const char *target_role = NULL;
CRM_CHECK(rsc != NULL, return false);
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
if ((target_role_e == RSC_ROLE_STOPPED)
|| ((target_role_e == RSC_ROLE_SLAVE)
&& is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Create an action to clear a resource's history from CIB
*
* \param[in] rsc Resource to clear
* \param[in] node Node to clear history on
*
* \return New action to clear resource history
*/
pe_action_t *
pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
char *key = NULL;
CRM_ASSERT(rsc && node);
key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
data_set);
}
bool
pe__rsc_running_on_any_node_in_list(pe_resource_t *rsc, GListPtr node_list)
{
for (GListPtr ele = rsc->running_on; ele; ele = ele->next) {
pe_node_t *node = (pe_node_t *) ele->data;
if (pcmk__str_in_list(node_list, node->details->uname)) {
return true;
}
}
return false;
}
bool
pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GListPtr only_node)
{
return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any_node_in_list(rsc, only_node));
}
+
+GListPtr
+pe__filter_rsc_list(GListPtr rscs, GListPtr filter)
+{
+ GListPtr retval = NULL;
+
+ for (GListPtr gIter = rscs; gIter; gIter = gIter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+
+ /* I think the second condition is safe here for all callers of this
+ * function. If not, it needs to move into pe__node_text.
+ */
+ if (pcmk__str_in_list(filter, rsc_printable_id(rsc)) ||
+ (rsc->parent && pcmk__str_in_list(filter, rsc_printable_id(rsc->parent)))) {
+ retval = g_list_prepend(retval, rsc);
+ }
+ }
+
+ return retval;
+}
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index e656e8cfe9..7497b1aee9 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,2195 +1,2202 @@
/*
* Copyright 2004-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include <crm/common/internal.h>  // pcmk__ends_with_ext()
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "crm_mon.h"
#define SUMMARY "Provides a summary of the cluster's current state.\n\n" \
"Outputs varying levels of detail in a number of different formats."
/*
* Definitions indicating which items to print
*/
static unsigned int show;
/*
* Definitions indicating how to output
*/
static mon_output_format_t output_format = mon_output_unset;
/* other globals */
static GIOChannel *io_channel = NULL;
static GMainLoop *mainloop = NULL;
static guint timer_id = 0;
static mainloop_timer_t *refresh_timer = NULL;
static pe_working_set_t *mon_data_set = NULL;
static cib_t *cib = NULL;
static stonith_t *st = NULL;
static xmlNode *current_cib = NULL;
static GError *error = NULL;
static pcmk__common_args_t *args = NULL;
static pcmk__output_t *out = NULL;
static GOptionContext *context = NULL;
static gchar **processed_args = NULL;
static time_t last_refresh = 0;
crm_trigger_t *refresh_trigger = NULL;
static pcmk__supported_format_t formats[] = {
#if CURSES_ENABLED
CRM_MON_SUPPORTED_FORMAT_CURSES,
#endif
PCMK__SUPPORTED_FORMAT_HTML,
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
CRM_MON_SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
/* Define exit codes for monitoring-compatible output
* For nagios plugins, the possibilities are
* OK=0, WARN=1, CRIT=2, and UNKNOWN=3
*/
#define MON_STATUS_WARN CRM_EX_ERROR
#define MON_STATUS_CRIT CRM_EX_INVALID_PARAM
#define MON_STATUS_UNKNOWN CRM_EX_UNIMPLEMENT_FEATURE
#define RECONNECT_MSECS 5000
struct {
int reconnect_msec;
gboolean daemonize;
gboolean show_bans;
char *pid_file;
char *external_agent;
char *external_recipient;
char *neg_location_prefix;
char *only_node;
+ char *only_rsc;
unsigned int mon_ops;
GSList *user_includes_excludes;
GSList *includes_excludes;
} options = {
.reconnect_msec = RECONNECT_MSECS,
.mon_ops = mon_op_default
};
static void clean_up_connections(void);
static crm_exit_t clean_up(crm_exit_t exit_code);
static void crm_diff_update(const char *event, xmlNode * msg);
static gboolean mon_refresh_display(gpointer user_data);
static int cib_connect(gboolean full);
static void mon_st_callback_event(stonith_t * st, stonith_event_t * e);
static void mon_st_callback_display(stonith_t * st, stonith_event_t * e);
static void kick_refresh(gboolean data_updated);
static unsigned int
all_includes(mon_output_format_t fmt) {
if (fmt == mon_output_monitor || fmt == mon_output_plain || fmt == mon_output_console) {
return ~mon_show_options;
} else {
return mon_show_all;
}
}
static unsigned int
default_includes(mon_output_format_t fmt) {
switch (fmt) {
case mon_output_monitor:
case mon_output_plain:
case mon_output_console:
return mon_show_stack | mon_show_dc | mon_show_times | mon_show_counts |
mon_show_nodes | mon_show_resources | mon_show_failures;
case mon_output_xml:
case mon_output_legacy_xml:
return all_includes(fmt);
case mon_output_html:
case mon_output_cgi:
return mon_show_summary | mon_show_nodes | mon_show_resources |
mon_show_failures;
default:
return 0;
}
}
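/* Mapping of section names (as used by --include/--exclude) to mon_show_* bits */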
struct {
const char *name;
unsigned int bit;
} sections[] = {
{ "attributes", mon_show_attributes },
{ "bans", mon_show_bans },
{ "counts", mon_show_counts },
{ "dc", mon_show_dc },
{ "failcounts", mon_show_failcounts },
{ "failures", mon_show_failures },
{ "fencing", mon_show_fencing_all },
{ "fencing-failed", mon_show_fence_failed },
{ "fencing-pending", mon_show_fence_pending },
{ "fencing-succeeded", mon_show_fence_worked },
{ "nodes", mon_show_nodes },
{ "operations", mon_show_operations },
{ "options", mon_show_options },
{ "resources", mon_show_resources },
{ "stack", mon_show_stack },
{ "summary", mon_show_summary },
{ "tickets", mon_show_tickets },
{ "times", mon_show_times },
{ NULL }
};
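/* Return the mon_show_* bit for a section name, or 0 if the name is unknown */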
static unsigned int
find_section_bit(const char *name) {
for (int i = 0; sections[i].name != NULL; i++) {
if (crm_str_eq(sections[i].name, name, FALSE)) {
return sections[i].bit;
}
}
return 0;
}
static gboolean
apply_exclude(const gchar *excludes, GError **error) {
char **parts = NULL;
parts = g_strsplit(excludes, ",", 0);
for (char **s = parts; *s != NULL; s++) {
unsigned int bit = find_section_bit(*s);
if (crm_str_eq(*s, "all", TRUE)) {
show = 0;
} else if (crm_str_eq(*s, "none", TRUE)) {
show = all_includes(output_format);
} else if (bit != 0) {
show &= ~bit;
} else {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--exclude options: all, attributes, bans, counts, dc, "
"failcounts, failures, fencing, fencing-failed, "
"fencing-pending, fencing-succeeded, nodes, none, "
"operations, options, resources, stack, summary, "
"tickets, times");
return FALSE;
}
}
g_strfreev(parts);
return TRUE;
}
static gboolean
apply_include(const gchar *includes, GError **error) {
char **parts = NULL;
parts = g_strsplit(includes, ",", 0);
for (char **s = parts; *s != NULL; s++) {
unsigned int bit = find_section_bit(*s);
if (crm_str_eq(*s, "all", TRUE)) {
show = all_includes(output_format);
} else if (pcmk__starts_with(*s, "bans")) {
show |= mon_show_bans;
if (options.neg_location_prefix != NULL) {
free(options.neg_location_prefix);
options.neg_location_prefix = NULL;
}
if (strlen(*s) > 4 && (*s)[4] == ':') {
options.neg_location_prefix = strdup(*s+5);
}
} else if (crm_str_eq(*s, "default", TRUE) || crm_str_eq(*s, "defaults", TRUE)) {
show |= default_includes(output_format);
} else if (crm_str_eq(*s, "none", TRUE)) {
show = 0;
} else if (bit != 0) {
show |= bit;
} else {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--include options: all, attributes, bans[:PREFIX], counts, dc, "
"default, failcounts, failures, fencing, fencing-failed, "
"fencing-pending, fencing-succeeded, nodes, none, operations, "
"options, resources, stack, summary, tickets, times");
return FALSE;
}
}
g_strfreev(parts);
return TRUE;
}
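/* Apply a list of "--include=..." and "--exclude=..." strings in order, on top
 * of the default sections for the given output format
 */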
static gboolean
apply_include_exclude(GSList *lst, mon_output_format_t fmt, GError **error) {
gboolean rc = TRUE;
GSList *node = lst;
/* Set the default of what to display here. Note that we OR everything to
* show instead of set show directly because it could have already had some
* settings applied to it in main.
*/
show |= default_includes(fmt);
while (node != NULL) {
char *s = node->data;
if (pcmk__starts_with(s, "--include=")) {
rc = apply_include(s+10, error);
} else if (pcmk__starts_with(s, "-I=")) {
rc = apply_include(s+3, error);
} else if (pcmk__starts_with(s, "--exclude=")) {
rc = apply_exclude(s+10, error);
} else if (pcmk__starts_with(s, "-U=")) {
rc = apply_exclude(s+3, error);
}
if (rc != TRUE) {
break;
}
node = node->next;
}
return rc;
}
static gboolean
user_include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
char *s = crm_strdup_printf("%s=%s", option_name, optarg);
options.user_includes_excludes = g_slist_append(options.user_includes_excludes, s);
return TRUE;
}
static gboolean
include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
char *s = crm_strdup_printf("%s=%s", option_name, optarg);
options.includes_excludes = g_slist_append(options.includes_excludes, s);
return TRUE;
}
static gboolean
as_cgi_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (args->output_ty != NULL) {
free(args->output_ty);
}
args->output_ty = strdup("html");
output_format = mon_output_cgi;
options.mon_ops |= mon_op_one_shot;
return TRUE;
}
static gboolean
as_html_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (args->output_ty != NULL) {
free(args->output_ty);
}
if (args->output_dest != NULL) {
free(args->output_dest);
args->output_dest = NULL;
}
if (optarg != NULL) {
args->output_dest = strdup(optarg);
}
args->output_ty = strdup("html");
output_format = mon_output_html;
umask(S_IWGRP | S_IWOTH);
return TRUE;
}
static gboolean
as_simple_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (args->output_ty != NULL) {
free(args->output_ty);
}
args->output_ty = strdup("text");
output_format = mon_output_monitor;
options.mon_ops |= mon_op_one_shot;
return TRUE;
}
static gboolean
as_xml_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (args->output_ty != NULL) {
free(args->output_ty);
}
args->output_ty = strdup("xml");
output_format = mon_output_legacy_xml;
options.mon_ops |= mon_op_one_shot;
return TRUE;
}
static gboolean
fence_history_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
int rc = crm_atoi(optarg, "2");
switch (rc) {
case 3:
options.mon_ops |= mon_op_fence_full_history | mon_op_fence_history | mon_op_fence_connect;
return include_exclude_cb("--include", "fencing", data, err);
case 2:
options.mon_ops |= mon_op_fence_history | mon_op_fence_connect;
return include_exclude_cb("--include", "fencing", data, err);
case 1:
options.mon_ops |= mon_op_fence_history | mon_op_fence_connect;
return include_exclude_cb("--include", "fencing-failed,fencing-pending", data, err);
case 0:
options.mon_ops &= ~(mon_op_fence_history | mon_op_fence_connect);
return include_exclude_cb("--exclude", "fencing", data, err);
default:
g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Fence history must be 0-3");
return FALSE;
}
}
static gboolean
group_by_node_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_group_by_node;
return TRUE;
}
static gboolean
hide_headers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return include_exclude_cb("--exclude", "summary", data, err);
}
static gboolean
inactive_resources_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_inactive_resources;
return TRUE;
}
static gboolean
no_curses_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
output_format = mon_output_plain;
return TRUE;
}
static gboolean
one_shot_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_one_shot;
return TRUE;
}
static gboolean
print_brief_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_print_brief;
return TRUE;
}
static gboolean
print_clone_detail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_print_clone_detail;
return TRUE;
}
static gboolean
print_pending_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_print_pending;
return TRUE;
}
static gboolean
print_timing_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_print_timing;
return include_exclude_cb("--include", "operations", data, err);
}
static gboolean
reconnect_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
int rc = crm_get_msec(optarg);
if (rc == -1) {
g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Invalid value for -i: %s", optarg);
return FALSE;
} else {
options.reconnect_msec = crm_parse_interval_spec(optarg);
}
return TRUE;
}
static gboolean
show_attributes_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return include_exclude_cb("--include", "attributes", data, err);
}
static gboolean
show_bans_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (optarg != NULL) {
char *s = crm_strdup_printf("bans:%s", optarg);
gboolean rc = include_exclude_cb("--include", s, data, err);
free(s);
return rc;
} else {
return include_exclude_cb("--include", "bans", data, err);
}
}
static gboolean
show_failcounts_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return include_exclude_cb("--include", "failcounts", data, err);
}
static gboolean
show_operations_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return include_exclude_cb("--include", "failcounts,operations", data, err);
}
static gboolean
show_tickets_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return include_exclude_cb("--include", "tickets", data, err);
}
static gboolean
use_cib_file_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
setenv("CIB_file", optarg, 1);
options.mon_ops |= mon_op_one_shot;
return TRUE;
}
static gboolean
watch_fencing_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.mon_ops |= mon_op_watch_fencing;
return TRUE;
}
#define INDENT " "
/* *INDENT-OFF* */
static GOptionEntry addl_entries[] = {
{ "interval", 'i', 0, G_OPTION_ARG_CALLBACK, reconnect_cb,
"Update frequency (default is 5 seconds)",
"TIMESPEC" },
{ "one-shot", '1', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, one_shot_cb,
"Display the cluster status once on the console and exit",
NULL },
{ "daemonize", 'd', 0, G_OPTION_ARG_NONE, &options.daemonize,
"Run in the background as a daemon.\n"
INDENT "Requires at least one of --output-to and --external-agent.",
NULL },
{ "pid-file", 'p', 0, G_OPTION_ARG_FILENAME, &options.pid_file,
"(Advanced) Daemon pid file location",
"FILE" },
{ "external-agent", 'E', 0, G_OPTION_ARG_FILENAME, &options.external_agent,
"A program to run when resource operations take place",
"FILE" },
{ "external-recipient", 'e', 0, G_OPTION_ARG_STRING, &options.external_recipient,
"A recipient for your program (assuming you want the program to send something to someone).",
"RCPT" },
{ "watch-fencing", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, watch_fencing_cb,
"Listen for fencing events. For use with --external-agent.",
NULL },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, use_cib_file_cb,
NULL,
NULL },
{ NULL }
};
static GOptionEntry display_entries[] = {
{ "include", 'I', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb,
"A list of sections to include in the output.\n"
INDENT "See `Output Control` help for more information.",
"SECTION(s)" },
{ "exclude", 'U', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb,
"A list of sections to exclude from the output.\n"
INDENT "See `Output Control` help for more information.",
"SECTION(s)" },
{ "node", 0, 0, G_OPTION_ARG_STRING, &options.only_node,
"When displaying information about nodes, show only what's related to the given\n"
INDENT "node, or to all nodes tagged with the given tag",
"NODE" },
+ { "resource", 0, 0, G_OPTION_ARG_STRING, &options.only_rsc,
+ "When displaying information about resources, show only what's related to the given\n"
+ INDENT "resource, or to all resources tagged with the given tag",
+ "RSC" },
+
{ "group-by-node", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, group_by_node_cb,
"Group resources by node",
NULL },
{ "inactive", 'r', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, inactive_resources_cb,
"Display inactive resources",
NULL },
{ "failcounts", 'f', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_failcounts_cb,
"Display resource fail counts",
NULL },
{ "operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_operations_cb,
"Display resource operation history",
NULL },
{ "timing-details", 't', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_timing_cb,
"Display resource operation history with timing details",
NULL },
{ "tickets", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_tickets_cb,
"Display cluster tickets",
NULL },
{ "fence-history", 'm', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, fence_history_cb,
"Show fence history:\n"
INDENT "0=off, 1=failures and pending (default without option),\n"
INDENT "2=add successes (default without value for option),\n"
INDENT "3=show full history without reduction to most recent of each flavor",
"LEVEL" },
{ "neg-locations", 'L', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, show_bans_cb,
"Display negative location constraints [optionally filtered by id prefix]",
NULL },
{ "show-node-attributes", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_attributes_cb,
"Display node attributes",
NULL },
{ "hide-headers", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, hide_headers_cb,
"Hide all headers",
NULL },
{ "show-detail", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_clone_detail_cb,
"Show more details (node IDs, individual clone instances)",
NULL },
{ "brief", 'b', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_brief_cb,
"Brief output",
NULL },
{ "pending", 'j', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_pending_cb,
"Display pending state if 'record-pending' is enabled",
NULL },
{ "simple-status", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_simple_cb,
"Display the cluster status once as a simple one line output (suitable for nagios)",
NULL },
{ NULL }
};
static GOptionEntry deprecated_entries[] = {
{ "as-html", 'h', G_OPTION_FLAG_FILENAME, G_OPTION_ARG_CALLBACK, as_html_cb,
"Write cluster status to the named HTML file.\n"
INDENT "Use --output-as=html --output-to=FILE instead.",
"FILE" },
{ "as-xml", 'X', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_xml_cb,
"Write cluster status as XML to stdout. This will enable one-shot mode.\n"
INDENT "Use --output-as=xml instead.",
NULL },
{ "disable-ncurses", 'N', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, no_curses_cb,
"Disable the use of ncurses.\n"
INDENT "Use --output-as=text instead.",
NULL },
{ "web-cgi", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_cgi_cb,
"Web mode with output suitable for CGI (preselected when run as *.cgi).\n"
INDENT "Use --output-as=html --html-cgi instead.",
NULL },
{ NULL }
};
/* *INDENT-ON* */
static gboolean
mon_timer_popped(gpointer data)
{
int rc = pcmk_ok;
#if CURSES_ENABLED
if (output_format == mon_output_console) {
clear();
refresh();
}
#endif
if (timer_id > 0) {
g_source_remove(timer_id);
timer_id = 0;
}
print_as(output_format, "Reconnecting...\n");
rc = cib_connect(TRUE);
if (rc != pcmk_ok) {
timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL);
}
return FALSE;
}
static void
do_mon_cib_connection_destroy(gpointer user_data, bool is_error)
{
if (is_error) {
out->err(out, "Connection to the cluster-daemons terminated");
} else {
out->info(out, "Connection to the cluster-daemons terminated");
}
if (refresh_timer != NULL) {
/* we'll trigger a refresh after reconnect */
mainloop_timer_stop(refresh_timer);
}
if (timer_id) {
/* we'll trigger a new reconnect-timeout at the end */
g_source_remove(timer_id);
timer_id = 0;
}
if (st) {
/* the client API won't properly reconnect notifications
* if they are still in the table - so remove them
*/
st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY);
if (st->state != stonith_disconnected) {
st->cmds->disconnect(st);
}
}
if (cib) {
cib->cmds->signoff(cib);
timer_id = g_timeout_add(options.reconnect_msec, mon_timer_popped, NULL);
}
return;
}
static void
mon_cib_connection_destroy_regular(gpointer user_data)
{
do_mon_cib_connection_destroy(user_data, false);
}
static void
mon_cib_connection_destroy_error(gpointer user_data)
{
do_mon_cib_connection_destroy(user_data, true);
}
/*
* Mainloop signal handler.
*/
static void
mon_shutdown(int nsig)
{
clean_up(CRM_EX_OK);
}
#if CURSES_ENABLED
static sighandler_t ncurses_winch_handler;
static void
mon_winresize(int nsig)
{
static int not_done;
int lines = 0, cols = 0;
if (!not_done++) {
if (ncurses_winch_handler)
/* the original ncurses WINCH signal handler does the
* magic of retrieving the new window size;
* otherwise, we'd have to use ioctl or tgetent */
(*ncurses_winch_handler) (SIGWINCH);
getmaxyx(stdscr, lines, cols);
resizeterm(lines, cols);
mainloop_set_trigger(refresh_trigger);
}
not_done--;
}
#endif
static int
cib_connect(gboolean full)
{
int rc = pcmk_ok;
static gboolean need_pass = TRUE;
CRM_CHECK(cib != NULL, return -EINVAL);
if (getenv("CIB_passwd") != NULL) {
need_pass = FALSE;
}
if (is_set(options.mon_ops, mon_op_fence_connect) && st == NULL) {
st = stonith_api_new();
}
if (is_set(options.mon_ops, mon_op_fence_connect) && st != NULL && st->state == stonith_disconnected) {
rc = st->cmds->connect(st, crm_system_name, NULL);
if (rc == pcmk_ok) {
crm_trace("Setting up stonith callbacks");
if (is_set(options.mon_ops, mon_op_watch_fencing)) {
st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
mon_st_callback_event);
st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event);
} else {
st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT,
mon_st_callback_display);
st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display);
}
}
}
if (cib->state != cib_connected_query && cib->state != cib_connected_command) {
crm_trace("Connecting to the CIB");
/* Hack: the CIB signon will print the prompt for a password if needed,
* but to stderr. If we're in curses, show it on the screen instead.
*
* @TODO Add a password prompt (maybe including input) function to
* pcmk__output_t and use it in libcib.
*/
if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) {
need_pass = FALSE;
print_as(output_format, "Password:");
}
rc = cib->cmds->signon(cib, crm_system_name, cib_query);
if (rc != pcmk_ok) {
out->err(out, "Could not connect to the CIB: %s",
pcmk_strerror(rc));
return rc;
}
        rc = cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
if (rc == pcmk_ok) {
mon_refresh_display(&output_format);
}
        if (rc == pcmk_ok && full) {
            rc = cib->cmds->set_connection_dnotify(cib, mon_cib_connection_destroy_regular);
            if (rc == -EPROTONOSUPPORT) {
                print_as(output_format,
                         "Notification setup not supported, won't be able to reconnect after failure");
                if (output_format == mon_output_console) {
                    sleep(2);
                }
                rc = pcmk_ok;
            }
if (rc == pcmk_ok) {
cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update);
}
if (rc != pcmk_ok) {
out->err(out, "Notification setup failed, could not monitor CIB actions");
clean_up_connections();
}
}
}
return rc;
}
#if CURSES_ENABLED
static const char *
get_option_desc(char c)
{
const char *desc = "No help available";
    // Iterate to the { NULL } terminator; "entry" itself never becomes NULL
    for (GOptionEntry *entry = display_entries; entry->long_name != NULL; entry++) {
if (entry->short_name == c) {
desc = entry->description;
break;
}
}
return desc;
}
#define print_option_help(out, option, condition) \
    out->info(out, "%c %c: \t%s", ((condition)? '*': ' '), option, get_option_desc(option));
static gboolean
detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data)
{
int c;
gboolean config_mode = FALSE;
while (1) {
/* Get user input */
c = getchar();
switch (c) {
case 'm':
if (is_not_set(show, mon_show_fencing_all)) {
options.mon_ops |= mon_op_fence_history;
options.mon_ops |= mon_op_fence_connect;
if (st == NULL) {
mon_cib_connection_destroy_regular(NULL);
}
}
if (is_set(show, mon_show_fence_failed) || is_set(show, mon_show_fence_pending) ||
is_set(show, mon_show_fence_worked)) {
show &= ~mon_show_fencing_all;
} else {
show |= mon_show_fencing_all;
}
break;
case 'c':
show ^= mon_show_tickets;
break;
case 'f':
show ^= mon_show_failcounts;
break;
case 'n':
options.mon_ops ^= mon_op_group_by_node;
break;
case 'o':
show ^= mon_show_operations;
if (is_not_set(show, mon_show_operations)) {
options.mon_ops &= ~mon_op_print_timing;
}
break;
case 'r':
options.mon_ops ^= mon_op_inactive_resources;
break;
case 'R':
options.mon_ops ^= mon_op_print_clone_detail;
break;
case 't':
options.mon_ops ^= mon_op_print_timing;
if (is_set(options.mon_ops, mon_op_print_timing)) {
show |= mon_show_operations;
}
break;
case 'A':
show ^= mon_show_attributes;
break;
case 'L':
show ^= mon_show_bans;
break;
case 'D':
/* If any header is shown, clear them all, otherwise set them all */
if (is_set(show, mon_show_stack) || is_set(show, mon_show_dc) ||
is_set(show, mon_show_times) || is_set(show, mon_show_counts)) {
show &= ~mon_show_summary;
} else {
show |= mon_show_summary;
}
/* Regardless, we don't show options in console mode. */
show &= ~mon_show_options;
break;
case 'b':
options.mon_ops ^= mon_op_print_brief;
break;
case 'j':
options.mon_ops ^= mon_op_print_pending;
break;
case '?':
config_mode = TRUE;
break;
default:
goto refresh;
}
if (!config_mode)
goto refresh;
blank_screen();
out->info(out, "%s", "Display option change mode\n");
print_option_help(out, 'c', is_set(show, mon_show_tickets));
print_option_help(out, 'f', is_set(show, mon_show_failcounts));
print_option_help(out, 'n', is_set(options.mon_ops, mon_op_group_by_node));
print_option_help(out, 'o', is_set(show, mon_show_operations));
print_option_help(out, 'r', is_set(options.mon_ops, mon_op_inactive_resources));
print_option_help(out, 't', is_set(options.mon_ops, mon_op_print_timing));
print_option_help(out, 'A', is_set(show, mon_show_attributes));
        print_option_help(out, 'L', is_set(show, mon_show_bans));
print_option_help(out, 'D', is_not_set(show, mon_show_summary));
print_option_help(out, 'R', is_set(options.mon_ops, mon_op_print_clone_detail));
print_option_help(out, 'b', is_set(options.mon_ops, mon_op_print_brief));
print_option_help(out, 'j', is_set(options.mon_ops, mon_op_print_pending));
print_option_help(out, 'm', is_set(show, mon_show_fencing_all));
out->info(out, "%s", "\nToggle fields via field letter, type any other key to return");
}
refresh:
mon_refresh_display(NULL);
return TRUE;
}
#endif
// Basically crm_signal_handler(SIGCHLD, SIG_IGN) plus the SA_NOCLDWAIT flag
static void
avoid_zombies(void)
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
if (sigemptyset(&sa.sa_mask) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno));
return;
}
sa.sa_handler = SIG_IGN;
sa.sa_flags = SA_RESTART|SA_NOCLDWAIT;
if (sigaction(SIGCHLD, &sa, NULL) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_strerror(errno));
}
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ NULL }
};
const char *description = "Notes:\n\n"
"If this program is called as crm_mon.cgi, --output-as=html --html-cgi will\n"
"automatically be added to the command line arguments.\n\n"
"Time Specification:\n\n"
"The TIMESPEC in any command line option can be specified in many different\n"
"formats. It can be just an integer number of seconds, a number plus units\n"
"(ms/msec/us/usec/s/sec/m/min/h/hr), or an ISO 8601 period specification.\n\n"
"Output Control:\n\n"
"By default, a certain list of sections are written to the output destination.\n"
"The default varies based on the output format - XML includes everything, while\n"
"other output formats will display less. This list can be modified with the\n"
"--include and --exclude command line options. Each option may be given multiple\n"
"times on the command line, and each can give a comma-separated list of sections.\n"
"The options are applied to the default set, from left to right as seen on the\n"
"command line. For a list of valid sections, pass --include=list or --exclude=list.\n\n"
"Examples:\n\n"
"Display the cluster status on the console with updates as they occur:\n\n"
"\tcrm_mon\n\n"
"Display the cluster status on the console just once then exit:\n\n"
"\tcrm_mon -1\n\n"
"Display your cluster status, group resources by node, and include inactive resources in the list:\n\n"
"\tcrm_mon --group-by-node --inactive\n\n"
"Start crm_mon as a background daemon and have it write the cluster status to an HTML file:\n\n"
"\tcrm_mon --daemonize --output-as html --output-to /path/to/docroot/filename.html\n\n"
"Start crm_mon and export the current cluster status as XML to stdout, then exit:\n\n"
"\tcrm_mon --output-as xml\n\n";
context = pcmk__build_arg_context(args, "console (default), html, text, xml", group, NULL);
pcmk__add_main_args(context, extra_prog_entries);
g_option_context_set_description(context, description);
pcmk__add_arg_group(context, "display", "Display Options:",
"Show display options", display_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
pcmk__add_arg_group(context, "deprecated", "Deprecated Options:",
"Show deprecated options", deprecated_entries);
return context;
}
/* If certain format options were specified, we want to set some extra
* options. We can just process these like they were given on the
* command line.
*/
static void
add_output_args(void) {
GError *err = NULL;
if (output_format == mon_output_plain) {
if (!pcmk__force_args(context, &err, "%s --text-fancy", g_get_prgname())) {
g_propagate_error(&error, err);
clean_up(CRM_EX_USAGE);
}
} else if (output_format == mon_output_cgi) {
if (!pcmk__force_args(context, &err, "%s --html-cgi", g_get_prgname())) {
g_propagate_error(&error, err);
clean_up(CRM_EX_USAGE);
}
} else if (output_format == mon_output_xml) {
if (!pcmk__force_args(context, &err, "%s --xml-simple-list", g_get_prgname())) {
g_propagate_error(&error, err);
clean_up(CRM_EX_USAGE);
}
} else if (output_format == mon_output_legacy_xml) {
output_format = mon_output_xml;
if (!pcmk__force_args(context, &err, "%s --xml-legacy", g_get_prgname())) {
g_propagate_error(&error, err);
clean_up(CRM_EX_USAGE);
}
}
}
/* Which output format to use could come from two places: The --as-xml
* style arguments we gave in deprecated_entries above, or the formatted output
* arguments added by pcmk__register_formats. If the latter were used,
* output_format will be mon_output_unset.
*
* Call the callbacks as if those older style arguments were provided so
* the various things they do get done.
*/
static void
reconcile_output_format(pcmk__common_args_t *args) {
gboolean retval = TRUE;
GError *err = NULL;
if (output_format != mon_output_unset) {
return;
}
if (safe_str_eq(args->output_ty, "html")) {
char *dest = NULL;
if (args->output_dest != NULL) {
dest = strdup(args->output_dest);
}
retval = as_html_cb("h", dest, NULL, &err);
free(dest);
} else if (safe_str_eq(args->output_ty, "text")) {
retval = no_curses_cb("N", NULL, NULL, &err);
} else if (safe_str_eq(args->output_ty, "xml")) {
if (args->output_ty != NULL) {
free(args->output_ty);
}
args->output_ty = strdup("xml");
output_format = mon_output_xml;
options.mon_ops |= mon_op_one_shot;
} else if (is_set(options.mon_ops, mon_op_one_shot)) {
if (args->output_ty != NULL) {
free(args->output_ty);
}
args->output_ty = strdup("text");
output_format = mon_output_plain;
} else {
/* Neither old nor new arguments were given, so set the default. */
if (args->output_ty != NULL) {
free(args->output_ty);
}
args->output_ty = strdup("console");
output_format = mon_output_console;
}
if (!retval) {
g_propagate_error(&error, err);
clean_up(CRM_EX_USAGE);
}
}
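/* To summarize the reconciliation above: --output-as=html|text|xml selects the
 * html/plain/xml formats respectively (xml also forces one-shot mode); a bare
 * --one-shot request falls back to plain text; with no format arguments at
 * all, the interactive console format is the default.
 */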
int
main(int argc, char **argv)
{
int rc = pcmk_ok;
GOptionGroup *output_group = NULL;
args = pcmk__new_common_args(SUMMARY);
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
options.pid_file = strdup("/tmp/ClusterMon.pid");
crm_log_cli_init("crm_mon");
// Avoid needing to wait for subprocesses forked for -E/--external-agent
avoid_zombies();
if (pcmk__ends_with_ext(argv[0], ".cgi")) {
output_format = mon_output_cgi;
options.mon_ops |= mon_op_one_shot;
}
processed_args = pcmk__cmdline_preproc(argv, "ehimpxEILU");
fence_history_cb("--fence-history", "1", NULL, NULL);
/* Set an HTML title regardless of what format we will eventually use. This can't
* be done in add_output_args. That function is called after command line
* arguments are processed in the next block, which means it'll override whatever
* title the user provides. Doing this here means the user can give their own
* title on the command line.
*/
if (!pcmk__force_args(context, &error, "%s --html-title \"Cluster Status\"",
g_get_prgname())) {
return clean_up(CRM_EX_USAGE);
}
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
return clean_up(CRM_EX_USAGE);
}
for (int i = 0; i < args->verbosity; i++) {
crm_bump_log_level(argc, argv);
}
if (!args->version) {
if (args->quiet) {
include_exclude_cb("--exclude", "times", NULL, NULL);
}
if (is_set(options.mon_ops, mon_op_watch_fencing)) {
fence_history_cb("--fence-history", "0", NULL, NULL);
options.mon_ops |= mon_op_fence_connect;
}
/* create the cib-object early to be able to do further
* decisions based on the cib-source
*/
cib = cib_new();
if (cib == NULL) {
rc = -EINVAL;
} else {
switch (cib->variant) {
case cib_native:
/* cib & fencing - everything available */
break;
case cib_file:
/* Don't try to connect to fencing as we
* either don't have a running cluster or
* the fencing-information would possibly
* not match the cib data from a file.
* As we don't expect cib-updates coming
* in enforce one-shot. */
fence_history_cb("--fence-history", "0", NULL, NULL);
options.mon_ops |= mon_op_one_shot;
break;
case cib_remote:
/* updates coming in but no fencing */
fence_history_cb("--fence-history", "0", NULL, NULL);
break;
case cib_undefined:
case cib_database:
default:
/* something is odd */
rc = -EINVAL;
break;
}
}
if (is_set(options.mon_ops, mon_op_one_shot)) {
if (output_format == mon_output_console) {
output_format = mon_output_plain;
}
} else if (options.daemonize) {
if ((output_format == mon_output_console) || (output_format == mon_output_plain)) {
output_format = mon_output_none;
}
crm_enable_stderr(FALSE);
if ((args->output_dest == NULL || safe_str_eq(args->output_dest, "-")) && !options.external_agent) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--daemonize requires at least one of --output-to and --external-agent");
return clean_up(CRM_EX_USAGE);
}
if (cib) {
/* to be on the safe side don't have cib-object around
* when we are forking
*/
cib_delete(cib);
cib = NULL;
crm_make_daemon(crm_system_name, TRUE, options.pid_file);
cib = cib_new();
if (cib == NULL) {
rc = -EINVAL;
}
            /* otherwise, we have a fresh cib-object equivalent to the one we
             * destroyed before forking
             */
}
} else if (output_format == mon_output_console) {
#if CURSES_ENABLED
crm_enable_stderr(FALSE);
#else
options.mon_ops |= mon_op_one_shot;
output_format = mon_output_plain;
printf("Defaulting to one-shot mode\n");
printf("You need to have curses available at compile time to enable console mode\n");
#endif
}
}
if (rc != pcmk_ok) {
// Shouldn't really be possible
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Invalid CIB source");
return clean_up(CRM_EX_ERROR);
}
reconcile_output_format(args);
add_output_args();
if (args->version && output_format == mon_output_console) {
/* Use the text output format here if we are in curses mode but were given
* --version. Displaying version information uses printf, and then we
* immediately exit. We don't want to initialize curses for that.
*/
rc = pcmk__output_new(&out, "text", args->output_dest, argv);
} else {
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
}
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
return clean_up(CRM_EX_ERROR);
}
/* output_format MUST NOT BE CHANGED AFTER THIS POINT. */
/* Apply --include/--exclude flags we used internally. There's no error reporting
* here because this would be a programming error.
*/
apply_include_exclude(options.includes_excludes, output_format, &error);
/* And now apply any --include/--exclude flags the user gave on the command line.
* These are done in a separate pass from the internal ones because we want to
* make sure whatever the user specifies overrides whatever we do.
*/
if (!apply_include_exclude(options.user_includes_excludes, output_format, &error)) {
return clean_up(CRM_EX_USAGE);
}
crm_mon_register_messages(out);
pe__register_messages(out);
stonith__register_messages(out);
if (args->version) {
out->version(out, false);
return clean_up(CRM_EX_OK);
}
/* Extra sanity checks when in CGI mode */
if (output_format == mon_output_cgi) {
if (cib && cib->variant == cib_file) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode used with CIB file");
return clean_up(CRM_EX_USAGE);
} else if (options.external_agent != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode cannot be used with --external-agent");
return clean_up(CRM_EX_USAGE);
} else if (options.daemonize == TRUE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "CGI mode cannot be used with -d");
return clean_up(CRM_EX_USAGE);
}
}
if (output_format == mon_output_xml || output_format == mon_output_legacy_xml) {
options.mon_ops |= mon_op_print_timing | mon_op_inactive_resources;
}
if ((output_format == mon_output_html || output_format == mon_output_cgi) &&
out->dest != stdout) {
pcmk__html_add_header("meta", "http-equiv", "refresh", "content",
crm_itoa(options.reconnect_msec/1000), NULL);
}
crm_info("Starting %s", crm_system_name);
if (cib) {
do {
if (is_not_set(options.mon_ops, mon_op_one_shot)) {
print_as(output_format ,"Waiting until cluster is available on this node ...\n");
}
rc = cib_connect(is_not_set(options.mon_ops, mon_op_one_shot));
if (is_set(options.mon_ops, mon_op_one_shot)) {
break;
} else if (rc != pcmk_ok) {
sleep(options.reconnect_msec / 1000);
#if CURSES_ENABLED
if (output_format == mon_output_console) {
clear();
refresh();
}
#endif
} else {
if (output_format == mon_output_html && out->dest != stdout) {
printf("Writing html to %s ...\n", args->output_dest);
}
}
} while (rc == -ENOTCONN);
}
if (rc != pcmk_ok) {
if (output_format == mon_output_monitor) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "CLUSTER CRIT: Connection to cluster failed: %s",
pcmk_strerror(rc));
return clean_up(MON_STATUS_CRIT);
} else {
if (rc == -ENOTCONN) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node");
} else {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_strerror(rc));
}
}
return clean_up(crm_errno2exit(rc));
}
if (is_set(options.mon_ops, mon_op_one_shot)) {
return clean_up(CRM_EX_OK);
}
mainloop = g_main_loop_new(NULL, FALSE);
mainloop_add_signal(SIGTERM, mon_shutdown);
mainloop_add_signal(SIGINT, mon_shutdown);
#if CURSES_ENABLED
if (output_format == mon_output_console) {
ncurses_winch_handler = crm_signal_handler(SIGWINCH, mon_winresize);
if (ncurses_winch_handler == SIG_DFL ||
ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR)
ncurses_winch_handler = NULL;
io_channel = g_io_channel_unix_new(STDIN_FILENO);
g_io_add_watch(io_channel, G_IO_IN, detect_user_input, NULL);
}
#endif
refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);
g_main_loop_run(mainloop);
g_main_loop_unref(mainloop);
if (io_channel != NULL) {
g_io_channel_shutdown(io_channel, TRUE, NULL);
}
crm_info("Exiting %s", crm_system_name);
return clean_up(CRM_EX_OK);
}
/*!
* \internal
* \brief Print one-line status suitable for use with monitoring software
*
* \param[in] data_set Working set of CIB state
* \param[in] history List of stonith actions
*
* \note This function's output (and the return code when the program exits)
* should conform to https://www.monitoring-plugins.org/doc/guidelines.html
*/
static void
print_simple_status(pcmk__output_t *out, pe_working_set_t * data_set,
stonith_history_t *history, unsigned int mon_ops)
{
GListPtr gIter = NULL;
int nodes_online = 0;
int nodes_standby = 0;
int nodes_maintenance = 0;
char *offline_nodes = NULL;
gboolean no_dc = FALSE;
gboolean offline = FALSE;
if (data_set->dc_node == NULL) {
mon_ops |= mon_op_has_warnings;
no_dc = TRUE;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
if (node->details->standby && node->details->online) {
nodes_standby++;
} else if (node->details->maintenance && node->details->online) {
nodes_maintenance++;
} else if (node->details->online) {
nodes_online++;
} else {
char *s = crm_strdup_printf("offline node: %s", node->details->uname);
/* coverity[leaked_storage] False positive */
offline_nodes = pcmk__add_word(offline_nodes, s);
free(s);
mon_ops |= mon_op_has_warnings;
offline = TRUE;
}
}
if (is_set(mon_ops, mon_op_has_warnings)) {
out->info(out, "CLUSTER WARN:%s%s%s",
no_dc ? " No DC" : "",
no_dc && offline ? "," : "",
offline ? offline_nodes : "");
free(offline_nodes);
} else {
char *nodes_standby_s = NULL;
char *nodes_maint_s = NULL;
if (nodes_standby > 0) {
nodes_standby_s = crm_strdup_printf(", %d standby node%s", nodes_standby,
pcmk__plural_s(nodes_standby));
}
if (nodes_maintenance > 0) {
nodes_maint_s = crm_strdup_printf(", %d maintenance node%s",
nodes_maintenance,
pcmk__plural_s(nodes_maintenance));
}
out->info(out, "CLUSTER OK: %d node%s online%s%s, "
"%d resource instance%s configured",
nodes_online, pcmk__plural_s(nodes_online),
nodes_standby_s != NULL ? nodes_standby_s : "",
nodes_maint_s != NULL ? nodes_maint_s : "",
data_set->ninstances, pcmk__plural_s(data_set->ninstances));
free(nodes_standby_s);
free(nodes_maint_s);
}
/* coverity[leaked_storage] False positive */
}
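/* Illustrative output from the above, for a hypothetical healthy cluster:
 *
 *   CLUSTER OK: 2 nodes online, 1 standby node, 11 resource instances configured
 *
 * or, when the cluster has no DC:
 *
 *   CLUSTER WARN: No DC
 */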
/*!
* \internal
 * \brief Reduce the stonith history
 *
 * For successful actions, keep only the most recent entry per action type
 * and target; for failed actions, additionally distinguish entries by who
 * failed; keep every entry for actions still in progress.
 *
 * \param[in] history List of stonith actions
 *
 * \return The reduced history list
*/
static stonith_history_t *
reduce_stonith_history(stonith_history_t *history)
{
stonith_history_t *new = history, *hp, *np;
if (new) {
hp = new->next;
new->next = NULL;
while (hp) {
stonith_history_t *hp_next = hp->next;
hp->next = NULL;
for (np = new; ; np = np->next) {
if ((hp->state == st_done) || (hp->state == st_failed)) {
/* action not in progress */
if (safe_str_eq(hp->target, np->target) &&
safe_str_eq(hp->action, np->action) &&
(hp->state == np->state) &&
((hp->state == st_done) ||
safe_str_eq(hp->delegate, np->delegate))) {
/* purge older hp */
stonith_history_free(hp);
break;
}
}
if (!np->next) {
np->next = hp;
break;
}
}
hp = hp_next;
}
}
return new;
}
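/* For example (hypothetical history): three completed reboots of node1 reduce
 * to the most recent one; failed reboots of node1 are kept separately per
 * failing delegate; and anything still in progress is never dropped.
 */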
static int
send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
int status, const char *desc)
{
pid_t pid;
    /* setenv() needs strings; these are ints */
char *rc_s = crm_itoa(rc);
char *status_s = crm_itoa(status);
char *target_rc_s = crm_itoa(target_rc);
crm_debug("Sending external notification to '%s' via '%s'", options.external_recipient, options.external_agent);
if(rsc) {
setenv("CRM_notify_rsc", rsc, 1);
}
if (options.external_recipient) {
setenv("CRM_notify_recipient", options.external_recipient, 1);
}
setenv("CRM_notify_node", node, 1);
setenv("CRM_notify_task", task, 1);
setenv("CRM_notify_desc", desc, 1);
setenv("CRM_notify_rc", rc_s, 1);
setenv("CRM_notify_target_rc", target_rc_s, 1);
setenv("CRM_notify_status", status_s, 1);
pid = fork();
if (pid == -1) {
crm_perror(LOG_ERR, "notification fork() failed.");
}
if (pid == 0) {
/* crm_debug("notification: I am the child. Executing the nofitication program."); */
execl(options.external_agent, options.external_agent, NULL);
exit(CRM_EX_ERROR);
}
crm_trace("Finished running custom notification program '%s'.", options.external_agent);
free(target_rc_s);
free(status_s);
free(rc_s);
return 0;
}
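/* A minimal external agent consuming the environment set above might be the
 * following (hypothetical) shell script:
 *
 *   #!/bin/sh
 *   echo "$CRM_notify_task of $CRM_notify_rsc on $CRM_notify_node:" \
 *        "$CRM_notify_desc (rc=$CRM_notify_rc)" >> /tmp/crm_mon_events.log
 */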
static void
handle_rsc_op(xmlNode * xml, const char *node_id)
{
int rc = -1;
int status = -1;
int target_rc = -1;
gboolean notify = TRUE;
char *rsc = NULL;
char *task = NULL;
const char *desc = NULL;
const char *magic = NULL;
const char *id = NULL;
const char *node = NULL;
xmlNode *n = xml;
xmlNode * rsc_op = xml;
if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) {
xmlNode *cIter;
for(cIter = xml->children; cIter; cIter = cIter->next) {
handle_rsc_op(cIter, node_id);
}
return;
}
id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY);
if (id == NULL) {
/* Compatibility with <= 1.1.5 */
id = ID(rsc_op);
}
magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC);
if (magic == NULL) {
/* non-change */
return;
}
if (!decode_transition_magic(magic, NULL, NULL, NULL, &status, &rc,
&target_rc)) {
crm_err("Invalid event %s detected for %s", magic, id);
return;
}
if (parse_op_key(id, &rsc, &task, NULL) == FALSE) {
crm_err("Invalid event detected for %s", id);
goto bail;
}
node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET);
while (n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) {
n = n->parent;
}
if(node == NULL && n) {
node = crm_element_value(n, XML_ATTR_UNAME);
}
if (node == NULL && n) {
node = ID(n);
}
if (node == NULL) {
node = node_id;
}
if (node == NULL) {
crm_err("No node detected for event %s (%s)", magic, id);
goto bail;
}
/* look up where we expected it to be? */
desc = pcmk_strerror(pcmk_ok);
if (status == PCMK_LRM_OP_DONE && target_rc == rc) {
crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
if (rc == PCMK_OCF_NOT_RUNNING) {
notify = FALSE;
}
} else if (status == PCMK_LRM_OP_DONE) {
desc = services_ocf_exitcode_str(rc);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
} else {
desc = services_lrm_status_str(status);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
}
if (notify && options.external_agent) {
send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
}
bail:
free(rsc);
free(task);
}
static gboolean
mon_trigger_refresh(gpointer user_data)
{
mainloop_set_trigger(refresh_trigger);
return FALSE;
}
#define NODE_PATT "/lrm[@id="
static char *
get_node_from_xpath(const char *xpath)
{
char *nodeid = NULL;
char *tmp = strstr(xpath, NODE_PATT);
if(tmp) {
tmp += strlen(NODE_PATT);
tmp += 1;
nodeid = strdup(tmp);
tmp = strstr(nodeid, "\'");
CRM_ASSERT(tmp);
tmp[0] = 0;
}
return nodeid;
}
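/* For example, given a (hypothetical) xpath such as
 *   /cib/status/node_state[@id='3']/lrm[@id='3']/lrm_resources
 * the code above advances past "/lrm[@id=" and the opening quote, then
 * truncates at the closing quote, returning "3".
 */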
static void
crm_diff_update_v2(const char *event, xmlNode * msg)
{
xmlNode *change = NULL;
xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) {
const char *name = NULL;
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *xpath = crm_element_value(change, XML_DIFF_PATH);
xmlNode *match = NULL;
const char *node = NULL;
if(op == NULL) {
continue;
} else if(strcmp(op, "create") == 0) {
match = change->children;
} else if(strcmp(op, "move") == 0) {
continue;
} else if(strcmp(op, "delete") == 0) {
continue;
} else if(strcmp(op, "modify") == 0) {
match = first_named_child(change, XML_DIFF_RESULT);
if(match) {
match = match->children;
}
}
if(match) {
name = (const char *)match->name;
}
crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name);
if(xpath == NULL) {
/* Version field, ignore */
} else if(name == NULL) {
crm_debug("No result for %s operation to %s", op, xpath);
CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0);
} else if(strcmp(name, XML_TAG_CIB) == 0) {
xmlNode *state = NULL;
xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS);
for (state = __xml_first_child_element(status); state != NULL;
state = __xml_next_element(state)) {
node = crm_element_value(state, XML_ATTR_UNAME);
if (node == NULL) {
node = ID(state);
}
handle_rsc_op(state, node);
}
} else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) {
xmlNode *state = NULL;
for (state = __xml_first_child_element(match); state != NULL;
state = __xml_next_element(state)) {
node = crm_element_value(state, XML_ATTR_UNAME);
if (node == NULL) {
node = ID(state);
}
handle_rsc_op(state, node);
}
} else if(strcmp(name, XML_CIB_TAG_STATE) == 0) {
node = crm_element_value(match, XML_ATTR_UNAME);
if (node == NULL) {
node = ID(match);
}
handle_rsc_op(match, node);
} else if(strcmp(name, XML_CIB_TAG_LRM) == 0) {
node = ID(match);
handle_rsc_op(match, node);
} else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) {
char *local_node = get_node_from_xpath(xpath);
handle_rsc_op(match, local_node);
free(local_node);
} else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) {
char *local_node = get_node_from_xpath(xpath);
handle_rsc_op(match, local_node);
free(local_node);
} else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) {
char *local_node = get_node_from_xpath(xpath);
handle_rsc_op(match, local_node);
free(local_node);
} else {
crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name);
}
}
}
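/* A v2 patch "change" entry handled above looks roughly like this
 * (hypothetical example):
 *
 *   <change operation="modify" path="/cib/status/node_state[@id='1']/lrm[@id='1']">
 *     <change-result> ...updated lrm subtree... </change-result>
 *   </change>
 */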
static void
crm_diff_update_v1(const char *event, xmlNode * msg)
{
/* Process operation updates */
xmlXPathObject *xpathObj = xpath_search(msg,
"//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
"//" XML_LRM_TAG_RSC_OP);
int lpc = 0, max = numXpathResults(xpathObj);
for (lpc = 0; lpc < max; lpc++) {
xmlNode *rsc_op = getXpathResult(xpathObj, lpc);
handle_rsc_op(rsc_op, NULL);
}
freeXpathObject(xpathObj);
}
static void
crm_diff_update(const char *event, xmlNode * msg)
{
int rc = -1;
static bool stale = FALSE;
gboolean cib_updated = FALSE;
xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT);
print_dot(output_format);
if (current_cib != NULL) {
rc = xml_apply_patchset(current_cib, diff, TRUE);
switch (rc) {
case -pcmk_err_diff_resync:
case -pcmk_err_diff_failed:
crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(current_cib); current_cib = NULL;
break;
case pcmk_ok:
cib_updated = TRUE;
break;
default:
crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
free_xml(current_cib); current_cib = NULL;
}
}
if (current_cib == NULL) {
crm_trace("Re-requesting the full cib");
        cib->cmds->query(cib, NULL, &current_cib, cib_scope_local | cib_sync_call);
}
if (options.external_agent) {
int format = 0;
crm_element_value_int(diff, "format", &format);
switch(format) {
case 1:
crm_diff_update_v1(event, msg);
break;
case 2:
crm_diff_update_v2(event, msg);
break;
default:
crm_err("Unknown patch format: %d", format);
}
}
if (current_cib == NULL) {
if(!stale) {
print_as(output_format, "--- Stale data ---");
}
stale = TRUE;
return;
}
stale = FALSE;
kick_refresh(cib_updated);
}
static gboolean
mon_refresh_display(gpointer user_data)
{
xmlNode *cib_copy = copy_xml(current_cib);
stonith_history_t *stonith_history = NULL;
int history_rc = 0;
last_refresh = time(NULL);
if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) {
if (cib) {
cib->cmds->signoff(cib);
}
out->err(out, "Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation));
clean_up(CRM_EX_CONFIG);
return FALSE;
}
    /* Get the stonith history if there is evidence we need it */
while (is_set(options.mon_ops, mon_op_fence_history)) {
if (st != NULL) {
history_rc = st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120);
if (history_rc != 0) {
out->err(out, "Critical: Unable to get stonith-history");
mon_cib_connection_destroy_error(NULL);
} else {
stonith_history = stonith__sort_history(stonith_history);
if (is_not_set(options.mon_ops, mon_op_fence_full_history) && output_format != mon_output_xml) {
stonith_history = reduce_stonith_history(stonith_history);
}
                break; /* success; all other paths fall through to the error handling below */
}
} else {
out->err(out, "Critical: No stonith-API");
}
free_xml(cib_copy);
out->err(out, "Reading stonith-history failed");
return FALSE;
}
if (mon_data_set == NULL) {
mon_data_set = pe_new_working_set();
CRM_ASSERT(mon_data_set != NULL);
}
set_bit(mon_data_set->flags, pe_flag_no_compat);
mon_data_set->input = cib_copy;
cluster_status(mon_data_set);
/* Unpack constraints if any section will need them
* (tickets may be referenced in constraints but not granted yet,
* and bans need negative location constraints) */
if (is_set(show, mon_show_bans) || is_set(show, mon_show_tickets)) {
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
mon_data_set->input);
unpack_constraints(cib_constraints, mon_data_set);
}
switch (output_format) {
case mon_output_html:
case mon_output_cgi:
if (print_html_status(out, mon_data_set, stonith_history, options.mon_ops,
show, options.neg_location_prefix,
- options.only_node) != 0) {
+ options.only_node, options.only_rsc) != 0) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_CANTCREAT, "Critical: Unable to output html file");
clean_up(CRM_EX_CANTCREAT);
return FALSE;
}
break;
case mon_output_legacy_xml:
case mon_output_xml:
print_xml_status(out, mon_data_set, crm_errno2exit(history_rc),
stonith_history, options.mon_ops, show,
- options.neg_location_prefix, options.only_node);
+ options.neg_location_prefix, options.only_node,
+ options.only_rsc);
break;
case mon_output_monitor:
print_simple_status(out, mon_data_set, stonith_history, options.mon_ops);
if (is_set(options.mon_ops, mon_op_has_warnings)) {
clean_up(MON_STATUS_WARN);
return FALSE;
}
break;
case mon_output_console:
/* If curses is not enabled, this will just fall through to the plain
* text case.
*/
#if CURSES_ENABLED
blank_screen();
print_status(out, mon_data_set, stonith_history, options.mon_ops, show,
- options.neg_location_prefix, options.only_node);
+ options.neg_location_prefix, options.only_node, options.only_rsc);
refresh();
break;
#endif
case mon_output_plain:
print_status(out, mon_data_set, stonith_history, options.mon_ops, show,
- options.neg_location_prefix, options.only_node);
+ options.neg_location_prefix, options.only_node, options.only_rsc);
break;
case mon_output_unset:
case mon_output_none:
break;
}
if (options.daemonize) {
out->reset(out);
}
stonith_history_free(stonith_history);
stonith_history = NULL;
pe_reset_working_set(mon_data_set);
return TRUE;
}
static void
mon_st_callback_event(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
/* disconnect cib as well and have everything reconnect */
mon_cib_connection_destroy_regular(NULL);
} else if (options.external_agent) {
char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)",
e->operation, e->origin, e->target, pcmk_strerror(e->result),
e->id);
send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
free(desc);
}
}
static void
kick_refresh(gboolean data_updated)
{
static int updates = 0;
time_t now = time(NULL);
if (data_updated) {
updates++;
}
if(refresh_timer == NULL) {
refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL);
}
    /* Refresh the display:
     * - immediately, if the last refresh was longer ago than the reconnect
     *   interval (default 5s)
     * - immediately, on every 10th CIB update since the last refresh
     * - otherwise, at most 2s after the last update (via refresh_timer)
     */
if ((now - last_refresh) > (options.reconnect_msec / 1000)) {
mainloop_set_trigger(refresh_trigger);
mainloop_timer_stop(refresh_timer);
updates = 0;
} else if(updates >= 10) {
mainloop_set_trigger(refresh_trigger);
mainloop_timer_stop(refresh_timer);
updates = 0;
} else {
mainloop_timer_start(refresh_timer);
}
}
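/* For example, with the default 5s reconnect interval: a burst of CIB updates
 * triggers an immediate redraw on the 10th update, or a timed redraw 2s after
 * the burst ends, whichever comes first; a lone update arriving more than 5s
 * after the previous refresh redraws immediately.
 */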
static void
mon_st_callback_display(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
/* disconnect cib as well and have everything reconnect */
mon_cib_connection_destroy_regular(NULL);
} else {
print_dot(output_format);
kick_refresh(TRUE);
}
}
static void
clean_up_connections(void)
{
if (cib != NULL) {
cib->cmds->signoff(cib);
cib_delete(cib);
cib = NULL;
}
if (st != NULL) {
if (st->state != stonith_disconnected) {
st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE);
st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY);
st->cmds->disconnect(st);
}
stonith_api_delete(st);
st = NULL;
}
}
/*
* De-init ncurses, disconnect from the CIB manager, disconnect fencing,
* deallocate memory and show usage-message if requested.
*
* We don't actually return, but nominally returning crm_exit_t allows a usage
* like "return clean_up(exit_code);" which helps static analysis understand the
* code flow.
*/
static crm_exit_t
clean_up(crm_exit_t exit_code)
{
/* Quitting crm_mon is much more complicated than it ought to be. */
/* (1) Close connections, free things, etc. */
clean_up_connections();
free(options.pid_file);
free(options.neg_location_prefix);
g_slist_free_full(options.includes_excludes, free);
pe_free_working_set(mon_data_set);
mon_data_set = NULL;
g_strfreev(processed_args);
/* (2) If this is abnormal termination and we're in curses mode, shut down
* curses first. Any messages displayed to the screen before curses is shut
* down will be lost because doing the shut down will also restore the
* screen to whatever it looked like before crm_mon was started.
*/
if ((error != NULL || exit_code == CRM_EX_USAGE) && output_format == mon_output_console) {
out->finish(out, exit_code, false, NULL);
pcmk__output_free(out);
out = NULL;
}
/* (3) If this is a command line usage related failure, print the usage
* message.
*/
if (exit_code == CRM_EX_USAGE && (output_format == mon_output_console || output_format == mon_output_plain)) {
char *help = g_option_context_get_help(context, TRUE, NULL);
fprintf(stderr, "%s", help);
g_free(help);
}
pcmk__free_arg_context(context);
/* (4) If this is any kind of error, print the error out and exit. Make
* sure to handle situations both before and after formatted output is
* set up. We want errors to appear formatted if at all possible.
*/
if (error != NULL) {
if (out != NULL) {
out->err(out, "%s: %s", g_get_prgname(), error->message);
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
} else {
fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message);
}
g_clear_error(&error);
crm_exit(exit_code);
}
/* (5) Print formatted output to the screen if we made it far enough in
* crm_mon to be able to do so.
*/
if (out != NULL) {
if (options.daemonize) {
out->dest = freopen(NULL, "w", out->dest);
CRM_ASSERT(out->dest != NULL);
}
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
pcmk__unregister_formats();
}
crm_exit(exit_code);
}
diff --git a/tools/crm_mon.h b/tools/crm_mon.h
index 2dbe8e4d99..b548b12be6 100644
--- a/tools/crm_mon.h
+++ b/tools/crm_mon.h
@@ -1,125 +1,127 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
/* Never display node attributes whose name starts with one of these prefixes */
#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
"shutdown", "terminate", "standby", "probe_complete", \
"#", NULL }
#if CURSES_ENABLED
# define print_dot(output_format) if (output_format == mon_output_console) { \
printw("."); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, "."); \
}
#else
# define print_dot(output_format) fprintf(stdout, ".");
#endif
#if CURSES_ENABLED
# define print_as(output_format, fmt, args...) if (output_format == mon_output_console) { \
printw(fmt, ##args); \
clrtoeol(); \
refresh(); \
} else { \
fprintf(stdout, fmt, ##args); \
}
#else
# define print_as(output_format, fmt, args...) fprintf(stdout, fmt, ##args);
#endif
typedef enum mon_output_format_e {
mon_output_unset,
mon_output_none,
mon_output_monitor,
mon_output_plain,
mon_output_console,
mon_output_xml,
mon_output_legacy_xml,
mon_output_html,
mon_output_cgi
} mon_output_format_t;
#define mon_show_stack (1 << 0)
#define mon_show_dc (1 << 1)
#define mon_show_times (1 << 2)
#define mon_show_counts (1 << 3)
#define mon_show_options (1 << 4)
#define mon_show_nodes (1 << 5)
#define mon_show_resources (1 << 6)
#define mon_show_attributes (1 << 7)
#define mon_show_failcounts (1 << 8)
#define mon_show_operations (1 << 9)
#define mon_show_fence_failed (1 << 10)
#define mon_show_fence_pending (1 << 11)
#define mon_show_fence_worked (1 << 12)
#define mon_show_tickets (1 << 13)
#define mon_show_bans (1 << 14)
#define mon_show_failures (1 << 15)
#define mon_show_fencing_all (mon_show_fence_failed | mon_show_fence_pending | mon_show_fence_worked)
#define mon_show_summary (mon_show_stack | mon_show_dc | mon_show_times | mon_show_counts)
#define mon_show_all (mon_show_summary | mon_show_nodes | mon_show_resources | \
mon_show_attributes | mon_show_failcounts | mon_show_operations | \
mon_show_fencing_all | mon_show_tickets | mon_show_bans | \
mon_show_failures | mon_show_options)
#define mon_op_group_by_node (0x0001U)
#define mon_op_inactive_resources (0x0002U)
#define mon_op_one_shot (0x0004U)
#define mon_op_has_warnings (0x0008U)
#define mon_op_print_timing (0x0010U)
#define mon_op_watch_fencing (0x0020U)
#define mon_op_fence_history (0x0040U)
#define mon_op_fence_full_history (0x0080U)
#define mon_op_fence_connect (0x0100U)
#define mon_op_print_brief (0x0200U)
#define mon_op_print_pending (0x0400U)
#define mon_op_print_clone_detail (0x0800U)
#define mon_op_default (mon_op_print_pending | mon_op_fence_history | mon_op_fence_connect)
void print_status(pcmk__output_t *out, pe_working_set_t *data_set,
stonith_history_t *stonith_history, unsigned int mon_ops,
- unsigned int show, char *prefix, char *only_node);
+ unsigned int show, char *prefix, char *only_node,
+ char *only_rsc);
void print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
crm_exit_t history_rc, stonith_history_t *stonith_history,
unsigned int mon_ops, unsigned int show, char *prefix,
- char *only_node);
+ char *only_node, char *only_rsc);
int print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
stonith_history_t *stonith_history, unsigned int mon_ops,
- unsigned int show, char *prefix, char *only_node);
+ unsigned int show, char *prefix, char *only_node,
+ char *only_rsc);
GList *append_attr_list(GList *attr_list, char *name);
void blank_screen(void);
void crm_mon_get_parameters(pe_resource_t *rsc, pe_working_set_t *data_set);
unsigned int get_resource_display_options(unsigned int mon_ops);
void crm_mon_register_messages(pcmk__output_t *out);
pcmk__output_t *crm_mon_mk_curses_output(char **argv);
void curses_indented_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
void curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
#if CURSES_ENABLED
extern GOptionEntry crm_mon_curses_output_entries[];
#define CRM_MON_SUPPORTED_FORMAT_CURSES { "console", crm_mon_mk_curses_output, crm_mon_curses_output_entries }
#endif
pcmk__output_t *crm_mon_mk_xml_output(char **argv);
#define CRM_MON_SUPPORTED_FORMAT_XML { "xml", crm_mon_mk_xml_output, pcmk__xml_output_entries }
diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c
index 3cd3c08df1..2c2de78049 100644
--- a/tools/crm_mon_print.c
+++ b/tools/crm_mon_print.c
@@ -1,984 +1,1086 @@
/*
* Copyright 2019-2020 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "crm_mon.h"
static void print_resources_heading(pcmk__output_t *out, unsigned int mon_ops);
static void print_resources_closing(pcmk__output_t *out, unsigned int mon_ops);
static int print_resources(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int print_opts, unsigned int mon_ops, gboolean brief_output,
- gboolean print_summary, GListPtr only_node, gboolean print_spacer);
+ gboolean print_summary, GListPtr only_node, GListPtr only_rsc,
+ gboolean print_spacer);
static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set,
pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops,
GListPtr op_list);
static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
pe_node_t *node, xmlNode *node_state, gboolean operations,
- unsigned int mon_ops, GListPtr only_node);
+ unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc);
static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list,
const char *attrname, int *expected_score);
static void print_node_attribute(gpointer name, gpointer user_data);
static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean operations, unsigned int mon_ops,
- GListPtr only_snow, gboolean print_spacer);
+ GListPtr only_node, GListPtr only_rsc, gboolean print_spacer);
static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean print_spacer);
static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, const char *prefix,
- gboolean print_spacer);
+ GListPtr only_rsc, gboolean print_spacer);
static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, GListPtr only_node,
- gboolean print_spacer);
+ GListPtr only_rsc, gboolean print_spacer);
static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
GListPtr only_node, gboolean print_spacer);
static GListPtr
build_uname_list(pe_working_set_t *data_set, const char *s) {
GListPtr unames = NULL;
if (s == NULL || strcmp(s, "*") == 0) {
/* Nothing was given so return a list of all node names. Or, '*' was
* given. This would normally fall into the pe__unames_with_tag branch
* where it will return an empty list. Catch it here instead.
*/
unames = g_list_prepend(unames, strdup("*"));
} else {
pe_node_t *node = pe_find_node(data_set->nodes, s);
if (node) {
/* The given string was a valid uname for a node. Return a
* singleton list containing just that uname.
*/
unames = g_list_prepend(unames, strdup(s));
} else {
/* The given string was not a valid uname. It's either a tag or
* it's a typo or something. In the first case, we'll return a
* list of all the unames of the nodes with the given tag. In the
* second case, we'll return a NULL pointer and nothing will
* get displayed.
*/
unames = pe__unames_with_tag(data_set, s);
}
}
return unames;
}
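/* For example (hypothetical names): --node=node1 yields the singleton list
 * ["node1"]; omitting --node (or passing "*") yields ["*"], which matches
 * every node; and a tag name yields the unames of all nodes carrying that
 * tag.
 */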
+static GListPtr
+build_rsc_list(pe_working_set_t *data_set, const char *s) {
+ GListPtr resources = NULL;
+
+ if (s == NULL || strcmp(s, "*") == 0) {
+ resources = g_list_prepend(resources, strdup("*"));
+ } else {
+ pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
+ pe_find_renamed|pe_find_any);
+
+ if (rsc) {
+ /* A colon in the name we were given means we're being asked to filter
+ * on a specific instance of a cloned resource. Put that exact string
+ * into the filter list. Otherwise, use the printable ID of whatever
+ * resource was found that matches what was asked for.
+ */
+ if (strstr(s, ":") != NULL) {
+ resources = g_list_prepend(resources, strdup(rsc->id));
+ } else {
+ resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
+ }
+ } else {
+ /* The given string was not a valid resource name. It's either
+ * a tag or it's a typo or something. See build_uname_list for
+ * more detail.
+ */
+ resources = pe__rscs_with_tag(data_set, s);
+ }
+ }
+
+ return resources;
+}
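+/* For example (hypothetical names): --resource=my-ip yields the singleton
+ * list ["my-ip"] (the printable ID of the matched resource); a name with a
+ * colon, such as "my-clone:0", is kept verbatim so a single clone instance
+ * can be filtered; and a string matching no resource is treated as a tag.
+ */
+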
+
/*!
* \internal
* \brief Print resources section heading appropriate to options
*
* \param[in] out The output functions structure.
* \param[in] mon_ops Bitmask of mon_op_*.
*/
static void
print_resources_heading(pcmk__output_t *out, unsigned int mon_ops)
{
const char *heading;
if (is_set(mon_ops, mon_op_group_by_node)) {
/* Active resources have already been printed by node */
heading = is_set(mon_ops, mon_op_inactive_resources) ? "Inactive Resources" : NULL;
} else if (is_set(mon_ops, mon_op_inactive_resources)) {
heading = "Full List of Resources";
} else {
heading = "Active Resources";
}
/* Print section heading */
out->begin_list(out, NULL, NULL, "%s", heading);
}
/*!
* \internal
* \brief Print whatever resource section closing is appropriate
*
* \param[in] out The output functions structure.
* \param[in] mon_ops Bitmask of mon_op_*.
*/
static void
print_resources_closing(pcmk__output_t *out, unsigned int mon_ops)
{
const char *heading;
/* What type of resources we did or did not display */
if (is_set(mon_ops, mon_op_group_by_node)) {
heading = "inactive ";
} else if (is_set(mon_ops, mon_op_inactive_resources)) {
heading = "";
} else {
heading = "active ";
}
out->list_item(out, NULL, "No %sresources", heading);
}
/*!
* \internal
* \brief Print whatever resource section(s) are appropriate
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] print_opts Bitmask of pe_print_*.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] brief_output Whether to display full or brief output.
* \param[in] print_summary Whether to display a failure summary.
*/
static int
print_resources(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int print_opts, unsigned int mon_ops, gboolean brief_output,
- gboolean print_summary, GListPtr only_node, gboolean print_spacer)
+ gboolean print_summary, GListPtr only_node, GListPtr only_rsc,
+ gboolean print_spacer)
{
GListPtr rsc_iter;
int rc = pcmk_rc_no_output;
/* If we already showed active resources by node, and
* we're not showing inactive resources, we have nothing to do
*/
if (is_set(mon_ops, mon_op_group_by_node) && is_not_set(mon_ops, mon_op_inactive_resources)) {
return rc;
}
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resources_heading(out, mon_ops);
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
if (brief_output && is_not_set(mon_ops, mon_op_group_by_node)) {
- pe__rscs_brief_output(out, data_set->resources, print_opts,
+ GListPtr rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
+
+ pe__rscs_brief_output(out, rscs, print_opts,
is_set(mon_ops, mon_op_inactive_resources));
+ g_list_free(rscs);
}
/* For each resource, display it if appropriate */
for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
+ int x;
/* Complex resources may have some sub-resources active and some inactive */
gboolean is_active = rsc->fns->active(rsc, TRUE);
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
} else if (is_set(mon_ops, mon_op_group_by_node)) {
if (is_active) {
continue;
}
/* Skip primitives already counted in a brief summary */
} else if (brief_output && (rsc->variant == pe_native)) {
continue;
/* Skip resources that aren't at least partially active,
* unless we're displaying inactive resources
*/
} else if (!partially_active && is_not_set(mon_ops, mon_op_inactive_resources)) {
continue;
} else if (partially_active && !pe__rsc_running_on_any_node_in_list(rsc, only_node)) {
continue;
}
/* Print this resource */
- rc = pcmk_rc_ok;
- out->message(out, crm_map_element_name(rsc->xml), print_opts, rsc, only_node);
+        ret = out->message(out, crm_map_element_name(rsc->xml), print_opts,
+                           rsc, only_node, only_rsc);
+        if (ret == pcmk_rc_ok) {
+            rc = pcmk_rc_ok;
+        }
}
if (print_summary && rc != pcmk_rc_ok) {
print_resources_closing(out, mon_ops);
}
out->end_list(out);
return pcmk_rc_ok;
}

static int
failure_count(pe_working_set_t *data_set, pe_node_t *node, pe_resource_t *rsc, time_t *last_failure) {
return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default,
NULL, data_set)
: 0;
}

static GListPtr
get_operation_list(xmlNode *rsc_entry) {
GListPtr op_list = NULL;
xmlNode *rsc_op = NULL;
for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
rsc_op = __xml_next_element(rsc_op)) {
+ const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
+ const char *interval_ms_s = crm_element_value(rsc_op,
+ XML_LRM_ATTR_INTERVAL_MS);
+ const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
+ int op_rc_i = crm_parse_int(op_rc, "0");
+
+ /* Display 0-interval monitors as "probe" */
+ if (safe_str_eq(task, CRMD_ACTION_STATUS)
+ && ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) {
+ task = "probe";
+ }
+
+        /* Ignore notifies, and probes that found the resource not running
+         * (OCF return code 7)
+         */
+        if (safe_str_eq(task, CRMD_ACTION_NOTIFY)
+            || (safe_str_eq(task, "probe") && (op_rc_i == 7))) {
+            continue;
+        }
+
if (crm_str_eq((const char *) rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
op_list = g_list_append(op_list, rsc_op);
}
}
op_list = g_list_sort(op_list, sort_op_by_callid);
return op_list;
}

/*!
* \internal
* \brief Print resource operation/failure history
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] node Node that ran this resource.
* \param[in] rsc_entry Root of XML tree describing resource status.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] op_list A list of operations to print.
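+ *
+ * \note \p op_list is consumed (freed) by this function.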
*/
static int
print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node,
xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list)
{
GListPtr gIter = NULL;
int rc = pcmk_rc_no_output;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
/* Print each operation */
for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *xml_op = (xmlNode *) gIter->data;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *interval_ms_s = crm_element_value(xml_op,
XML_LRM_ATTR_INTERVAL_MS);
const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
int op_rc_i = crm_parse_int(op_rc, "0");
/* Display 0-interval monitors as "probe" */
if (safe_str_eq(task, CRMD_ACTION_STATUS)
&& ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) {
task = "probe";
}
- /* Ignore notifies and some probes */
- if (pcmk__str_any_of(task, CRMD_ACTION_NOTIFY, "probe", NULL) && (op_rc_i == 7)) {
- continue;
- }
-
/* If this is the first printed operation, print heading for resource */
if (rc == pcmk_rc_no_output) {
time_t last_failure = 0;
int failcount = failure_count(data_set, node, rsc, &last_failure);
out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure, TRUE);
rc = pcmk_rc_ok;
}
/* Print the operation */
out->message(out, "op-history", xml_op, task, interval_ms_s,
op_rc_i, is_set(mon_ops, mon_op_print_timing));
}
/* Free the list we created (no need to free the individual items) */
g_list_free(op_list);
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}

/*!
* \internal
* \brief Print node operation/failure history
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] node_state Root of XML tree describing node status.
* \param[in] operations Whether to print operations or just failcounts.
* \param[in] mon_ops Bitmask of mon_op_*.
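+ * \param[in] node       Node whose history is being displayed.
+ * \param[in] only_node  List of node names to limit output to.
+ * \param[in] only_rsc   List of resource names to limit output to.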
*/
static int
print_node_history(pcmk__output_t *out, pe_working_set_t *data_set,
pe_node_t *node, xmlNode *node_state, gboolean operations,
- unsigned int mon_ops, GListPtr only_node)
+ unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc)
{
xmlNode *lrm_rsc = NULL;
xmlNode *rsc_entry = NULL;
int rc = pcmk_rc_no_output;
lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
/* Print history of each of the node's resources */
for (rsc_entry = __xml_first_child_element(lrm_rsc); rsc_entry != NULL;
rsc_entry = __xml_next_element(rsc_entry)) {
        if (!crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
            continue;
        }
+
+        /* Look up the resource named by this history entry (only after
+         * confirming the entry really is a resource element)
+         */
+        const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
+        pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
+
+ /* We can't use is_filtered here to filter group resources. For is_filtered,
+ * we have to decide whether to check the parent or not. If we check the
+ * parent, all elements of a group will always be printed because that's how
+ * is_filtered works for groups. If we do not check the parent, sometimes
+ * this will filter everything out.
+ *
+ * For other resource types, is_filtered is okay.
+ */
+ if (uber_parent(rsc)->variant == pe_group) {
+ if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) &&
+ !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) {
+ continue;
+ }
+ } else {
+ if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
+ continue;
+ }
+ }
+
if (operations == FALSE) {
- const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
- pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
time_t last_failure = 0;
int failcount = failure_count(data_set, node, rsc, &last_failure);
- if (failcount > 0) {
- if (rc == pcmk_rc_no_output) {
- rc = pcmk_rc_ok;
- out->message(out, "node", node, get_resource_display_options(mon_ops),
- FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
- is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
- only_node);
- }
-
- out->message(out, "resource-history", rsc, rsc_id, FALSE,
- failcount, last_failure, FALSE);
+ if (failcount <= 0) {
+ continue;
}
+
+ if (rc == pcmk_rc_no_output) {
+ rc = pcmk_rc_ok;
+ out->message(out, "node", node, get_resource_display_options(mon_ops),
+ FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
+ is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
+ only_node, only_rsc);
+ }
+
+ out->message(out, "resource-history", rsc, rsc_id, FALSE,
+ failcount, last_failure, FALSE);
} else {
GListPtr op_list = get_operation_list(rsc_entry);
+ if (op_list == NULL) {
+ continue;
+ }
+
if (rc == pcmk_rc_no_output) {
rc = pcmk_rc_ok;
out->message(out, "node", node, get_resource_display_options(mon_ops),
FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
- only_node);
+ only_node, only_rsc);
}
- if (op_list != NULL) {
- print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list);
- }
+ print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}

/*!
* \internal
* \brief Determine whether extended information about an attribute should be added.
*
* \param[in] out The output functions structure.
* \param[in] node Node that ran this resource.
* \param[in] rsc_list The list of resources for this node.
* \param[in] attrname The attribute to find.
* \param[out] expected_score The expected value for this attribute.
*
* \return TRUE if extended information should be printed, FALSE otherwise
* \note Currently, extended information is only supported for ping/pingd
* resources, for which a message will be printed if connectivity is lost
* or degraded.
*/
static gboolean
add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list,
const char *attrname, int *expected_score)
{
GListPtr gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
const char *name = NULL;
if (rsc->children != NULL) {
if (add_extra_info(out, node, rsc->children, attrname, expected_score)) {
return TRUE;
}
}
-        if (safe_str_neq(type, "ping") && safe_str_neq(type, "pingd")) {
-            return FALSE;
-        }
+        /* Skip resources that are not ping/pingd rather than giving up,
+         * so any later resources in the node's list are still checked
+         */
+        if (safe_str_neq(type, "ping") && safe_str_neq(type, "pingd")) {
+            continue;
+        }
name = g_hash_table_lookup(rsc->parameters, "name");
if (name == NULL) {
name = "pingd";
}
-        /* To identify the resource with the attribute name. */
+        /* Check whether this resource supplies the attribute being displayed */
if (safe_str_eq(name, attrname)) {
int host_list_num = 0;
-            /* int value = crm_parse_int(attrvalue, "0"); */
const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list");
const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier");
if (hosts) {
char **host_list = g_strsplit(hosts, " ", 0);
host_list_num = g_strv_length(host_list);
g_strfreev(host_list);
}
-        /* pingd multiplier is the same as the default value. */
+        /* Use pingd's default multiplier (1) if the parameter is unset */
*expected_score = host_list_num * crm_parse_int(multiplier, "1");
return TRUE;
}
}
return FALSE;
}

-/* structure for passing multiple user data to g_list_foreach() */
+/* Structure for passing multiple pieces of user data to g_list_foreach() */
struct mon_attr_data {
pcmk__output_t *out;
pe_node_t *node;
};

static void
print_node_attribute(gpointer name, gpointer user_data)
{
const char *value = NULL;
int expected_score = 0;
gboolean add_extra = FALSE;
struct mon_attr_data *data = (struct mon_attr_data *) user_data;
value = pe_node_attribute_raw(data->node, name);
add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc,
name, &expected_score);
/* Print attribute name and value */
data->out->message(data->out, "node-attribute", name, value, add_extra,
expected_score);
}

/*!
* \internal
* \brief Print history for all nodes.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] operations Whether to print operations or just failcounts.
* \param[in] mon_ops Bitmask of mon_op_*.
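+ * \param[in] only_node     List of node names to limit output to.
+ * \param[in] only_rsc      List of resource names to limit output to.
+ * \param[in] print_spacer  Whether to display a spacer before this section.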
*/
static int
print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean operations, unsigned int mon_ops, GListPtr only_node,
- gboolean print_spacer)
+ GListPtr only_rsc, gboolean print_spacer)
{
xmlNode *node_state = NULL;
xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
int rc = pcmk_rc_no_output;
if (xmlChildElementCount(cib_status) == 0) {
return rc;
}
/* Print each node in the CIB status */
for (node_state = __xml_first_child_element(cib_status); node_state != NULL;
node_state = __xml_next_element(node_state)) {
pe_node_t *node;
if (!crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
continue;
}
node = pe_find_node_id(data_set->nodes, ID(node_state));
if (!node || !node->details || !node->details->online) {
continue;
}
if (!pcmk__str_in_list(only_node, node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, operations ? "Operations" : "Migration Summary");
print_node_history(out, data_set, node, node_state, operations, mon_ops,
- only_node);
+ only_node, only_rsc);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}

/*!
* \internal
* \brief Print all tickets.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
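+ * \param[in] print_spacer  Whether to display a spacer before this section.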
*/
static int
print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set,
gboolean print_spacer)
{
GHashTableIter iter;
gpointer key, value;
if (g_hash_table_size(data_set->tickets) == 0) {
return pcmk_rc_no_output;
}
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
/* Print section heading */
out->begin_list(out, NULL, NULL, "Tickets");
/* Print each ticket */
g_hash_table_iter_init(&iter, data_set->tickets);
while (g_hash_table_iter_next(&iter, &key, &value)) {
pe_ticket_t *ticket = (pe_ticket_t *) value;
out->message(out, "ticket", ticket);
}
/* Close section */
out->end_list(out);
return pcmk_rc_ok;
}

/*!
* \internal
* \brief Print section for negative location constraints
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] prefix ID prefix to filter results by.
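+ * \param[in] only_rsc      List of resource names to limit output to.
+ * \param[in] print_spacer  Whether to display a spacer before this section.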
*/
static int
print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set,
- unsigned int mon_ops, const char *prefix, gboolean print_spacer)
+ unsigned int mon_ops, const char *prefix, GListPtr only_rsc,
+ gboolean print_spacer)
{
GListPtr gIter, gIter2;
int rc = pcmk_rc_no_output;
/* Print each ban */
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
+
if (prefix != NULL && !g_str_has_prefix(location->id, prefix))
continue;
+
+ if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh)) &&
+ !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)))) {
+ continue;
+ }
+
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
pe_node_t *node = (pe_node_t *) gIter2->data;
if (node->weight < 0) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
out->message(out, "ban", node, location, is_set(mon_ops, mon_op_print_clone_detail));
}
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}

/*!
* \internal
* \brief Print node attributes section
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] mon_ops Bitmask of mon_op_*.
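+ * \param[in] only_node     List of node names to limit output to.
+ * \param[in] only_rsc      List of resource names to limit output to.
+ * \param[in] print_spacer  Whether to display a spacer before this section.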
*/
static int
print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set,
unsigned int mon_ops, GListPtr only_node,
- gboolean print_spacer)
+ GListPtr only_rsc, gboolean print_spacer)
{
GListPtr gIter = NULL;
int rc = pcmk_rc_no_output;
/* Unpack all resource parameters (it would be more efficient to do this
* only when needed for the first time in add_extra_info())
*/
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
crm_mon_get_parameters(gIter->data, data_set);
}
/* Display each node's attributes */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
struct mon_attr_data data;
data.out = out;
data.node = (pe_node_t *) gIter->data;
if (data.node && data.node->details && data.node->details->online) {
GList *attr_list = NULL;
GHashTableIter iter;
gpointer key, value;
g_hash_table_iter_init(&iter, data.node->details->attrs);
while (g_hash_table_iter_next (&iter, &key, &value)) {
attr_list = append_attr_list(attr_list, key);
}
if (attr_list == NULL) {
continue;
}
if (!pcmk__str_in_list(only_node, data.node->details->uname)) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
out->message(out, "node", data.node, get_resource_display_options(mon_ops),
FALSE, NULL, is_set(mon_ops, mon_op_print_clone_detail),
is_set(mon_ops, mon_op_print_brief), is_set(mon_ops, mon_op_group_by_node),
- only_node);
+ only_node, only_rsc);
g_list_foreach(attr_list, print_node_attribute, &data);
g_list_free(attr_list);
out->end_list(out);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}

/*!
* \internal
* \brief Print a section for failed actions
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
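+ * \param[in] only_node     List of node names to limit output to.
+ * \param[in] print_spacer  Whether to display a spacer before this section.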
*/
static int
print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set,
GListPtr only_node, gboolean print_spacer)
{
xmlNode *xml_op = NULL;
int rc = pcmk_rc_no_output;
if (xmlChildElementCount(data_set->failed) == 0) {
return rc;
}
for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
xml_op = __xml_next(xml_op)) {
if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME))) {
continue;
}
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
out->message(out, "failed-action", xml_op);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
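
+/* Copy a pcmk_rc_ok result into retcode without downgrading an earlier
+ * success, so we can track whether any section has produced output.
+ * Typical use:
+ *
+ *     CHECK_RC(rc, out->message(out, "cluster-summary", ...));
+ */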
-#define CHECK_RC(retcode, retval)   \
-    if (retval == pcmk_rc_ok) { \
-        retcode = pcmk_rc_ok;   \
-    }
+#define CHECK_RC(retcode, retval) do {  \
+        if ((retval) == pcmk_rc_ok) {   \
+            (retcode) = pcmk_rc_ok;     \
+        }                               \
+    } while (0)

/*!
* \internal
* \brief Top-level printing function for text/curses output.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] stonith_history List of stonith actions.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] show Bitmask of mon_show_*.
* \param[in] prefix ID prefix to filter results by.
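+ * \param[in] only_node       If not NULL, limit output to this node or tag.
+ * \param[in] only_rsc        If not NULL, limit output to this resource or tag.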
*/
void
print_status(pcmk__output_t *out, pe_working_set_t *data_set,
stonith_history_t *stonith_history, unsigned int mon_ops,
- unsigned int show, char *prefix, char *only_node)
+ unsigned int show, char *prefix, char *only_node, char *only_rsc)
{
GListPtr unames = NULL;
+ GListPtr resources = NULL;
unsigned int print_opts = get_resource_display_options(mon_ops);
int rc = pcmk_rc_no_output;
CHECK_RC(rc, out->message(out, "cluster-summary", data_set,
is_set(mon_ops, mon_op_print_clone_detail),
is_set(show, mon_show_stack),
is_set(show, mon_show_dc),
is_set(show, mon_show_times),
is_set(show, mon_show_counts),
is_set(show, mon_show_options)));
unames = build_uname_list(data_set, only_node);
+ resources = build_rsc_list(data_set, only_rsc);
if (is_set(show, mon_show_nodes) && unames) {
PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok);
- CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames, print_opts,
+ CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames,
+ resources, print_opts,
is_set(mon_ops, mon_op_print_clone_detail),
is_set(mon_ops, mon_op_print_brief),
is_set(mon_ops, mon_op_group_by_node)));
}
/* Print resources section, if needed */
if (is_set(show, mon_show_resources)) {
CHECK_RC(rc, print_resources(out, data_set, print_opts, mon_ops,
is_set(mon_ops, mon_op_print_brief), TRUE,
- unames, rc == pcmk_rc_ok));
+ unames, resources, rc == pcmk_rc_ok));
}
/* print Node Attributes section if requested */
if (is_set(show, mon_show_attributes)) {
- CHECK_RC(rc, print_node_attributes(out, data_set, mon_ops, unames, rc == pcmk_rc_ok));
+ CHECK_RC(rc, print_node_attributes(out, data_set, mon_ops, unames, resources,
+ rc == pcmk_rc_ok));
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (is_set(show, mon_show_operations) || is_set(show, mon_show_failcounts)) {
CHECK_RC(rc, print_node_summary(out, data_set, is_set(show, mon_show_operations),
- mon_ops, unames, rc == pcmk_rc_ok));
+ mon_ops, unames, resources, rc == pcmk_rc_ok));
}
/* If there were any failed actions, print them */
if (is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) {
CHECK_RC(rc, print_failed_actions(out, data_set, unames, rc == pcmk_rc_ok));
}
/* Print failed stonith actions */
if (is_set(show, mon_show_fence_failed) && is_set(mon_ops, mon_op_fence_history)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "failed-fencing-history", hp, unames,
is_set(mon_ops, mon_op_fence_full_history),
rc == pcmk_rc_ok));
}
}
/* Print tickets if requested */
if (is_set(show, mon_show_tickets)) {
CHECK_RC(rc, print_cluster_tickets(out, data_set, rc == pcmk_rc_ok));
}
/* Print negative location constraints if requested */
if (is_set(show, mon_show_bans)) {
- CHECK_RC(rc, print_neg_locations(out, data_set, mon_ops, prefix, rc == pcmk_rc_ok));
+ CHECK_RC(rc, print_neg_locations(out, data_set, mon_ops, prefix, resources,
+ rc == pcmk_rc_ok));
}
/* Print stonith history */
if (is_set(mon_ops, mon_op_fence_history)) {
if (is_set(show, mon_show_fence_worked)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "fencing-history", hp, unames,
is_set(mon_ops, mon_op_fence_full_history),
rc == pcmk_rc_ok));
}
} else if (is_set(show, mon_show_fence_pending)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
if (hp) {
CHECK_RC(rc, out->message(out, "pending-fencing-actions", hp, unames,
is_set(mon_ops, mon_op_fence_full_history),
rc == pcmk_rc_ok));
}
}
}
    g_list_free_full(unames, free);
+    g_list_free_full(resources, free);
}

/*!
* \internal
* \brief Top-level printing function for XML output.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] stonith_history List of stonith actions.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] show Bitmask of mon_show_*.
* \param[in] prefix ID prefix to filter results by.
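+ * \param[in] history_rc      Result of fetching stonith history.
+ * \param[in] only_node       If not NULL, limit output to this node or tag.
+ * \param[in] only_rsc        If not NULL, limit output to this resource or tag.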
*/
void
print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set,
crm_exit_t history_rc, stonith_history_t *stonith_history,
unsigned int mon_ops, unsigned int show, char *prefix,
- char *only_node)
+ char *only_node, char *only_rsc)
{
GListPtr unames = NULL;
+ GListPtr resources = NULL;
unsigned int print_opts = get_resource_display_options(mon_ops);
out->message(out, "cluster-summary", data_set,
is_set(mon_ops, mon_op_print_clone_detail),
is_set(show, mon_show_stack),
is_set(show, mon_show_dc),
is_set(show, mon_show_times),
is_set(show, mon_show_counts),
is_set(show, mon_show_options));
unames = build_uname_list(data_set, only_node);
+ resources = build_rsc_list(data_set, only_rsc);
/*** NODES ***/
if (is_set(show, mon_show_nodes)) {
- out->message(out, "node-list", data_set->nodes, unames, print_opts,
+ out->message(out, "node-list", data_set->nodes, unames,
+ resources, print_opts,
is_set(mon_ops, mon_op_print_clone_detail),
is_set(mon_ops, mon_op_print_brief),
is_set(mon_ops, mon_op_group_by_node));
}
/* Print resources section, if needed */
if (is_set(show, mon_show_resources)) {
- print_resources(out, data_set, print_opts, mon_ops, FALSE, FALSE, unames, FALSE);
+ print_resources(out, data_set, print_opts, mon_ops, FALSE, FALSE,
+ unames, resources, FALSE);
}
/* print Node Attributes section if requested */
if (is_set(show, mon_show_attributes)) {
- print_node_attributes(out, data_set, mon_ops, unames, FALSE);
+ print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (is_set(show, mon_show_operations) || is_set(show, mon_show_failcounts)) {
print_node_summary(out, data_set, is_set(show, mon_show_operations),
- mon_ops, unames, FALSE);
+ mon_ops, unames, resources, FALSE);
}
/* If there were any failed actions, print them */
if (is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) {
print_failed_actions(out, data_set, unames, FALSE);
}
/* Print stonith history */
if (is_set(show, mon_show_fencing_all) && is_set(mon_ops, mon_op_fence_history)) {
out->message(out, "full-fencing-history", history_rc, stonith_history,
unames, is_set(mon_ops, mon_op_fence_full_history), FALSE);
}
/* Print tickets if requested */
if (is_set(show, mon_show_tickets)) {
print_cluster_tickets(out, data_set, FALSE);
}
/* Print negative location constraints if requested */
if (is_set(show, mon_show_bans)) {
- print_neg_locations(out, data_set, mon_ops, prefix, FALSE);
+ print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
}
g_list_free_full(unames, free);
+ g_list_free_full(resources, free);
}

/*!
* \internal
* \brief Top-level printing function for HTML output.
*
* \param[in] out The output functions structure.
* \param[in] data_set Cluster state to display.
* \param[in] stonith_history List of stonith actions.
* \param[in] mon_ops Bitmask of mon_op_*.
* \param[in] show Bitmask of mon_show_*.
* \param[in] prefix ID prefix to filter results by.
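+ * \param[in] only_node       If not NULL, limit output to this node or tag.
+ * \param[in] only_rsc        If not NULL, limit output to this resource or tag.
+ *
+ * \return Always 0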
*/
int
print_html_status(pcmk__output_t *out, pe_working_set_t *data_set,
stonith_history_t *stonith_history, unsigned int mon_ops,
- unsigned int show, char *prefix, char *only_node)
+ unsigned int show, char *prefix, char *only_node,
+ char *only_rsc)
{
GListPtr unames = NULL;
+ GListPtr resources = NULL;
+
unsigned int print_opts = get_resource_display_options(mon_ops);
out->message(out, "cluster-summary", data_set,
is_set(mon_ops, mon_op_print_clone_detail),
is_set(show, mon_show_stack),
is_set(show, mon_show_dc),
is_set(show, mon_show_times),
is_set(show, mon_show_counts),
is_set(show, mon_show_options));
unames = build_uname_list(data_set, only_node);
+ resources = build_rsc_list(data_set, only_rsc);
/*** NODE LIST ***/
if (is_set(show, mon_show_nodes) && unames) {
- out->message(out, "node-list", data_set->nodes, unames, print_opts,
+ out->message(out, "node-list", data_set->nodes, unames,
+ resources, print_opts,
is_set(mon_ops, mon_op_print_clone_detail),
is_set(mon_ops, mon_op_print_brief),
is_set(mon_ops, mon_op_group_by_node));
}
/* Print resources section, if needed */
if (is_set(show, mon_show_resources)) {
print_resources(out, data_set, print_opts, mon_ops,
- is_set(mon_ops, mon_op_print_brief), TRUE, unames, FALSE);
+ is_set(mon_ops, mon_op_print_brief), TRUE, unames,
+ resources, FALSE);
}
/* print Node Attributes section if requested */
if (is_set(show, mon_show_attributes)) {
- print_node_attributes(out, data_set, mon_ops, unames, FALSE);
+ print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
if (is_set(show, mon_show_operations) || is_set(show, mon_show_failcounts)) {
print_node_summary(out, data_set, is_set(show, mon_show_operations),
- mon_ops, unames, FALSE);
+ mon_ops, unames, resources, FALSE);
}
/* If there were any failed actions, print them */
if (is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) {
print_failed_actions(out, data_set, unames, FALSE);
}
/* Print failed stonith actions */
if (is_set(show, mon_show_fence_failed) && is_set(mon_ops, mon_op_fence_history)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
GINT_TO_POINTER(st_failed));
if (hp) {
out->message(out, "failed-fencing-history", hp, unames,
is_set(mon_ops, mon_op_fence_full_history), FALSE);
}
}
/* Print stonith history */
if (is_set(mon_ops, mon_op_fence_history)) {
if (is_set(show, mon_show_fence_worked)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
GINT_TO_POINTER(st_failed));
if (hp) {
out->message(out, "fencing-history", hp, unames,
is_set(mon_ops, mon_op_fence_full_history),
FALSE);
}
} else if (is_set(show, mon_show_fence_pending)) {
stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
if (hp) {
out->message(out, "pending-fencing-actions", hp, unames,
is_set(mon_ops, mon_op_fence_full_history),
FALSE);
}
}
}
/* Print tickets if requested */
if (is_set(show, mon_show_tickets)) {
print_cluster_tickets(out, data_set, FALSE);
}
/* Print negative location constraints if requested */
if (is_set(show, mon_show_bans)) {
- print_neg_locations(out, data_set, mon_ops, prefix, FALSE);
+ print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE);
}
g_list_free_full(unames, free);
+ g_list_free_full(resources, free);
return 0;
}