diff --git a/cts/CM_lha.py b/cts/CM_lha.py index 6b62376628..4f9764f93c 100755 --- a/cts/CM_lha.py +++ b/cts/CM_lha.py @@ -1,608 +1,609 @@ '''CTS: Cluster Testing System: LinuxHA v2 dependent modules... ''' __copyright__=''' Author: Huang Zhen Copyright (C) 2004 International Business Machines Additional Audits, Revised Start action, Default Configuration: Copyright (C) 2004 Andrew Beekhof ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import os,sys,CTS,CTSaudits,CTStests, warnings from CTSvars import * from CTS import * from CTSaudits import ClusterAudit from CTStests import * from CIB import * try: from xml.dom.minidom import * except ImportError: sys.__stdout__.write("Python module xml.dom.minidom not found\n") sys.__stdout__.write("Please install python-xml or similar before continuing\n") sys.__stdout__.flush() sys.exit(1) ####################################################################### # # LinuxHA v2 dependent modules # ####################################################################### class crm_lha(ClusterManager): ''' The linux-ha version 2 cluster manager class. It implements the things we need to talk to and manipulate linux-ha version 2 clusters ''' def __init__(self, Environment, randseed=None): ClusterManager.__init__(self, Environment, randseed=randseed) #HeartbeatCM.__init__(self, Environment, randseed=randseed) self.fastfail = 0 self.clear_cache = 0 self.cib_installed = 0 self.config = None self.cluster_monitor = 0 self.use_short_names = 1 self.update({ "Name" : "crm-lha", "DeadTime" : 300, "StartTime" : 300, # Max time to start up "StableTime" : 30, "StartCmd" : CTSvars.INITDIR+"/heartbeat start > /dev/null 2>&1", "StopCmd" : CTSvars.INITDIR+"/heartbeat stop > /dev/null 2>&1", "ElectionCmd" : "crmadmin -E %s", "StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null", "EpocheCmd" : "crm_node -H -e", "QuorumCmd" : "crm_node -H -q", "ParitionCmd" : "crm_node -H -p", "CibQuery" : "cibadmin -Ql", "ExecuteRscOp" : "lrmadmin -n %s -E %s %s 0 %d EVERYTIME 2>&1", "CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml", "TmpDir" : "/tmp", "BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1", "FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1", # tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit # tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated # tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1 # tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null "ReduceCommCmd" : "", "RestoreCommCmd" : "tc qdisc del dev lo root", "LogFileName" : Environment["LogFileName"], "StandbyCmd" : "crm_standby -U %s -v %s 2>/dev/null", "UUIDQueryCmd" : "crmadmin -N", "StandbyQueryCmd" : "crm_standby -GQ -U %s 2>/dev/null", # Patterns to look 
for in the log files for various occasions... "Pat:DC_IDLE" : "crmd.*State transition.*-> S_IDLE", # This won't work if we have multiple partitions "Pat:Local_started" : "%s crmd:.*The local CRM is operational", "Pat:Slave_started" : "%s crmd:.*State transition.*-> S_NOT_DC", "Pat:Master_started" : "%s crmd:.* State transition.*-> S_IDLE", "Pat:We_stopped" : "heartbeat.*%s.*Heartbeat shutdown complete", "Pat:Logd_stopped" : "%s logd:.*Exiting write process", "Pat:They_stopped" : "%s crmd:.*LOST:.* %s ", "Pat:All_stopped" : "heartbeat.*%s.*Heartbeat shutdown complete", "Pat:They_dead" : "node %s.*: is dead", "Pat:TransitionComplete" : "Transition status: Complete: complete", "Pat:ChildKilled" : "%s heartbeat.*%s.*killed by signal 9", "Pat:ChildRespawn" : "%s heartbeat.*Respawning client.*%s", "Pat:ChildExit" : "ERROR: Client .* exited with return code", # Bad news Regexes. Should never occur. "BadRegexes" : ( r"ERROR:", r"CRIT:", r"Shutting down\.", r"Forcing shutdown\.", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r", exiting\.", r"WARN.*Ignoring HA message.*vote.*not in our membership list", r"pengine.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r"No need to invoke the TE", r"global_timer_callback:", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", ), }) if self.Env["DoBSC"]: del self["Pat:They_stopped"] del self["Pat:Logd_stopped"] self.Env["use_logd"] = 0 self._finalConditions() self.check_transitions = 0 self.check_elections = 0 self.CIBsync = {} self.CibFactory = ConfigFactory(self) self.cib = self.CibFactory.createConfig(self.Env["Schema"]) def errorstoignore(self): # At some point implement a more elegant solution that # also produces a report at the end '''Return list of errors which are known, very noisy and should be ignored''' if 1: return [ "ERROR: crm_abort: crm_glib_handler: ", "ERROR: Message hist queue is filling up", "stonithd: .*CRIT: external_hostlist: 'vmware gethosts' returned an empty hostlist", "stonithd: .*ERROR: Could not list nodes for stonith RA external/vmware.", "pengine: Preventing .* from re-starting", ] return [] def install_config(self, node): if not self.ns.WaitForNodeToComeUp(node): self.log("Node %s is not up."
% node) return None if not self.CIBsync.has_key(node) and self.Env["ClobberCIB"] == 1: self.CIBsync[node] = 1 self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") # Only install the CIB on the first node; all the others will pick it up from there if self.cib_installed == 1: return None self.cib_installed = 1 if self.Env["CIBfilename"] == None: self.debug("Installing Generated CIB on node %s" %(node)) warnings.filterwarnings("ignore") cib_file=os.tmpnam() warnings.resetwarnings() self.rsh("localhost", "rm -f "+cib_file) self.debug("Creating new CIB for " + node + " in: " + cib_file) self.rsh("localhost", "echo \'" + self.cib.contents() + "\' > " + cib_file) if 0!=self.rsh.echo_cp(None, cib_file, node, CTSvars.CRM_CONFIG_DIR+"/cib.xml"): #if 0!=self.rsh.cp(cib_file, "root@%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml" % node): raise ValueError("Cannot create CIB on %s "%node) self.rsh("localhost", "rm -f "+cib_file) else: self.debug("Installing CIB (%s) on node %s" %(self.Env["CIBfilename"], node)) if 0 != self.rsh.cp(self.Env["CIBfilename"], "root@" + (self["CIBfile"]%node)): raise ValueError("Cannot scp file to %s "%node) self.rsh(node, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml") def prepare(self): '''Finish the initialization process. Prepare to test...''' self.partitions_expected = 1 for node in self.Env["nodes"]: self.ShouldBeStatus[node] = "" self.unisolate_node(node) self.StataCM(node) def test_node_CM(self, node): '''Report the status of the cluster manager on a given node''' watchpats = [ ] watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)") watchpats.append(self["Pat:Slave_started"]%node) idle_watch = CTS.LogWatcher(self["LogFileName"], watchpats) idle_watch.setwatch() out = self.rsh(node, self["StatusCmd"]%node, 1) self.debug("Node %s status: '%s'" %(node, out)) if not out or string.find(out, 'ok') < 0: if self.ShouldBeStatus[node] == "up": self.log( "Node status for %s is %s but we think it should be %s" %(node, "down", self.ShouldBeStatus[node])) self.ShouldBeStatus[node]="down" return 0 if self.ShouldBeStatus[node] == "down": self.log( "Node status for %s is %s but we think it should be %s: %s" %(node, "up", self.ShouldBeStatus[node], out)) self.ShouldBeStatus[node]="up" # check the output first - because syslog-ng loses messages if string.find(out, 'S_NOT_DC') != -1: # Up and stable return 2 if string.find(out, 'S_IDLE') != -1: # Up and stable return 2 # fall back to syslog-ng and wait if not idle_watch.look(): # just up self.debug("Warn: Node %s is unstable: %s" %(node, out)) return 1 # Up and stable return 2 # Is the node up or is the node down def StataCM(self, node): '''Report the status of the cluster manager on a given node''' if self.test_node_CM(node) > 0: return 1 return None # Being up and being stable is not the same question...
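A note on the convention shared by these status probes: test_node_CM() returns 0 for down, 1 for up-but-unstable and 2 for up-and-stable, and the wrapper methods reduce that to booleans. A minimal sketch of the mapping; describe_cm_state() is a hypothetical helper, not part of the class:

def describe_cm_state(rc):
    # 0 == down, 1 == up but not yet in S_IDLE/S_NOT_DC, 2 == up and stable;
    # StataCM() tests rc > 0, node_stable() below tests rc == 2.
    return {0: "down", 1: "up (unstable)", 2: "up (stable)"}.get(rc, "unknown")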
def node_stable(self, node): '''Report the status of the cluster manager on a given node''' if self.test_node_CM(node) == 2: return 1 self.log("Warn: Node %s not stable" %(node)) return None def partition_stable(self, nodes, timeout=None): watchpats = [ ] watchpats.append("Current ping state: S_IDLE") watchpats.append(self["Pat:DC_IDLE"]) self.debug("Waiting for cluster stability...") if timeout == None: timeout = self["DeadTime"] idle_watch = CTS.LogWatcher(self["LogFileName"], watchpats, timeout) idle_watch.setwatch() any_up = 0 for node in self.Env["nodes"]: # have each node dump its current state if self.ShouldBeStatus[node] == "up": self.rsh(node, self["StatusCmd"] %node, 1) any_up = 1 if any_up == 0: self.debug("Cluster is inactive") return 1 ret = idle_watch.look() while ret: self.debug(ret) for node in nodes: if re.search(node, ret): return 1 ret = idle_watch.look() self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout)) return None def cluster_stable(self, timeout=None): partitions = self.find_partitions() for partition in partitions: if not self.partition_stable(partition, timeout): return None return 1 def is_node_dc(self, node, status_line=None): rc = 0 if not status_line: status_line = self.rsh(node, self["StatusCmd"]%node, 1) if not status_line: rc = 0 elif string.find(status_line, 'S_IDLE') != -1: rc = 1 elif string.find(status_line, 'S_INTEGRATION') != -1: rc = 1 elif string.find(status_line, 'S_FINALIZE_JOIN') != -1: rc = 1 elif string.find(status_line, 'S_POLICY_ENGINE') != -1: rc = 1 elif string.find(status_line, 'S_TRANSITION_ENGINE') != -1: rc = 1 return rc def active_resources(self, node): # [SM].* {node} matches Started, Slave, Master # Stopped won't be matched as it won't include {node} - (rc, output) = self.rsh(node, """crm_mon -1 | grep -e "[SM].* %s" """ % node, None) + (rc, output) = self.rsh(node, """crm_resource -c""", None) resources = [] for line in output: - fields = line.split() - resources.append(fields[0]) + if not re.search("^Resource", line): + continue # Constraint lines have fewer fields and would misparse + tmp = AuditResource(self, line) + if tmp.host == node: + resources.append(tmp.id) return resources def ResourceOp(self, resource, op, node, interval=0, app="lrmadmin"): ''' Execute an operation on a resource ''' cmd = self["ExecuteRscOp"] % (app, resource, op, interval) (rc, lines) = self.rsh(node, cmd, None) #self.debug("RscOp '%s' on %s: %d" % (cmd, node, rc)) #for line in lines: # self.debug("RscOp: "+line) return rc def ResourceLocation(self, rid): ResourceNodes = [] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": dummy = 0 rc = self.ResourceOp(rid, "monitor", node) # Strange error codes from remote_py # 65024 == not installed # 2048 == 8 # 1792 == 7 # 0 == 0 if rc == 127: self.log("Command failed. Tool not available?") elif rc == 254 or rc == 65024: dummy = 1 #self.debug("%s is not installed on %s: %d" % (rid, node, rc)) elif rc == 0 or rc == 2048 or rc == 8: ResourceNodes.append(node) elif rc == 7 or rc == 1792: dummy = 1 #self.debug("%s is not running on %s: %d" % (rid, node, rc)) else: # not active on this node?
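The "strange error codes" handled in ResourceLocation() look like ordinary agent exit codes shifted left eight bits by the remote execution layer: 2048 == 8<<8, 1792 == 7<<8 and 65024 == 254<<8, which matches the paired cases in each branch. A hedged sketch of that decoding, assuming the shift is the only transformation; normalise_rc() is illustrative and not part of the module:

def normalise_rc(rc):
    # Assumed: the remote layer reports the agent's exit status in the high
    # byte, so 2048 -> 8, 1792 -> 7 and 65024 -> 254; codes <= 255 pass through.
    if rc > 255 and rc % 256 == 0:
        return rc >> 8
    return rc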
self.log("Unknown rc code for %s on %s: %d" % (rid, node, rc)) return ResourceNodes def find_partitions(self): ccm_partitions = [] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": partition = self.rsh(node, self["ParitionCmd"], 1) if not partition: self.log("no partition details for %s" %node) elif len(partition) > 2: partition = partition[:-1] found=0 for a_partition in ccm_partitions: if partition == a_partition: found = 1 if found == 0: self.debug("Adding partition from %s: %s" %(node, partition)) ccm_partitions.append(partition) else: self.debug("Partition '%s' from %s is consistent with existing entries" %(partition, node)) else: self.log("bad partition details for %s" %node) else: self.debug("Node %s is down... skipping" %node) return ccm_partitions def HasQuorum(self, node_list): # If we are auditing a partition, then one side will # have quorum and the other not. # So the caller needs to tell us which we are checking # If no value for node_list is specified... assume all nodes if not node_list: node_list = self.Env["nodes"] for node in node_list: if self.ShouldBeStatus[node] == "up": quorum = self.rsh(node, self["QuorumCmd"], 1) if string.find(quorum, "1") != -1: return 1 elif string.find(quorum, "0") != -1: return 0 else: self.log("WARN: Unexpected quorum test result from "+ node +":"+ quorum) return 0 def Components(self): complist = [] common_ignore = [ "Pending action:", "ERROR: crm_log_message_adv:", "ERROR: MSG: No message to dump", "pending LRM operations at shutdown", "Lost connection to the CIB service", "Connection to the CIB terminated...", "Sending message to CIB service FAILED", "crmd: .*Action A_RECOVER .* not supported", "ERROR: stonithd_op_result_ready: not signed on", "pingd: .*ERROR: send_update: Could not send update", "send_ipc_message: IPC Channel to .* is not connected", "unconfirmed_actions: Waiting on .* unconfirmed actions", "cib_native_msgready: Message pending on command channel", "crmd:.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd", "verify_stopped: Resource .* was active at shutdown. 
You may ignore this error if it is unmanaged.", ] stonith_ignore = [ "ERROR: stonithd_signon: ", "update_failcount: Updating failcount for child_DoFencing", "ERROR: te_connect_stonith: Sign-in failed: triggered a retry", ] stonith_ignore.extend(common_ignore) ccm = Process("ccm", 0, [ "State transition S_IDLE", "CCM connection appears to have failed", "crmd: .*Action A_RECOVER .* not supported", "crmd: .*Input I_TERMINATE from do_recover", "Exiting to recover from CCM connection failure", "crmd:.*do_exit: Could not recover from internal error", "crmd: .*I_ERROR.*(ccm_dispatch|crmd_cib_connection_destroy)", "crmd .*exited with return code 2.", "attrd .*exited with return code 1.", "cib .*exited with return code 2.", "crmd:.*get_channel_token: No reply message - disconnected", # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # "Executing .* fencing operation", # "tengine_stonith_callback: .*result=0", "A new node joined the cluster", # "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE", # "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN", "State transition S_STARTING -> S_PENDING", ], [], common_ignore, self.fastfail, self) cib = Process("cib", 0, [ "State transition S_IDLE", "Lost connection to the CIB service", "Connection to the CIB terminated...", "crmd: .*Input I_TERMINATE from do_recover", "crmd: .*I_ERROR.*crmd_cib_connection_destroy", "crmd:.*do_exit: Could not recover from internal error", "crmd .*exited with return code 2.", "attrd .*exited with return code 1.", ], [], common_ignore, self.fastfail, self) lrmd = Process("lrmd", 0, [ "State transition S_IDLE", "LRM Connection failed", "crmd: .*I_ERROR.*lrm_connection_destroy", "State transition S_STARTING -> S_PENDING", "crmd: .*Input I_TERMINATE from do_recover", "crmd:.*do_exit: Could not recover from internal error", "crmd .*exited with return code 2.", ], [], common_ignore, self.fastfail, self) crmd = Process("crmd", 0, [ # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # "Executing .* fencing operation", # "tengine_stonith_callback: .*result=0", "State transition .* S_IDLE", "State transition S_STARTING -> S_PENDING", ], [ ], common_ignore, self.fastfail, self) pengine = Process("pengine", 1, [ "State transition S_IDLE", "crmd .*exited with return code 2.", "crmd: .*Input I_TERMINATE from do_recover", "crmd: .*do_exit: Could not recover from internal error", "crmd: .*CRIT: pe_connection_destroy: Connection to the Policy Engine failed", "crmd: .*I_ERROR.*save_cib_contents", "crmd .*exited with return code 2.", ], [], common_ignore, self.fastfail, self) if self.Env["DoFencing"] == 1 : complist.append(Process("stonithd", 0, [], [ "crmd: .*CRIT: tengine_stonith_connection_destroy: Fencing daemon connection failed", "Attempting connection to fencing daemon", "te_connect_stonith: Connected", ], stonith_ignore, 0, self)) # complist.append(Process("heartbeat", 0, [], [], [], None, self)) if self.fastfail == 0: ccm.pats.extend([ "attrd .* exited with return code 1", "ERROR: Respawning client .*attrd", "cib .* exited with return code 2", "ERROR: Respawning client .*cib", "crmd .* exited with return code 2", "ERROR: Respawning client .*crmd" ]) cib.pats.extend([ "attrd .* exited with return code 1", "ERROR: Respawning client .*attrd", "crmd .* exited with return code 2", "ERROR: Respawning client .*crmd" ]) lrmd.pats.extend([ "crmd .* exited with return code 2", "ERROR: Respawning client .*crmd" ]) pengine.pats.extend([ "ERROR: Respawning client 
.*crmd" ]) complist.append(ccm) complist.append(cib) complist.append(lrmd) complist.append(crmd) complist.append(pengine) return complist def NodeUUID(self, node): lines = self.rsh(node, self["UUIDQueryCmd"], 1) for line in lines: self.debug("UUIDLine:"+ line) m = re.search(r'%s.+\((.+)\)' % node, line) if m: return m.group(1) return "" def StandbyStatus(self, node): out=self.rsh(node, self["StandbyQueryCmd"]%node, 1) if not out: return "off" out = out[:-1] self.debug("Standby result: "+out) return out # status == "on" : Enter Standby mode # status == "off": Enter Active mode def SetStandbyMode(self, node, status): current_status = self.StandbyStatus(node) cmd = self["StandbyCmd"] % (node, status) ret = self.rsh(node, cmd) return True ####################################################################### # # A little test code... # # Which you are advised to completely ignore... # ####################################################################### if __name__ == '__main__': pass diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py index c1647cf144..435ac1909f 100755 --- a/cts/CTSaudits.py +++ b/cts/CTSaudits.py @@ -1,715 +1,716 @@ '''CTS: Cluster Testing System: Audit module ''' __copyright__=''' Copyright (C) 2000, 2001,2005 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
import time, os, popen2, string, re import CTS import os import popen2 class ClusterAudit: def __init__(self, cm): self.CM = cm def __call__(self): raise ValueError("Abstract Class member (__call__)") def is_applicable(self): '''Return TRUE if we are applicable in the current test configuration''' raise ValueError("Abstract Class member (is_applicable)") return 1 def name(self): raise ValueError("Abstract Class member (name)") AllAuditClasses = [ ] class LogAudit(ClusterAudit): def name(self): return "LogAudit" def __init__(self, cm): self.CM = cm def RestartClusterLogging(self, nodes=None): if not nodes: nodes = self.CM.Env["nodes"] self.CM.log("Restarting logging on: %s" % repr(nodes)) for node in nodes: cmd=self.CM.Env["logrestartcmd"] if self.CM.rsh(node, cmd, blocking=0) != 0: self.CM.log ("ERROR: Cannot restart logging on %s [%s failed]" % (node, cmd)) def TestLogging(self): patterns= [] prefix="Test message from" for node in self.CM.Env["nodes"]: # Look for the node name in two places to make sure # that syslog is logging with the correct hostname patterns.append("%s.*%s %s" % (node, prefix, node)) watch = CTS.LogWatcher(self.CM.Env["LogFileName"], patterns, 60) watch.setwatch() for node in self.CM.Env["nodes"]: cmd="logger -p %s.info %s %s" % (self.CM.Env["SyslogFacility"], prefix, node) if self.CM.rsh(node, cmd, blocking=0) != 0: self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node)) watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.CM.log ("Test message [%s] not found in logs." % (regex)) return 0 return 1 def __call__(self): max=3 attempt=0 self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) while attempt <= max and self.TestLogging() == 0: attempt = attempt + 1 self.RestartClusterLogging() time.sleep(60*attempt) if attempt > max: self.CM.log("ERROR: Cluster logging unrecoverable.") return 0 return 1 def is_applicable(self): if self.CM.Env["DoBSC"]: return 0 return 1 class DiskAudit(ClusterAudit): def name(self): return "DiskspaceAudit" def __init__(self, cm): self.CM = cm def __call__(self): result=1 dfcmd="df -k /var/log | tail -1 | tr -s ' ' | cut -d' ' -f2" self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) for node in self.CM.Env["nodes"]: dfout=self.CM.rsh(node, dfcmd, 1) if not dfout: self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node)) else: try: idfout = int(dfout) except (ValueError, TypeError): self.CM.log("Warning: df output from %s was invalid [%s]" % (node, dfout)) else: if idfout == 0: self.CM.log("CRIT: Completely out of log disk space on %s" % node) result=None elif idfout <= 1000: self.CM.log("WARN: Low on log disk space (%d Mbytes) on %s" % (idfout, node)) return result def is_applicable(self): if self.CM.Env["DoBSC"]: return 0 return 1 class AuditResource: def __init__(self, cm, line): fields = line.split() self.CM = cm self.line = line self.type = fields[1] self.id = fields[2] self.clone_id = fields[3] self.parent = fields[4] self.managed = fields[5] self.needs_quorum = fields[6] self.unique = fields[7] self.rprovider = fields[8] self.rclass = fields[9] self.rtype = fields[10] + self.host = fields[11] if self.parent == "NA": self.parent = None class AuditConstraint: def __init__(self, cm, line): fields = line.split() self.CM = cm self.line = line self.type = fields[1] self.id = fields[2] self.rsc = fields[3] self.target = fields[4] self.score = fields[5] self.rsc_role = fields[6] self.target_role = fields[7] if self.rsc_role == "NA": self.rsc_role = None if 
self.target_role == "NA": self.target_role = None class PrimitiveAudit(ClusterAudit): def name(self): return "PrimitiveAudit" def __init__(self, cm): self.CM = cm def doResourceAudit(self, resource): rc=1 active = self.CM.ResourceLocation(resource.id) if len(active) > 1: if resource.unique == "1": rc=0 self.CM.log("Resource %s is active multiple times: %s" % (resource.id, repr(active))) else: self.CM.debug("Non-unique resource %s is active on: %s" % (resource.id, repr(active))) elif len(active) == 1: if self.CM.HasQuorum(None): self.CM.debug("Resource %s active on %s" % (resource.id, repr(active))) elif resource.needs_quorum == 1: rc=0 self.CM.log("Resource %s active without quorum: %s (%s)" % (resource.id, repr(active), resource.line)) elif self.CM.Env["warn-inactive"] == 1: if self.CM.HasQuorum(None) or not resource.needs_quorum: self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) else: self.CM.debug("Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) elif self.CM.HasQuorum(None) or not resource.needs_quorum: self.CM.debug("Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) if resource.managed == "0": self.CM.log("Resource %s not managed: faking success" % resource.id) rc = 1 return rc def setup(self): self.target = None self.resources = [] self.constraints = [] self.active_nodes = [] self.inactive_nodes = [] self.CM.debug("Do Audit %s"%self.name()) for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.active_nodes.append(node) else: self.inactive_nodes.append(node) for node in self.CM.Env["nodes"]: if self.target == None and self.CM.ShouldBeStatus[node] == "up": self.target = node if not self.target: # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource # with CIB_file=/path/to/cib.xml even when the cluster isn't running self.CM.debug("No nodes active - skipping %s" % self.name()) return 0 (rc, lines) = self.CM.rsh(self.target, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): self.resources.append(AuditResource(self.CM, line)) elif re.search("^Constraint", line): self.constraints.append(AuditConstraint(self.CM, line)) else: self.CM.log("Unknown entry: %s" % line); return 1 def __call__(self): rc = 1 if not self.setup(): return 1 for resource in self.resources: if resource.type == "primitive": if self.doResourceAudit(resource) == 0: rc = 0 return rc def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 class GroupAudit(PrimitiveAudit): def name(self): return "GroupAudit" def __call__(self): rc = 1 if not self.setup(): return 1 for group in self.resources: if group.type == "group": first_match = 1 group_location = None for child in self.resources: if child.parent == group.id: nodes = self.CM.ResourceLocation(child.id) if first_match and len(nodes) > 0: group_location = nodes[0] first_match = 0 if len(nodes) > 1: rc = 0 self.CM.log("Child %s of %s is active more than once: %s" % (child.id, group.id, repr(nodes))) elif len(nodes) == 0: # Groups are allowed to be partially active # However we do need to make sure later children aren't running group_location = None self.CM.debug("Child %s of %s is stopped" % (child.id, group.id)) elif nodes[0] != group_location: rc = 0 self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s" % (child.id, group.id, nodes[0], group_location)) else: 
self.CM.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0])) return rc class CloneAudit(PrimitiveAudit): def name(self): return "CloneAudit" def __call__(self): rc = 1 if not self.setup(): return 1 for clone in self.resources: if clone.type == "clone": for child in self.resources: if child.parent == clone.id and child.type == "primitive": self.CM.debug("Checking child %s of %s..." % (child.id, clone.id)) # Check max and node_max # Obtain with: # crm_resource -g clone_max --meta -r child.id # crm_resource -g clone_node_max --meta -r child.id return rc class ColocationAudit(PrimitiveAudit): def name(self): return "ColocationAudit" def crm_location(self, resource): (rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, None) hosts = [] if rc == 0: for line in lines: fields = line.split() hosts.append(fields[0]) return hosts def __call__(self): rc = 1 if not self.setup(): return 1 for coloc in self.constraints: if coloc.type == "rsc_colocation": source = self.crm_location(coloc.rsc) target = self.crm_location(coloc.target) if len(source) == 0: self.CM.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc)) else: for node in source: if not node in target: rc = 0 self.CM.log("Colocation audit (%s): %s running on %s (not in %s)" % (coloc.id, coloc.rsc, node, repr(target))) else: self.CM.debug("Colocation audit (%s): %s running on %s (in %s)" % (coloc.id, coloc.rsc, node, repr(target))) return rc class CrmdStateAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 up_are_down = 0 down_are_up = 0 unstable_list = [] self.CM.debug("Do Audit %s"%self.name()) for node in self.CM.Env["nodes"]: should_be = self.CM.ShouldBeStatus[node] rc = self.CM.test_node_CM(node) if rc > 0: if should_be == "down": down_are_up = down_are_up + 1 if rc == 1: unstable_list.append(node) elif should_be == "up": up_are_down = up_are_down + 1 if len(unstable_list) > 0: passed = 0 self.CM.log("Cluster is not stable: %d (of %d): %s" %(len(unstable_list), self.CM.upcount(), repr(unstable_list))) if up_are_down > 0: passed = 0 self.CM.log("%d (of %d) nodes expected to be up were down." %(up_are_down, len(self.CM.Env["nodes"]))) if down_are_up > 0: passed = 0 self.CM.log("%d (of %d) nodes expected to be down were up." 
%(down_are_up, len(self.CM.Env["nodes"]))) return passed def name(self): return "CrmdStateAudit" def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 class CIBAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def __call__(self): self.CM.debug("Do Audit %s"%self.name()) passed = 1 ccm_partitions = self.CM.find_partitions() if len(ccm_partitions) == 0: self.CM.debug("\tNo partitions to audit") return 1 for partition in ccm_partitions: self.CM.debug("\tAuditing CIB consistency for: %s" %partition) partition_passed = 0 if self.audit_cib_contents(partition) == 0: passed = 0 return passed def audit_cib_contents(self, hostlist): passed = 1 first_host = None first_host_xml = "" partition_hosts = hostlist.split() for a_host in partition_hosts: if first_host == None: first_host = a_host first_host_xml = self.store_remote_cib(a_host) #self.CM.debug("Retrieved CIB: %s" % first_host_xml) else: a_host_xml = self.store_remote_cib(a_host) diff_cmd="crm_diff -c -VV -f -N \'%s\' -O '%s'" % (a_host_xml, first_host_xml) infile, outfile, errfile = os.popen3(diff_cmd) diff_lines = outfile.readlines() for line in diff_lines: if not re.search("", line): passed = 0 self.CM.debug("CibDiff[%s-%s]: %s" % (first_host, a_host, line)) else: self.CM.debug("CibDiff[%s-%s] Ignoring: %s" % (first_host, a_host, line)) diff_lines = errfile.readlines() for line in diff_lines: passed = 0 self.CM.log("CibDiff[%s-%s] ERROR: %s" % (first_host, a_host, line)) return passed def store_remote_cib(self, node): combined = "" first_line = 1 extra_debug = 0 #self.CM.debug("\tRetrieving CIB from: %s" % node) (rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], None) if extra_debug: self.CM.debug("Start Cib[%s]" % node) for line in lines: combined = combined + line[:-1] if first_line: self.CM.debug("[Cib]" + line) first_line = 0 elif extra_debug: self.CM.debug("[Cib]" + line) if extra_debug: self.CM.debug("End Cib[%s]" % node) #self.CM.debug("Complete CIB: %s" % combined) return combined def name(self): return "CibAudit" def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 class PartitionAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} self.NodeEpoche={} self.NodeState={} self.NodeQuorum={} def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def __call__(self): self.CM.debug("Do Audit %s"%self.name()) passed = 1 ccm_partitions = self.CM.find_partitions() if ccm_partitions == None or len(ccm_partitions) == 0: return 1 if len(ccm_partitions) != self.CM.partitions_expected: self.CM.log("ERROR: %d cluster partitions detected:" %len(ccm_partitions)) passed = 0 for partition in ccm_partitions: 
self.CM.log("\t %s" %partition) for partition in ccm_partitions: partition_passed = 0 if self.audit_partition(partition) == 0: passed = 0 return passed def trim_string(self, avalue): if not avalue: return None if len(avalue) > 1: return avalue[:-1] def trim2int(self, avalue): if not avalue: return None if len(avalue) > 1: return int(avalue[:-1]) def audit_partition(self, partition): passed = 1 dc_found = [] dc_allowed_list = [] lowest_epoche = None node_list = partition.split() self.CM.debug("Auditing partition: %s" %(partition)) for node in node_list: if self.CM.ShouldBeStatus[node] != "up": self.CM.log("Warn: Node %s appeared out of nowhere" %(node)) self.CM.ShouldBeStatus[node] = "up" # not in itself a reason to fail the audit (not what we're # checking for in this audit) self.NodeState[node] = self.CM.rsh(node, self.CM["StatusCmd"]%node, 1) self.NodeEpoche[node] = self.CM.rsh(node, self.CM["EpocheCmd"], 1) self.NodeQuorum[node] = self.CM.rsh(node, self.CM["QuorumCmd"], 1) self.CM.debug("Node %s: %s - %s - %s." %(node, self.NodeState[node], self.NodeEpoche[node], self.NodeQuorum[node])) self.NodeState[node] = self.trim_string(self.NodeState[node]) self.NodeEpoche[node] = self.trim2int(self.NodeEpoche[node]) self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node]) if not self.NodeEpoche[node]: self.CM.log("Warn: Node %s dissappeared: cant determin epoche" %(node)) self.CM.ShouldBeStatus[node] = "down" # not in itself a reason to fail the audit (not what we're # checking for in this audit) elif lowest_epoche == None or self.NodeEpoche[node] < lowest_epoche: lowest_epoche = self.NodeEpoche[node] if not lowest_epoche: self.CM.log("Lowest epoche not determined in %s" % (partition)) passed = 0 for node in node_list: if self.CM.ShouldBeStatus[node] == "up": if self.CM.is_node_dc(node, self.NodeState[node]): dc_found.append(node) if self.NodeEpoche[node] == lowest_epoche: self.CM.debug("%s: OK" % node) elif not self.NodeEpoche[node]: self.CM.debug("Check on %s ignored: no node epoche" % node) elif not lowest_epoche: self.CM.debug("Check on %s ignored: no lowest epoche" % node) else: self.CM.log("DC %s is not the oldest node (%d vs. 
%d)" %(node, self.NodeEpoche[node], lowest_epoche)) passed = 0 if len(dc_found) == 0: self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)" %(len(dc_allowed_list), str(dc_allowed_list), str(node_list))) elif len(dc_found) > 1: self.CM.log("%d DCs (%s) found in cluster partition: %s" %(len(dc_found), str(dc_found), str(node_list))) passed = 0 if passed == 0: for node in node_list: if self.CM.ShouldBeStatus[node] == "up": self.CM.log("epoche %s : %s" %(self.NodeEpoche[node], self.NodeState[node])) return passed def name(self): return "PartitionAudit" def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 AllAuditClasses.append(DiskAudit) AllAuditClasses.append(LogAudit) AllAuditClasses.append(CrmdStateAudit) AllAuditClasses.append(PartitionAudit) AllAuditClasses.append(PrimitiveAudit) AllAuditClasses.append(GroupAudit) AllAuditClasses.append(CloneAudit) AllAuditClasses.append(ColocationAudit) AllAuditClasses.append(CIBAudit) def AuditList(cm): result = [] for auditclass in AllAuditClasses: result.append(auditclass(cm)) return result diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 256ab898cb..402a99640f 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,1512 +1,1518 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GETOPT_H # include #endif void usage(const char *cmd, int exit_status); gboolean do_force = FALSE; gboolean BE_QUIET = FALSE; const char *attr_set_type = XML_TAG_ATTR_SETS; char *host_id = NULL; const char *rsc_id = NULL; const char *host_uname = NULL; const char *prop_name = NULL; const char *prop_value = NULL; const char *rsc_type = NULL; const char *prop_id = NULL; const char *prop_set = NULL; char *migrate_lifetime = NULL; char rsc_cmd = 'L'; char *our_pid = NULL; IPC_Channel *crmd_channel = NULL; char *xml_file = NULL; int cib_options = cib_sync_call; #define OPTARGS "V?LRQxDCPp:WMUr:H:h:v:t:p:g:d:i:s:G:S:fX:lmu:FOocqN:" #define CMD_ERR(fmt, args...) 
do { \ crm_warn(fmt, ##args); \ fprintf(stderr, fmt, ##args); \ } while(0) static int do_find_resource(const char *rsc, pe_working_set_t *data_set) { int found = 0; resource_t *the_rsc = pe_find_resource(data_set->resources, rsc); if(the_rsc == NULL) { return cib_NOTEXISTS; } slist_iter(node, node_t, the_rsc->running_on, lpc, crm_debug_3("resource %s is running on: %s", rsc, node->details->uname); if(BE_QUIET) { fprintf(stdout, "%s\n", node->details->uname); } else { fprintf(stdout, "resource %s is running on: %s\n", rsc, node->details->uname); } found++; ); if(BE_QUIET == FALSE && found == 0) { fprintf(stderr, "resource %s is NOT running\n", rsc); } return 0; } #define cons_string(x) x?x:"NA" static void print_cts_constraints(pe_working_set_t *data_set) { xmlNode *lifetime = NULL; xmlNode * cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); xml_child_iter(cib_constraints, xml_obj, const char *id = crm_element_value(xml_obj, XML_ATTR_ID); if(id == NULL) { continue; } lifetime = first_named_child(xml_obj, "lifetime"); if(test_ruleset(lifetime, NULL, data_set->now) == FALSE) { continue; } if(safe_str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj))) { printf("Constraint %s %s %s %s %s %s %s\n", crm_element_name(xml_obj), cons_string(crm_element_value(xml_obj, XML_ATTR_ID)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)), cons_string(crm_element_value(xml_obj, XML_RULE_ATTR_SCORE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE)), cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE))); } else if(safe_str_eq(XML_CONS_TAG_RSC_LOCATION, crm_element_name(xml_obj))) { /* unpack_rsc_location(xml_obj, data_set); */ } ); } static void print_cts_rsc(resource_t *rsc) { + const char *host = NULL; gboolean needs_quorum = TRUE; const char *p_id = "NA"; const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE); if(rsc->parent) { p_id = rsc->parent->id; } if(safe_str_eq(rclass, "stonith")) { needs_quorum = FALSE; } else { xml_child_iter_filter(rsc->ops_xml, op, "op", const char *name = crm_element_value(op, "name"); if(safe_str_neq(name, CRMD_ACTION_START)) { const char *value = crm_element_value(op, "requires"); if(safe_str_eq(value, "nothing")) { needs_quorum = FALSE; } break; } ); } - - printf("Resource: %s %s %s %s %d %d %d %s %s %s\n", + + if(rsc->running_on != NULL && g_list_length(rsc->running_on) == 1) { + node_t *tmp = rsc->running_on->data; + host = tmp->details->uname; + } + + printf("Resource: %s %s %s %s %d %d %d %s %s %s %s\n", crm_element_name(rsc->xml), rsc->id, rsc->clone_name?rsc->clone_name:rsc->id, p_id, is_set(rsc->flags, pe_rsc_managed), needs_quorum, - is_set(rsc->flags, pe_rsc_unique), rprov?rprov:"NA", rclass, rtype); + is_set(rsc->flags, pe_rsc_unique), rprov?rprov:"NA", rclass, rtype, host?host:"NA"); slist_iter(child, resource_t, rsc->children, lpc, print_cts_rsc(child); ); } static void print_raw_rsc(resource_t *rsc) { GListPtr children = rsc->children; if(children == NULL) { printf("%s\n", rsc->id); } slist_iter(child, resource_t, children, lpc, print_raw_rsc(child); ); } static int do_find_resource_list(pe_working_set_t *data_set, gboolean raw) { int found = 0; slist_iter( rsc, resource_t, data_set->resources, lpc, if(is_set(rsc->flags, pe_rsc_orphan) && rsc->fns->active(rsc, TRUE) == 
FALSE) { continue; } rsc->fns->print( rsc, NULL, pe_print_printf|pe_print_rsconly, stdout); found++; ); if(found == 0) { printf("NO resources configured\n"); return cib_NOTEXISTS; } return 0; } static resource_t *find_rsc_or_clone(const char *rsc, pe_working_set_t *data_set) { resource_t *the_rsc = pe_find_resource(data_set->resources, rsc); if(the_rsc == NULL) { char *as_clone = crm_concat(rsc, "0", ':'); the_rsc = pe_find_resource(data_set->resources, as_clone); crm_free(as_clone); } return the_rsc; } static int dump_resource(const char *rsc, pe_working_set_t *data_set) { char *rsc_xml = NULL; resource_t *the_rsc = find_rsc_or_clone(rsc, data_set); if(the_rsc == NULL) { return cib_NOTEXISTS; } the_rsc->fns->print(the_rsc, NULL, pe_print_printf, stdout); rsc_xml = dump_xml_formatted(the_rsc->xml); fprintf(stdout, "raw xml:\n%s", rsc_xml); crm_free(rsc_xml); return 0; } static int dump_resource_attr( const char *rsc, const char *attr, pe_working_set_t *data_set) { int rc = cib_NOTEXISTS; node_t *current = NULL; GHashTable *params = NULL; resource_t *the_rsc = find_rsc_or_clone(rsc, data_set); const char *value = NULL; if(the_rsc == NULL) { return cib_NOTEXISTS; } if(g_list_length(the_rsc->running_on) == 1) { current = the_rsc->running_on->data; } else if(g_list_length(the_rsc->running_on) > 1) { CMD_ERR("%s is active on more than one node," " returning the default value for %s\n", the_rsc->id, crm_str(value)); } params = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); get_rsc_attributes(params, the_rsc, current, data_set); crm_debug("Looking up %s in %s", attr, the_rsc->id); value = g_hash_table_lookup(params, attr); if(value != NULL) { fprintf(stdout, "%s\n", value); rc = 0; } g_hash_table_destroy(params); return rc; } static int find_resource_attr( cib_t *the_cib, const char *attr, const char *rsc, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int offset = 0; static int xpath_max = 1024; enum cib_errors rc = cib_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; CRM_ASSERT(value != NULL); *value = NULL; crm_malloc0(xpath_string, xpath_max); offset += snprintf(xpath_string + offset, xpath_max - offset, "%s", get_object_path("resources")); offset += snprintf(xpath_string + offset, xpath_max - offset, "//*[@id=\"%s\"]", rsc); if(set_type) { offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s", set_type); if(set_name) { offset += snprintf(xpath_string + offset, xpath_max - offset, "[@id=\"%s\"]", set_name); } } offset += snprintf(xpath_string + offset, xpath_max - offset, "//nvpair["); if(attr_id) { offset += snprintf(xpath_string + offset, xpath_max - offset, "@id=\"%s\"", attr_id); } if(attr_name) { if(attr_id) { offset += snprintf(xpath_string + offset, xpath_max - offset, " and "); } offset += snprintf(xpath_string + offset, xpath_max - offset, "@name=\"%s\"", attr_name); } offset += snprintf(xpath_string + offset, xpath_max - offset, "]"); rc = the_cib->cmds->query( the_cib, xpath_string, &xml_search, cib_sync_call|cib_scope_local|cib_xpath); if(rc != cib_ok) { return rc; } crm_log_xml_debug(xml_search, "Match"); if(xml_has_children(xml_search)) { rc = cib_missing_data; printf("Multiple attributes match name=%s\n", attr_name); xml_child_iter(xml_search, child, printf(" Value: %s \t(id=%s)\n", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); ); } else { const char *tmp = crm_element_value(xml_search, attr); if(tmp) { *value = crm_strdup(tmp); } } 
free_xml(xml_search); return rc; } static int set_resource_attr(const char *rsc_id, const char *attr_set, const char *attr_id, const char *attr_name, const char *attr_value, cib_t *cib, pe_working_set_t *data_set) { int rc = cib_ok; char *local_attr_id = NULL; char *local_attr_set = NULL; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; gboolean use_attributes_tag = FALSE; resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); if(rsc == NULL) { return cib_NOTEXISTS; } if(safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { rc = find_resource_attr( cib, XML_ATTR_ID, rsc_id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &local_attr_id); if(rc == cib_ok) { printf("WARNING: There is already a meta attribute called %s (id=%s)\n", attr_name, local_attr_id); } } rc = find_resource_attr( cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == cib_ok) { crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); attr_id = local_attr_id; } else if(rc != cib_NOTEXISTS) { return rc; } else { const char *value = NULL; xmlNode *cib_top = NULL; const char *tag = crm_element_name(rsc->xml); rc = cib->cmds->query(cib, "/cib", &cib_top, cib_sync_call|cib_scope_local|cib_xpath|cib_no_children); value = crm_element_value(cib_top, "ignore_dtd"); if(value != NULL) { use_attributes_tag = TRUE; } else { value = crm_element_value(cib_top, XML_ATTR_VALIDATION); if(value && strstr(value, "-0.6")) { use_attributes_tag = TRUE; } } free_xml(cib_top); if(attr_set == NULL) { local_attr_set = crm_concat(rsc_id, attr_set_type, '-'); attr_set = local_attr_set; } if(attr_id == NULL) { local_attr_id = crm_concat(attr_set, attr_name, '-'); attr_id = local_attr_id; } if(use_attributes_tag && safe_str_eq(tag, XML_CIB_TAG_MASTER)) { tag = "master_slave"; /* use the old name */ } xml_top = create_xml_node(NULL, tag); crm_xml_add(xml_top, XML_ATTR_ID, rsc_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); if(use_attributes_tag) { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTRS); } } xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); if(xml_top == NULL) { xml_top = xml_obj; } crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, attr_value); crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); free_xml(xml_top); crm_free(local_attr_id); crm_free(local_attr_set); return rc; } static int delete_resource_attr( const char *rsc_id, const char *attr_set, const char *attr_id, const char *attr_name, cib_t *cib, pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; int rc = cib_ok; char *local_attr_id = NULL; resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); if(rsc == NULL) { return cib_NOTEXISTS; } rc = find_resource_attr( cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == cib_NOTEXISTS) { return cib_ok; } else if(rc != cib_ok) { return rc; } if(attr_id == NULL) { attr_id = local_attr_id; } xml_obj = create_xml_node(NULL, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_log_xml_debug(xml_obj, "Delete"); rc = cib->cmds->delete(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); if(rc == cib_ok) { printf("Deleted %s option: id=%s%s%s%s%s\n", rsc_id, local_attr_id, attr_set?" set=":"", attr_set?attr_set:"", attr_name?" 
name=":"", attr_name?attr_name:""); } free_xml(xml_obj); crm_free(local_attr_id); return rc; } static int dump_resource_prop( const char *rsc, const char *attr, pe_working_set_t *data_set) { const char *value = NULL; resource_t *the_rsc = pe_find_resource(data_set->resources, rsc); if(the_rsc == NULL) { return cib_NOTEXISTS; } value = crm_element_value(the_rsc->xml, attr); if(value != NULL) { fprintf(stdout, "%s\n", value); return 0; } return cib_NOTEXISTS; } static void resource_ipc_connection_destroy(gpointer user_data) { crm_info("Connection to CRMd was terminated"); exit(1); } static gboolean crmd_msg_callback(IPC_Channel * server, void *private_data) { int lpc = 0; IPC_Message *msg = NULL; gboolean hack_return_good = TRUE; while (server->ch_status != IPC_DISCONNECT && server->ops->is_message_pending(server) == TRUE) { if (server->ops->recv(server, &msg) != IPC_OK) { perror("Receive failure:"); return !hack_return_good; } if (msg == NULL) { crm_debug_4("No message this time"); continue; } lpc++; msg->msg_done(msg); } if (server->ch_status == IPC_DISCONNECT) { crm_debug_2("admin_msg_callback: received HUP"); return !hack_return_good; } return hack_return_good; } static int send_lrm_rsc_op(IPC_Channel *crmd_channel, const char *op, const char *host_uname, const char *rsc_id, gboolean only_failed, pe_working_set_t *data_set) { char *key = NULL; int rc = cib_send_failed; xmlNode *cmd = NULL; xmlNode *xml_rsc = NULL; const char *value = NULL; xmlNode *params = NULL; xmlNode *msg_data = NULL; resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if(rsc == NULL) { CMD_ERR("Resource %s not found\n", rsc_id); return cib_NOTEXISTS; } else if(rsc->variant != pe_native) { CMD_ERR("We can only process primitive resources, not %s\n", rsc_id); return cib_invalid_argument; } else if(host_uname == NULL) { CMD_ERR("Please supply a hostname with -H\n"); return cib_invalid_argument; } key = crm_concat("0:0:crm-resource", our_pid, '-'); msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); crm_free(key); xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); if(rsc->clone_name) { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id); } else { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->long_name); } value = crm_element_value(rsc->xml, XML_ATTR_TYPE); crm_xml_add(xml_rsc, XML_ATTR_TYPE, value); if(value == NULL) { CMD_ERR("%s has no type! Aborting...\n", rsc_id); return cib_NOTEXISTS; } value = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, value); if(value == NULL) { CMD_ERR("%s has no class! 
Aborting...\n", rsc_id); return cib_NOTEXISTS; } value = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, value); params = create_xml_node(msg_data, XML_TAG_ATTRS); crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); key = crm_meta_name(XML_LRM_ATTR_INTERVAL); crm_xml_add(params, key, "60000"); /* 1 minute */ crm_free(key); cmd = create_request(op, msg_data, host_uname, CRM_SYSTEM_CRMD, crm_system_name, our_pid); /* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */ free_xml(msg_data); if(send_ipc_message(crmd_channel, cmd)) { rc = 0; sleep(1); /* dont exit striaght away, give the crmd time * to process our request */ } else { CMD_ERR("Could not send %s op to the crmd", op); } free_xml(cmd); return rc; } static int delete_lrm_rsc(IPC_Channel *crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id, TRUE, data_set); } static int fail_lrm_rsc(IPC_Channel *crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { crm_warn("Failing: %s", rsc_id); return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set); } static int refresh_lrm(IPC_Channel *crmd_channel, const char *host_uname) { xmlNode *cmd = NULL; int rc = cib_send_failed; cmd = create_request(CRM_OP_LRM_REFRESH, NULL, host_uname, CRM_SYSTEM_CRMD, crm_system_name, our_pid); if(send_ipc_message(crmd_channel, cmd)) { rc = 0; } free_xml(cmd); return rc; } static int migrate_resource( const char *rsc_id, const char *existing_node, const char *preferred_node, cib_t * cib_conn) { char *later_s = NULL; enum cib_errors rc = cib_ok; char *id = NULL; xmlNode *rule = NULL; xmlNode *expr = NULL; xmlNode *constraints = NULL; xmlNode *fragment = NULL; xmlNode *can_run = NULL; xmlNode *dont_run = NULL; fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); constraints = fragment; id = crm_concat("cli-prefer", rsc_id, '-'); can_run = create_xml_node(NULL, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(can_run, XML_ATTR_ID, id); crm_free(id); id = crm_concat("cli-standby", rsc_id, '-'); dont_run = create_xml_node(NULL, XML_CONS_TAG_RSC_LOCATION); crm_xml_add(dont_run, XML_ATTR_ID, id); crm_free(id); if(migrate_lifetime) { char *life = crm_strdup(migrate_lifetime); char *life_mutable = life; ha_time_t *now = NULL; ha_time_t *later = NULL; ha_time_t *duration = parse_time_duration(&life_mutable); if(duration == NULL) { CMD_ERR("Invalid duration specified: %s\n", migrate_lifetime); CMD_ERR("Please refer to" " http://en.wikipedia.org/wiki/ISO_8601#Duration" " for examples of valid durations\n"); crm_free(life); return cib_invalid_argument; } now = new_ha_date(TRUE); later = add_time(now, duration); log_date(LOG_INFO, "now ", now, ha_log_date|ha_log_time); log_date(LOG_INFO, "later ", later, ha_log_date|ha_log_time); log_date(LOG_INFO, "duration", duration, ha_log_date|ha_log_time|ha_log_local); later_s = date_to_string(later, ha_log_date|ha_log_time); printf("Migration will take effect until: %s\n", later_s); free_ha_date(duration); free_ha_date(later); free_ha_date(now); crm_free(life); } if(existing_node == NULL) { crm_log_xml_notice(can_run, "Deleting"); rc = cib_conn->cmds->delete( cib_conn, XML_CIB_TAG_CONSTRAINTS, dont_run, cib_options); if(rc == cib_NOTEXISTS) { rc = cib_ok; } else if(rc != cib_ok) { goto bail; } } else { if(BE_QUIET == FALSE) { fprintf(stderr, "WARNING: Creating rsc_location constraint '%s'" " with a score of -INFINITY 
for resource %s" " on %s.\n", ID(dont_run), rsc_id, existing_node); CMD_ERR("\tThis will prevent %s from running" " on %s until the constraint is removed using" " the 'crm_resource -U' command or manually" " with cibadmin\n", rsc_id, existing_node); CMD_ERR("\tThis will be the case even if %s is" " the last node in the cluster\n", existing_node); CMD_ERR("\tThis message can be disabled with -Q\n"); } crm_xml_add(dont_run, "rsc", rsc_id); rule = create_xml_node(dont_run, XML_TAG_RULE); expr = create_xml_node(rule, XML_TAG_EXPRESSION); id = crm_concat("cli-standby-rule", rsc_id, '-'); crm_xml_add(rule, XML_ATTR_ID, id); crm_free(id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, MINUS_INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); id = crm_concat("cli-standby-expr", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, "#uname"); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, existing_node); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); if(later_s) { expr = create_xml_node(rule, "date_expression"); id = crm_concat("cli-standby-lifetime-end",rsc_id,'-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } add_node_copy(constraints, dont_run); } if(preferred_node == NULL) { crm_log_xml_notice(can_run, "Deleting"); rc = cib_conn->cmds->delete( cib_conn, XML_CIB_TAG_CONSTRAINTS, can_run, cib_options); if(rc == cib_NOTEXISTS) { rc = cib_ok; } else if(rc != cib_ok) { goto bail; } } else { crm_xml_add(can_run, "rsc", rsc_id); rule = create_xml_node(can_run, XML_TAG_RULE); expr = create_xml_node(rule, XML_TAG_EXPRESSION); id = crm_concat("cli-prefer-rule", rsc_id, '-'); crm_xml_add(rule, XML_ATTR_ID, id); crm_free(id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); id = crm_concat("cli-prefer-expr", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, "#uname"); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, preferred_node); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); if(later_s) { expr = create_xml_node(rule, "date_expression"); id = crm_concat("cli-prefer-lifetime-end", rsc_id, '-'); crm_xml_add(expr, XML_ATTR_ID, id); crm_free(id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } add_node_copy(constraints, can_run); } if(preferred_node != NULL || existing_node != NULL) { crm_log_xml_notice(fragment, "CLI Update"); rc = cib_conn->cmds->update( cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); } bail: free_xml(fragment); free_xml(dont_run); free_xml(can_run); crm_free(later_s); return rc; } static int list_resource_operations( const char *rsc_id, const char *host_uname, gboolean active, pe_working_set_t *data_set) { resource_t *rsc = NULL; int opts = pe_print_printf|pe_print_rsconly|pe_print_suppres_nl; GListPtr ops = find_operations(rsc_id, host_uname, active, data_set); slist_iter(xml_op, xmlNode, ops, lpc, const char *op_rsc = crm_element_value(xml_op, "resource"); const char *last = crm_element_value(xml_op, "last_run"); const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); int status = crm_parse_int(status_s, "0"); rsc = pe_find_resource(data_set->resources, op_rsc); rsc->fns->print(rsc, "", opts, stdout); fprintf(stdout, ": %s (node=%s, call=%s, rc=%s", ID(xml_op), crm_element_value(xml_op, 
static int
list_resource_operations(const char *rsc_id, const char *host_uname,
                         gboolean active, pe_working_set_t *data_set)
{
    resource_t *rsc = NULL;
    int opts = pe_print_printf|pe_print_rsconly|pe_print_suppres_nl;
    GListPtr ops = find_operations(rsc_id, host_uname, active, data_set);

    slist_iter(
        xml_op, xmlNode, ops, lpc,

        const char *op_rsc = crm_element_value(xml_op, "resource");
        const char *last = crm_element_value(xml_op, "last_run");
        const char *status_s = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS);
        int status = crm_parse_int(status_s, "0");

        rsc = pe_find_resource(data_set->resources, op_rsc);
        rsc->fns->print(rsc, "", opts, stdout);

        fprintf(stdout, ": %s (node=%s, call=%s, rc=%s",
                ID(xml_op),
                crm_element_value(xml_op, XML_ATTR_UNAME),
                crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                crm_element_value(xml_op, XML_LRM_ATTR_RC));

        if(last) {
            time_t run_at = crm_parse_int(last, "0");
            fprintf(stdout, ", last-run=%s, exec=%sms\n",
                    ctime(&run_at),
                    crm_element_value(xml_op, "exec_time"));
        }

        fprintf(stdout, "): %s\n", op_status2text(status));
        );

    return cib_ok;
}
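/*
 * Shape of the output produced above, with invented values; the leading
 * resource description comes from rsc->fns->print():
 *
 *   myRsc (ocf::heartbeat:Dummy): myRsc_monitor_60000 (node=node1, call=12,
 *       rc=0, last-run=Thu Jan  1 12:00:00 2009, exec=20ms): complete
 */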
int
main(int argc, char **argv)
{
    pe_working_set_t data_set;
    xmlNode *cib_xml_copy = NULL;

    cib_t *cib_conn = NULL;
    enum cib_errors rc = cib_ok;

    int argerr = 0;
    int flag;

#ifdef HAVE_GETOPT_H
    int option_index = 0;
    static struct option long_options[] = {
        /* Top-level Options */
        {"verbose",             0, 0, 'V'},
        {"help",                0, 0, '?'},
        {"quiet",               0, 0, 'Q'},
        {"list",                0, 0, 'L'},
        {"list-raw",            0, 0, 'l'},
        {"list-cts",            0, 0, 'c'},
        {"refresh",             0, 0, 'R'},
        {"reprobe",             0, 0, 'P'},
        {"query-xml",           0, 0, 'q'},
        {"delete",              0, 0, 'D'},
        {"cleanup",             0, 0, 'C'},
        {"locate",              0, 0, 'W'},
        {"migrate",             0, 0, 'M'},
        {"un-migrate",          0, 0, 'U'},
        {"resource",            1, 0, 'r'},
        {"host-uname",          1, 0, 'H'}, /* legacy */
        {"node",                1, 0, 'N'},
        {"lifetime",            1, 0, 'u'},
        {"fail",                0, 0, 'F'},
        {"force",               0, 0, 'f'},
        {"meta",                0, 0, 'm'},
        {"list-operations",     0, 0, 'O'},
        {"list-all-operations", 0, 0, 'o'},
        {"set-parameter",       1, 0, 'p'},
        {"get-parameter",       1, 0, 'g'},
        {"delete-parameter",    1, 0, 'd'},
        {"property-value",      1, 0, 'v'},
        {"get-property",        1, 0, 'G'},
        {"set-property",        1, 0, 'S'},
        {"set-name",            1, 0, 's'},
        {"resource-type",       1, 0, 't'},
        {"xml-file",            0, 0, 'x'},

        {0, 0, 0, 0}
    };
#endif

    crm_log_init(basename(argv[0]), LOG_ERR, FALSE, FALSE, argc, argv);

    if(argc < 2) {
        usage(crm_system_name, LSB_EXIT_EINVAL);
    }

    while (1) {
#ifdef HAVE_GETOPT_H
        flag = getopt_long(argc, argv, OPTARGS, long_options, &option_index);
#else
        flag = getopt(argc, argv, OPTARGS);
#endif
        if (flag == -1)
            break;

        switch(flag) {
            case 'V':
                cl_log_enable_stderr(TRUE);
                alter_debug(DEBUG_INC);
                break;
            case '?':
                usage(crm_system_name, LSB_EXIT_OK);
                break;
            case 'X':
                xml_file = crm_strdup(optarg);
                break;
            case 'Q':
                BE_QUIET = TRUE;
                break;
            case 'm':
                attr_set_type = XML_TAG_META_SETS;
                break;
            case 'L':
            case 'c':
            case 'l':
            case 'R':
            case 'q':
            case 'D':
            case 'F':
            case 'C':
            case 'P':
            case 'W':
            case 'M':
            case 'U':
                rsc_cmd = flag;
                break;
            case 'u':
                migrate_lifetime = crm_strdup(optarg);
                break;
            case 'p':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_name = optarg;
                rsc_cmd = flag;
                break;
            case 'g':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_name = optarg;
                rsc_cmd = flag;
                break;
            case 'd':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_name = optarg;
                rsc_cmd = flag;
                break;
            case 'S':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_name = optarg;
                rsc_cmd = flag;
                break;
            case 'O':
            case 'o':
                crm_debug_2("Option %c => %s", flag, optarg);
                rsc_cmd = flag;
                break;
            case 'G':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_name = optarg;
                rsc_cmd = flag;
                break;
            case 'f':
                do_force = TRUE;
                break;
            case 'i':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_id = optarg;
                break;
            case 's':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_set = optarg;
                break;
            case 'r':
                crm_debug_2("Option %c => %s", flag, optarg);
                rsc_id = optarg;
                break;
            case 'v':
                crm_debug_2("Option %c => %s", flag, optarg);
                prop_value = optarg;
                break;
            case 't':
                crm_debug_2("Option %c => %s", flag, optarg);
                rsc_type = optarg;
                break;
            case 'h':
            case 'H':
            case 'N':
                crm_debug_2("Option %c => %s", flag, optarg);
                host_uname = optarg;
                break;
            default:
                CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported\n",
                        flag, flag);
                ++argerr;
                break;
        }
    }

    if (optind < argc && argv[optind] != NULL) {
        CMD_ERR("non-option ARGV-elements: ");
        while (optind < argc && argv[optind] != NULL) {
            CMD_ERR("%s ", argv[optind++]);
            ++argerr;
        }
        CMD_ERR("\n");
    }

    if (optind > argc) {
        ++argerr;
    }

    if (argerr) {
        usage(crm_system_name, LSB_EXIT_GENERIC);
    }

    crm_malloc0(our_pid, 11);
    if(our_pid != NULL) {
        snprintf(our_pid, 10, "%d", getpid());
        our_pid[10] = '\0';
    }

    if(do_force) {
        crm_debug("Forcing...");
        cib_options |= cib_scope_local|cib_quorum_override;
    }

    if(rsc_cmd == 'L' || rsc_cmd == 'O' || rsc_cmd == 'o'
       || rsc_cmd == 'W' || rsc_cmd == 'D' || rsc_cmd == 'q'
       || rsc_cmd == 'M' || rsc_cmd == 'U' || rsc_cmd == 'C'
       || rsc_cmd == 'F' || rsc_cmd == 'p' || rsc_cmd == 'd'
       || rsc_cmd == 'g' || rsc_cmd == 'G' || rsc_cmd == 'S'
       || rsc_cmd == 'c' || rsc_cmd == 'l') {
        resource_t *rsc = NULL;

        if(xml_file != NULL) {
            cib_xml_copy = filename2xml(xml_file);

        } else {
            cib_conn = cib_new();
            rc = cib_conn->cmds->signon(
                cib_conn, crm_system_name, cib_command);
            if(rc != cib_ok) {
                CMD_ERR("Error signing on to the CIB service: %s\n",
                        cib_error2string(rc));
                return rc;
            }

            cib_xml_copy = get_cib_copy(cib_conn);
        }

        set_working_set_defaults(&data_set);
        if(cli_config_update(&cib_xml_copy, NULL) == FALSE) {
            return cib_STALE;
        }

        data_set.input = cib_xml_copy;
        data_set.now = new_ha_date(TRUE);

        cluster_status(&data_set);
        if(rsc_id) {
            rsc = find_rsc_or_clone(rsc_id, &data_set);
        }
        if(rsc == NULL) {
            rc = cib_NOTEXISTS;
        }
    }

    if(rsc_cmd == 'R' || rsc_cmd == 'C' || rsc_cmd == 'F' || rsc_cmd == 'P') {
        GCHSource *src = NULL;
        src = init_client_ipc_comms(CRM_SYSTEM_CRMD, crmd_msg_callback,
                                    NULL, &crmd_channel);
        if(src == NULL) {
            CMD_ERR("Error signing on to the CRMd service\n");
            return 1;
        }

        send_hello_message(
            crmd_channel, our_pid, crm_system_name, "0", "1");

        set_IPC_Channel_dnotify(src, resource_ipc_connection_destroy);
    }

    if(rsc_cmd == 'L') {
        rc = cib_ok;
        do_find_resource_list(&data_set, FALSE);

    } else if(rsc_cmd == 'l') {
        int found = 0;
        rc = cib_ok;
        slist_iter(
            rsc, resource_t, data_set.resources, lpc,
            found++;
            print_raw_rsc(rsc);
            );

        if(found == 0) {
            printf("NO resources configured\n");
            return cib_NOTEXISTS;
        }

    } else if(rsc_cmd == 'c') {
        int found = 0;
        rc = cib_ok;
        slist_iter(
            rsc, resource_t, data_set.resources, lpc,
            found++;
            print_cts_rsc(rsc);
            );
        print_cts_constraints(&data_set);

    } else if(rsc_cmd == 'C') {
        resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
        if(rsc && rsc->variant != pe_native) {
            fprintf(stderr,
                    "We can only clean up primitive resources and %s is a %s\n",
                    rsc_id, get_resource_typename(rsc->variant));
            rc = cib_NOTEXISTS;

        } else {
            rc = delete_lrm_rsc(crmd_channel, host_uname, rsc_id, &data_set);
        }

        if(rc == cib_ok) {
            char *host_uuid = NULL;
            char *attr_name = crm_concat("fail-count", rsc_id, '-');
            rc = query_node_uuid(cib_conn, host_uname, &host_uuid);

            if(rc != cib_ok) {
                fprintf(stderr, "Could not map uname=%s to a UUID: %s\n",
                        host_uname, cib_error2string(rc));

            } else {
                crm_info("Mapped %s to %s", host_uname, crm_str(host_uuid));
                rc = delete_attr(cib_conn, cib_sync_call, XML_CIB_TAG_STATUS,
                                 host_uuid, NULL, NULL, attr_name, NULL, FALSE);
            }
        }

    } else if(rsc_cmd == 'F') {
        rc = fail_lrm_rsc(crmd_channel, host_uname, rsc_id, &data_set);

    } else if(rsc_cmd == 'O') {
        rc = list_resource_operations(rsc_id, host_uname, TRUE, &data_set);

    } else if(rsc_cmd == 'o') {
        rc = list_resource_operations(rsc_id, host_uname, FALSE, &data_set);

    } else if(rc == cib_NOTEXISTS) {
        CMD_ERR("Resource %s not found: %s\n",
                crm_str(rsc_id), cib_error2string(rc));
    } else if(rsc_cmd == 'W') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        rc = do_find_resource(rsc_id, &data_set);

    } else if(rsc_cmd == 'q') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        rc = dump_resource(rsc_id, &data_set);

    } else if(rsc_cmd == 'U') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        rc = migrate_resource(rsc_id, NULL, NULL, cib_conn);

    } else if(rsc_cmd == 'M') {
        node_t *dest = NULL;
        node_t *current = NULL;
        const char *current_uname = NULL;
        resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);

        if(rsc != NULL && rsc->running_on != NULL) {
            current = rsc->running_on->data;
            if(current != NULL) {
                current_uname = current->details->uname;
            }
        }

        if(host_uname != NULL) {
            dest = pe_find_node(data_set.nodes, host_uname);
        }

        if(rsc == NULL) {
            CMD_ERR("Resource %s not migrated: not found\n", rsc_id);

        } else if(rsc->variant == pe_native
                  && g_list_length(rsc->running_on) > 1) {
            CMD_ERR("Resource %s not migrated:"
                    " active on multiple nodes\n", rsc_id);

        } else if(host_uname != NULL && dest == NULL) {
            CMD_ERR("Error performing operation: "
                    "%s is not a known node\n", host_uname);
            rc = cib_NOTEXISTS;

        } else if(host_uname != NULL
                  && safe_str_eq(current_uname, host_uname)) {
            CMD_ERR("Error performing operation: "
                    "%s is already active on %s\n", rsc_id, host_uname);

        } else if(current_uname != NULL
                  && (do_force || host_uname == NULL)) {
            rc = migrate_resource(rsc_id, current_uname,
                                  host_uname, cib_conn);

        } else if(host_uname != NULL) {
            rc = migrate_resource(rsc_id, NULL, host_uname, cib_conn);

        } else {
            CMD_ERR("Resource %s not migrated: "
                    "not active and no preferred location specified.\n",
                    rsc_id);
            rc = cib_missing;
        }

    } else if(rsc_cmd == 'G') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        rc = dump_resource_prop(rsc_id, prop_name, &data_set);

    } else if(rsc_cmd == 'S') {
        xmlNode *msg_data = NULL;

        if(prop_value == NULL || strlen(prop_value) == 0) {
            CMD_ERR("You need to supply a value with the -v option\n");
            return CIBRES_MISSING_FIELD;

        } else if(cib_conn == NULL) {
            return cib_connection;
        }

        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        CRM_DEV_ASSERT(rsc_type != NULL);
        CRM_DEV_ASSERT(prop_name != NULL);
        CRM_DEV_ASSERT(prop_value != NULL);

        msg_data = create_xml_node(NULL, rsc_type);
        crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
        crm_xml_add(msg_data, prop_name, prop_value);

        rc = cib_conn->cmds->modify(
            cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
        free_xml(msg_data);

    } else if(rsc_cmd == 'g') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        rc = dump_resource_attr(rsc_id, prop_name, &data_set);

    } else if(rsc_cmd == 'p') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        if(prop_value == NULL || strlen(prop_value) == 0) {
            CMD_ERR("You need to supply a value with the -v option\n");
            return CIBRES_MISSING_FIELD;
        }
        rc = set_resource_attr(rsc_id, prop_set, prop_id, prop_name,
                               prop_value, cib_conn, &data_set);

    } else if(rsc_cmd == 'd') {
        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        rc = delete_resource_attr(rsc_id, prop_set, prop_id, prop_name,
                                  cib_conn, &data_set);

    } else if(rsc_cmd == 'P') {
        xmlNode *cmd = NULL;

        cmd = create_request(CRM_OP_REPROBE, NULL, host_uname,
                             CRM_SYSTEM_CRMD, crm_system_name, our_pid);
        send_ipc_message(crmd_channel, cmd);
        free_xml(cmd);

    } else if(rsc_cmd == 'R') {
        refresh_lrm(crmd_channel, host_uname);

    } else if(rsc_cmd == 'D') {
        xmlNode *msg_data = NULL;

        if(rsc_id == NULL) {
            CMD_ERR("Must supply a resource id with -r\n");
            return cib_NOTEXISTS;
        }
        if(rsc_type == NULL) {
            CMD_ERR("You need to specify a resource type with -t\n");
            return cib_NOTEXISTS;

        } else if(cib_conn == NULL) {
            return cib_connection;
        }

        msg_data = create_xml_node(NULL, rsc_type);
        crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);

        rc = cib_conn->cmds->delete(
            cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
        free_xml(msg_data);

    } else {
        CMD_ERR("Unknown command: %c\n", rsc_cmd);
    }

    if(cib_conn != NULL) {
        cleanup_calculations(&data_set);
        cib_conn->cmds->signoff(cib_conn);
    }

    if(rc == cib_no_quorum) {
        CMD_ERR("Error performing operation: %s\n", cib_error2string(rc));
        CMD_ERR("Try using -f\n");

    } else if(rc != cib_ok) {
        CMD_ERR("Error performing operation: %s\n", cib_error2string(rc));
    }

    return rc;
}
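/*
 * Illustrative invocations matching the help text below (resource and node
 * names are placeholders):
 *
 *   crm_resource -L                                    # list all resources
 *   crm_resource -W -r myRsc                           # locate a resource
 *   crm_resource -M -r myRsc -N node2 --lifetime PT1H  # move for one hour
 *   crm_resource -U -r myRsc                           # undo -M constraints
 *   crm_resource -D -r myRsc -t primitive              # delete from the CIB
 */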
void
usage(const char *cmd, int exit_status)
{
    FILE *stream;

    stream = exit_status ? stderr : stdout;

    fprintf(stream, "%s -- Perform tasks related to cluster resources.\n"
            " Allows resources to be queried (definition and location),"
            " modified, and moved around the cluster.\n\n", cmd);

    fprintf(stream, "usage: %s [-?VS] -(L|Q|W|D|C|P|p) [options]\n", cmd);
    fprintf(stream, "\t--%s (-%c)\t: this help message\n", "help", '?');
    fprintf(stream, "\t--%s (-%c)\t: "
            "turn on debug info. additional instances increase verbosity\n",
            "verbose", 'V');
    fprintf(stream, "\t--%s (-%c)\t: Print only the value on stdout (for use with -W)\n",
            "quiet", 'Q');

    fprintf(stream, "\nCommands\n");
    fprintf(stream, "\t--%s (-%c)\t: List all resources\n", "list", 'L');
    fprintf(stream, "\t--%s (-%c)\t: Query a resource\n"
            "\t\t\t Requires: -r\n", "query-xml", 'q');
    fprintf(stream, "\t--%s (-%c)\t: Locate a resource\n"
            "\t\t\t Requires: -r\n", "locate", 'W');
    fprintf(stream, "\t--%s (-%c)\t: Migrate a resource from its current"
            " location. Use -N to specify a destination\n"
            "\t\tIf -N is not specified, we will force the resource to move by"
            " creating a rule for the current location and a score of -INFINITY\n"
            "\t\tNOTE: This will prevent the resource from running on this"
            " node until the constraint is removed with -U\n"
            "\t\t\t Requires: -r, Optional: -N, -f, --lifetime\n", "migrate", 'M');
    fprintf(stream, "\t--%s (-%c)\t: Remove all constraints created by -M\n"
            "\t\t\t Requires: -r\n", "un-migrate", 'U');
    fprintf(stream, "\t--%s (-%c)\t: Delete a resource from the CIB\n"
            "\t\t\t Requires: -r, -t\n", "delete", 'D');
    fprintf(stream, "\t--%s (-%c)\t: Delete a resource from the LRM\n"
            "\t\t\t Requires: -r. Optional: -N\n", "cleanup", 'C');
    fprintf(stream, "\t--%s (-%c)\t: Recheck for resources started outside of the CRM\n"
            "\t\t\t Optional: -N\n", "reprobe", 'P');
    fprintf(stream, "\t--%s (-%c)\t: Refresh the CIB from the LRM\n"
            "\t\t\t Optional: -N\n", "refresh", 'R');
    fprintf(stream, "\t--%s (-%c) \t: "
            "Set the named parameter for a resource\n"
            "\t\t\t Requires: -r, -v. Optional: -i, -s, --meta\n", "set-parameter", 'p');
    fprintf(stream, "\t--%s (-%c) \t: "
            "Get the named parameter for a resource\n"
            "\t\t\t Requires: -r. Optional: -i, -s, --meta\n", "get-parameter", 'g');
    fprintf(stream, "\t--%s (-%c) : "
            "Delete the named parameter for a resource\n"
            "\t\t\t Requires: -r. Optional: -i, --meta\n", "delete-parameter", 'd');
    fprintf(stream, "\t--%s (-%c) : "
            "List the active resource operations. Optionally filtered by resource and/or node.\n"
            "\t\t\t Optional: -N, -r\n", "list-operations", 'O');
    fprintf(stream, "\t--%s (-%c) : "
            "List all resource operations. Optionally filtered by resource and/or node.\n"
            "\t\t\t Optional: -N, -r\n", "list-all-operations", 'o');

    fprintf(stream, "\nOptions\n");
    fprintf(stream, "\t--%s (-%c) \t: Resource ID\n", "resource", 'r');
    fprintf(stream, "\t--%s (-%c) \t: "
            "Resource type (primitive, clone, group, ...)\n", "resource-type", 't');
    fprintf(stream, "\t--%s (-%c) \t: "
            "Property value\n", "property-value", 'v');
    fprintf(stream, "\t--%s (-%c) \t: Host uname\n", "node", 'N');
    fprintf(stream, "\t--%s\t: Modify a resource's configuration option rather"
            " than one which is passed to the resource agent script."
            "\n\t\tFor use with -p, -g, -d\n", "meta");
    fprintf(stream, "\t--%s (-%c) \t: "
            "Lifespan of migration constraints\n", "lifetime", 'u');
    fprintf(stream, "\t--%s (-%c)\t: "
            "Force the resource to move by creating a rule for the"
            " current location and a score of -INFINITY\n"
            "\t\tThis should be used if the resource's stickiness and"
            " constraint scores total more than INFINITY (currently 100,000)\n"
            "\t\tNOTE: This will prevent the resource from running on this"
            " node until the constraint is removed with -U or the --lifetime duration expires\n",
            "force", 'f');
    fprintf(stream, "\t-%c \t: (Advanced Use Only) ID of the instance_attributes object to change\n", 's');
    fprintf(stream, "\t-%c \t: (Advanced Use Only) ID of the nvpair object to change/delete\n", 'i');

    fflush(stream);

    exit(exit_status);
}
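/*
 * Example of the --meta distinction (names and values are placeholders, and
 * the exact meta attribute names depend on the Pacemaker version): without
 * --meta, -p sets an instance attribute that is passed to the resource
 * agent; with --meta, it sets a configuration option consumed by the
 * cluster itself:
 *
 *   crm_resource -r myIP -p ip -v 192.168.1.10              # agent parameter
 *   crm_resource -r myIP -p target_role -v stopped --meta   # cluster option
 */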