diff --git a/Makefile.am b/Makefile.am index 04966db3ef..5d1dc68196 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,150 +1,170 @@ # # Pacemaker code # # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # EXTRA_DIST = autogen.sh ConfigureMe README.in libltdl.tar RPM = @RPM@ RPMFLAGS = -ba TARFILE = pacemaker.tar.gz AM_TAR = tar -LAST_RELEASE = 007c3a1c50f5 +LAST_RELEASE = 9b1e9d2785ed STABLE_SERIES = unstable-0.7 +BOMB_ADDRESS = abeekhof@suse.de +BOMB_STATUS := $(shell hg id -i | grep -c +) +BOMB_LAST := $(shell test -e .bomb && cat .bomb) +BOMB_TARGET := $(shell hg id -i) +BOMB_DATE := $(shell python -c 'from time import *; print strftime ("%W, %Y", gmtime(time()))') +BOMB_SUBJECT := Pacemaker $(VERSION) patch update for week $(BOMB_DATE) + AUTOMAKE_OPTIONS = foreign ##ACLOCAL = aclocal -I $(auxdir) PRETTY_ARGS = --braces-on-if-line --braces-on-struct-decl-line --cuddle-do-while --cuddle-else --leave-preprocessor-space --blank-lines-after-declarations --blank-lines-after-procedures -sc --case-indentation4 --no-space-after-function-call-names --no-blank-lines-after-commas --procnames-start-lines --leave-optional-blank-lines --indent-level4 --line-length100 --break-before-boolean-operator --ignore-newlines --no-space-after-if --no-space-after-for --no-space-after-while --paren-indentation4 MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \ DRF/stamp-h.in libtool.m4 ltdl.m4 libltdl.tar SUBDIRS = debian build $(LIBLTDL_DIR) replace include lib cib crmd pengine fencing tools xml doc cts extra tgz: rm -f $(TARFILE) hg archive -t tgz $(TARFILE) echo Rebuilt $(TARFILE) on `date` +bomb: + if [ $(BOMB_STATUS) = 1 ]; then \ + echo "Uncommitted changes exist"; \ + elif [ -e .bomb -a "$(BOMB_TARGET)" != "$(BOMB_LAST)" ]; then \ + hg email --config ui.interactive=false -i -g -d -t $(BOMB_ADDRESS) -s '$(BOMB_SUBJECT)' -r $(BOMB_LAST):tip; \ + echo $(BOMB_TARGET) > .bomb; \ + elif [ ! 
-e .bomb ]; then \ + echo "Initializing patch record"; \ + echo $(BOMB_TARGET) > .bomb; \ + else \ + echo "No patches to send"; \ + fi + changes: printf "$(PACKAGE) ($(VERSION)-1) stable; urgency=medium\n" printf " * Update source tarball to revision: `hg id`\n" printf " * Statistics:\n" printf " Changesets: `hg log -M --template "{desc|firstline|strip}\n" -r $(LAST_RELEASE):tip | wc -l`\n" printf " Diff: " hg diff -r $(LAST_RELEASE):tip | diffstat | tail -n 1 printf "\n * Testing Notes:\n" printf "\n + Test hardware:\n" printf "\n + All testing was performed with STONITH enabled\n" printf "\n + Pending bugs encountered during testing:\n" printf "\n * Changes since $(LAST_RELEASE)\n" hg log -M --template " + {desc|firstline|strip}\n" -r $(LAST_RELEASE):tip | grep -v Low: | sort -uf printf "\n -- Andrew Beekhof `date +"%a, %d %b %Y %T %z"`\n" features: printf "$(PACKAGE) ($(VERSION)-1) unstable; urgency=medium\n" printf " * Update source tarball to revision: `hg id`\n" printf " * Statistics:\n" printf " Changesets: `hg out -M --template "{desc|firstline|strip}\n" ../$(STABLE_SERIES) | wc -l`\n" printf " Diff: " hg out -M -p ../$(STABLE_SERIES) | diffstat | tail -n 1 printf "\n * Changes added since $(STABLE_SERIES)\n" hg out -M --template " + {desc|firstline|strip}\n" ../$(STABLE_SERIES) | grep -v Low: | sort -uf printf "\n -- Andrew Beekhof `date +"%a, %d %b %Y %T %z"`\n" OBS_PREFIX=c001n16.suse.de:Development/obs stable: tgz make changes > .changes scp .changes $(TARFILE) $(OBS_PREFIX)/server:ha-clustering/pacemaker/ unstable: tgz make changes > .changes scp .changes $(TARFILE) $(OBS_PREFIX)/server:ha-clustering:UNSTABLE/pacemaker/ factory: tgz make changes > .changes scp .changes $(TARFILE) $(OBS_PREFIX)/server:ha-clustering:Factory/pacemaker/ global: clean-generic gtags -q global-html: global htags -sanhIT global-www: global-html rsync -avzxlSD --progress HTML/ root@clusterlabs.org:/var/lib/global/pacemaker pretty: for file in `find . 
-name "*.c" | tr '\n' ' '`; do \ gnuindent $(PRETTY_ARGS) $$file; \ done rpmtgz: tgz echo "Installing $(TARFILE) into /usr/src/packages/SOURCES for rpm" -test -d /usr/src/packages/SOURCES && cp $(TARFILE) /usr/src/packages/SOURCES/ -test -d /usr/src/redhat/SOURCES && cp $(TARFILE) /usr/src/redhat/SOURCES/ rpm: rpmtgz $(RPM) $(RPMFLAGS) $(top_srcdir)/pacemaker.spec Copyright (C) 2006 International Business Machines ''' from UserDict import UserDict import sys, time, types, syslog, os, struct, string, signal, traceback from CTS import ClusterManager from CM_hb import HeartbeatCM class CIB: cib_option_template = ''' ''' ipaddr_template = ''' ''' hb_ipaddr_template = ''' ''' lsb_resource = ''' - + ''' dummy_resource_template = ''' ''' clustermon_resource_template = ''' ''' clustermon_location_constraint = ''' ''' master_slave_resource = ''' ''' resource_group_template = '''%s %s %s''' per_node_constraint_template = ''' ''' stonith_resource_template = """ """ cib_template =''' %s %s %s ''' def NextIP(self): fields = string.split(self.CM.Env["IPBase"], '.') fields[3] = str(int(fields[3])+1) ip = string.join(fields, '.') self.CM.Env["IPBase"]=ip return ip def __init__(self, CM): self.CM = CM #make up crm config cib_options = self.cib_option_template % CM.Env["DoFencing"] #create resources and their constraints resources = "" constraints = "" if self.CM.Env["DoBSC"] == 1: cib_options = cib_options + ''' ''' if self.CM.Env["CIBResource"] != 1: # generate cib self.cts_cib = self.cib_template % (cib_options, resources, constraints) return if self.CM.cluster_monitor == 1: resources += self.clustermon_resource_template constraints += self.clustermon_location_constraint ip1=self.NextIP() ip2=self.NextIP() ip3=self.NextIP() ip1_rsc = self.ipaddr_template % ("r"+ip1, ip1, ip1, ip1, ip1, ip1) ip2_rsc = self.hb_ipaddr_template % ("r"+ip2, ip2, ip2, ip2, ip2) ip3_rsc = self.ipaddr_template % ("r"+ip3, ip3, ip3, ip3, ip3, ip3) resources += self.resource_group_template % (ip1_rsc, ip2_rsc, ip3_rsc) # lsb resource resources += self.lsb_resource # Mirgator resources += self.dummy_resource_template % \ ("migrator", "migrator", "migrator", "migrator") constraints += """""" constraints += """""" # per node resource fields = string.split(self.CM.Env["IPBase"], '.') for node in self.CM.Env["nodes"]: ip = self.NextIP() per_node_resources = self.ipaddr_template % \ ("rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node, ip) per_node_constraint = self.per_node_constraint_template % \ ("rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node, node) resources += per_node_resources constraints += per_node_constraint # fencing resource nodelist = "" len = 0 for node in self.CM.Env["nodes"]: nodelist += node + " " len = len + 1 if CM.Env["DoFencing"]: stonith_resource = self.stonith_resource_template % \ (self.CM.Env["reset"].stonithtype, self.CM.Env["reset"].configName, self.CM.Env["reset"].configValue) resources += stonith_resource #master slave resource resources += self.master_slave_resource % (len, 1, 1, 1) # generate cib self.cts_cib = self.cib_template % (cib_options, resources, constraints) def cib(self): return self.cts_cib diff --git a/cts/CTS.py.in b/cts/CTS.py.in index 53f990dc52..d12eb841c5 100755 --- a/cts/CTS.py.in +++ b/cts/CTS.py.in @@ -1,1189 +1,1189 @@ #!@PYTHON@ '''CTS: Cluster Testing System: Main module Classes related to testing high-availability clusters... Lots of things are implemented. Lots of things are not implemented. We have many more ideas of what to do than we've implemented. 
''' __copyright__=''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import types, string, select, sys, time, re, os, struct, os, signal import base64, pickle, binascii from UserDict import UserDict from syslog import * from popen2 import Popen3 class RemoteExec: '''This is an abstract remote execution class. It runs a command on another machine - somehow. The somehow is up to us. This particular class uses ssh. Most of the work is done by fork/exec of ssh or scp. ''' def __init__(self): # -n: no stdin, -x: no X11 self.Command = "@SSH@ -l root -n -x" # -f: ssh to background self.CommandnoBlock = "@SSH@ -f -l root -n -x" # -B: batch mode, -q: no stats (quiet) self.CpCommand = "@SCP@ -B -q" self.OurNode=string.lower(os.uname()[1]) def setcmd(self, rshcommand): '''Set the name of the remote shell command''' self.Command = rshcommand def _fixcmd(self, cmd): return re.sub("\'", "'\\''", cmd) def _cmd(self, *args): '''Compute the string that will run the given command on the given remote system''' args= args[0] sysname = args[0] command = args[1] #print "sysname: %s, us: %s" % (sysname, self.OurNode) if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost": ret = command else: ret = self.Command + " " + sysname + " '" + self._fixcmd(command) + "'" #print ("About to run %s\n" % ret) return ret def _cmd_noblock(self, *args): '''Compute the string that will run the given command on the given remote system''' args= args[0] sysname = args[0] command = args[1] #print "sysname: %s, us: %s" % (sysname, self.OurNode) if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost": ret = command + " &" else: ret = self.CommandnoBlock + " " + sysname + " '" + self._fixcmd(command) + "'" #print ("About to run %s\n" % ret) return ret def __call__(self, *args): '''Run the given command on the given remote system If you call this class like a function, this is the function that gets called. It just runs it roughly as though it were a system() call on the remote machine. The first argument is name of the machine to run it on. ''' count=0; rc = 0; while count < 3: rc = os.system(self._cmd(args)) if rc == 0: return rc print "Retrying command %s" % self._cmd(args) count=count+1 return rc def popen(self, *args): '''popen the given remote command on the remote system. As in __call__, the first argument is name of the machine to run it on. ''' #print "Now running %s\n" % self._cmd(args) return Popen3(self._cmd(args), None) def readaline(self, *args): '''Run a command on the remote machine and capture 1 line of stdout from the given remote command As in __call__, the first argument is name of the machine to run it on. 
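        A minimal usage sketch (the host name "node1" and the command are
        hypothetical, for illustration only):

            rsh = RemoteExec()
            uptime = rsh.readaline("node1", "uptime")
            if rsh.lastrc == 0:
                print "node1 uptime:", uptime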
        '''
        p = self.popen(args[0], args[1])
        p.tochild.close()
        result = p.fromchild.readline()
        p.fromchild.close()
        self.lastrc = p.wait()
        return result

    def readlines(self, *args):
        p = self.popen(args[0], args[1])
        p.tochild.close()
        result = p.fromchild.readlines()
        p.fromchild.close()
        self.lastrc = p.wait()
        return result

    def cp(self, *args):
        '''Perform a remote copy'''
        cpstring = self.CpCommand
        for arg in args:
            cpstring = cpstring + " \'" + arg + "\'"
        count = 0; rc = 0;
        for i in range(3):
            rc = os.system(cpstring)
            if rc == 0: return rc
            print "Retrying command %s" % cpstring
        return rc

    def echo_cp(self, src_host, src_file, dest_host, dest_file):
        '''Perform a remote copy via echo'''
        (rc, lines) = self.remote_py(src_host, "os", "system", "cat %s" % src_file)
        if rc != 0:
            print "Copy of %s:%s failed" % (src_host, src_file)
        elif dest_host == None:
            fd = open(dest_file, "w")
            fd.writelines(lines)
            fd.close()
        else:
            big_line = ""
            for line in lines:
                big_line = big_line + line
            (rc, lines) = self.remote_py(dest_host, "os", "system",
                "echo '%s' > %s" % (big_line, dest_file))
        return rc

    def noBlock(self, *args):
        '''Perform a remote execution without waiting for it to finish'''
        sshnoBlock = self._cmd_noblock(args)
        count = 0; rc = 0;
        for i in range(3):
            rc = os.system(sshnoBlock)
            if rc == 0: return rc
            print "Retrying command %s" % sshnoBlock
        return rc

    def remote_py(self, node, module, func, *args):
        '''Execute a remote python function

        If the call succeeds, lastrc == 0 and the result is returned.
        If the call fails, lastrc == 1 and the reason (a string) is returned.
        '''
        encode_args = binascii.b2a_base64(pickle.dumps(args))
-        encode_cmd = string.join(["@HA_NOARCHDATAHBDIR@/cts/CTSproxy.py",module,func,encode_args])
+        encode_cmd = string.join(["@datadir@/@PKG_NAME@/cts/CTSproxy.py",module,func,encode_args])

        #print "%s: %s.%s %s" % (node, module, func, repr(args))
        result = self.readlines(node, encode_cmd)
        if result != None:
            result.pop()

        if self.lastrc == 0:
            last_line = ""
            if result != None:
                array_len = len(result)
                if array_len > 0:
                    last_line = result.pop()
            #print "result: %s" % repr(last_line)
            return pickle.loads(binascii.a2b_base64(last_line)), result
        return -1, result

class LogWatcher:
    '''This class watches logs for messages that fit certain regular
    expressions.  Watching logs for events isn't the ideal way to do
    business, but it's better than nothing :-)

    On the other hand, this class is really pretty cool ;-)

    The way you use this class is as follows:
        Construct a LogWatcher object
        Call setwatch() when you want to start watching the log
        Call look() to scan the log looking for the patterns
    '''

    def __init__(self, log, regexes, timeout=10, debug=None):
        '''This is the constructor for the LogWatcher class.  It takes a
        log name to watch, and a list of regular expressions to watch for.
        '''
        #  Validate our arguments.  Better sooner than later ;-)
        for regex in regexes:
            assert re.compile(regex)
        self.regexes = regexes
        self.filename = log
        self.debug = debug
        self.whichmatch = -1
        self.unmatched = None
        if self.debug:
            print "Debug now on for log", log
        self.Timeout = int(timeout)
        self.returnonlymatch = None
        if not os.access(log, os.R_OK):
            raise ValueError("File [" + log + "] not accessible (r)")

    def setwatch(self, frombeginning=None):
        '''Mark the place to start watching the log from.
        '''
        self.file = open(self.filename, "r")
        self.size = os.path.getsize(self.filename)
        if not frombeginning:
            self.file.seek(0,2)

    def ReturnOnlyMatch(self, onlymatch=1):
        '''Set which regex match group look() should return instead of
        the whole matching line.
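        For example (hypothetical pattern and log file; look() would then
        return group 1 of the regex, i.e. the node name, instead of the
        whole matching line):

            watch = LogWatcher("/var/log/ha-log", [r"Node (\S+) now up"])
            watch.ReturnOnlyMatch(1)
            watch.setwatch()
            node = watch.look(60)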
''' self.returnonlymatch = onlymatch def look(self, timeout=None): '''Examine the log looking for the given patterns. It starts looking from the place marked by setwatch(). This function looks in the file in the fashion of tail -f. It properly recovers from log file truncation, but not from removing and recreating the log. It would be nice if it recovered from this as well :-) We return the first line which matches any of our patterns. ''' last_line=None first_line=None if timeout == None: timeout = self.Timeout done=time.time()+timeout+1 if self.debug: print "starting search: timeout=%d" % timeout for regex in self.regexes: print "Looking for regex: ", regex while (timeout <= 0 or time.time() <= done): newsize=os.path.getsize(self.filename) if self.debug > 4: print "newsize = %d" % newsize if newsize < self.size: # Somebody truncated the log! if self.debug: print "Log truncated!" self.setwatch(frombeginning=1) continue if newsize > self.file.tell(): line=self.file.readline() if self.debug > 2: print "Looking at line:", line if line: last_line=line if not first_line: first_line=line if self.debug: print "First line: "+ line which=-1 for regex in self.regexes: which=which+1 if self.debug > 3: print "Comparing line to ", regex #matchobj = re.search(string.lower(regex), string.lower(line)) matchobj = re.search(regex, line) if matchobj: self.whichmatch=which if self.returnonlymatch: return matchobj.group(self.returnonlymatch) else: if self.debug: print "Returning line" return line newsize=os.path.getsize(self.filename) if self.file.tell() == newsize: if timeout > 0: time.sleep(0.025) else: if self.debug: print "End of file" if self.debug: print "Last line: "+last_line return None if self.debug: print "Timeout" if self.debug: print "Last line: "+last_line return None def lookforall(self, timeout=None): '''Examine the log looking for ALL of the given patterns. It starts looking from the place marked by setwatch(). We return when the timeout is reached, or when we have found ALL of the regexes that were part of the watch ''' if timeout == None: timeout = self.Timeout save_regexes = self.regexes returnresult = [] while (len(self.regexes) > 0): oneresult = self.look(timeout) if not oneresult: self.unmatched = self.regexes self.regexes = save_regexes return None returnresult.append(oneresult) del self.regexes[self.whichmatch] self.unmatched = None self.regexes = save_regexes return returnresult # In case we ever want multiple regexes to match a single line... 
#-            del self.regexes[self.whichmatch]
#+            tmp_regexes = self.regexes
#+            self.regexes = []
#+            which = 0
#+            for regex in tmp_regexes:
#+                matchobj = re.search(regex, oneresult)
#+                if not matchobj:
#+                    self.regexes.append(regex)

class NodeStatus:
    def __init__(self, Env):
        self.Env = Env
        self.rsh = RemoteExec()

    def IsNodeBooted(self, node):
        '''Return TRUE if the given node is booted (responds to pings)'''
-        return os.system("@PING@ -nq -c1 @PING_TIMEOUT_OPT@ %s >/dev/null 2>&1" % node) == 0
+        return os.system("ping -nq -c1 -w1 %s >/dev/null 2>&1" % node) == 0

    def IsSshdUp(self, node):
        return self.rsh(node, "true") == 0;

    def WaitForNodeToComeUp(self, node, Timeout=300):
        '''Return TRUE when given node comes up, or None/FALSE if timeout'''
        timeout = Timeout
        anytimeouts = 0
        while timeout > 0:
            if self.IsNodeBooted(node) and self.IsSshdUp(node):
                if anytimeouts:
                    # Fudge to wait for the system to finish coming up
                    time.sleep(30)
                self.Env.log("Node %s now up" % node)
                return 1

            time.sleep(1)
            if (not anytimeouts):
                self.Env.log("Waiting for node %s to come up" % node)
            anytimeouts = 1
            timeout = timeout - 1

        self.Env.log("%s did not come up within %d tries" % (node, Timeout))

    def WaitForAllNodesToComeUp(self, nodes, timeout=300):
        '''Return TRUE when all nodes come up, or FALSE if timeout'''
        for node in nodes:
            if not self.WaitForNodeToComeUp(node, timeout):
                return None
        return 1

class ClusterManager(UserDict):
    '''The Cluster Manager class.

    This is a subclass of the Python dictionary class.
    (this is because it contains lots of {name,value} pairs,
    not because its behavior is terribly similar to a
    dictionary in other ways.)

    This is an abstract class which implements high-level
    operations on the cluster and/or its cluster managers.
    Actual cluster manager classes are subclassed from this type.

    One of the things we do is track the state we think every node should
    be in.
    '''

    def __InitialConditions(self):
        #if os.geteuid() != 0:
        #   raise ValueError("Must Be Root!")
        None

    def _finalConditions(self):
        for key in self.keys():
            if self[key] == None:
                raise ValueError("Improper derivation: self[" + key
                    + "] must be overridden by subclass.")

    def __init__(self, Environment, randseed=None):
        self.Env = Environment
        self.__InitialConditions()
        self.clear_cache = 0
        self.TestLoggingLevel=0
        self.data = {
            "up"             : "up",        # Status meaning up
            "down"           : "down",      # Status meaning down
            "StonithCmd"     : "@sbindir@/stonith -t baytech -p '10.10.10.100 admin admin' %s",
            "DeadTime"       : 30,          # Max time to detect dead node...
            "StartTime"      : 90,          # Max time to start up
    #
    # These next values need to be overridden in the derived class.
    #
            "Name"           : None,
            "StartCmd"       : None,
            "StopCmd"        : None,
            "StatusCmd"      : None,
            "RereadCmd"      : None,
            "StartDRBDCmd"   : None,
            "StopDRBDCmd"    : None,
            "StatusDRBDCmd"  : None,
            "DRBDCheckconf"  : None,
            "BreakCommCmd"   : None,
            "FixCommCmd"     : None,
            "TestConfigDir"  : None,
            "LogFileName"    : None,
            "Pat:Master_started"   : None,
            "Pat:Slave_started"    : None,
            "Pat:We_stopped"       : None,
            "Pat:They_stopped"     : None,
            "BadRegexes"     : None,        # A set of "bad news" regexes
                                            # to apply to the log
        }

        self.rsh = RemoteExec()
        self.ShouldBeStatus={}
        self.OurNode = string.lower(os.uname()[1])
        self.ShouldBeStatus={}
        self.ns = NodeStatus(self.Env)

    def errorstoignore(self):
        '''Return list of errors which are 'normal' and should be ignored'''
        return []

    def log(self, args):
        self.Env.log(args)

    def debug(self, args):
        self.Env.debug(args)

    def prepare(self):
        '''Finish the Initialization process.
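        For every configured node this records the node's current state,
        self["up"] or self["down"], in self.ShouldBeStatus, e.g.
        {"node1": "up", "node2": "down"} (hypothetical node names).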
        Prepare to test...'''
        for node in self.Env["nodes"]:
            if self.StataCM(node):
                self.ShouldBeStatus[node] = self["up"]
            else:
                self.ShouldBeStatus[node] = self["down"]

    def upcount(self):
        '''How many nodes are up?'''
        count = 0
        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == self["up"]:
                count = count + 1
        return count

    def TruncLogs(self):
        '''Truncate the log for the cluster manager so we can start clean'''
        if self["LogFileName"] != None:
            os.system("cp /dev/null " + self["LogFileName"])

    def install_config(self, node):
        return None

    def clear_all_caches(self):
        if self.clear_cache:
            for node in self.Env["nodes"]:
                if self.ShouldBeStatus[node] == self["down"]:
                    self.debug("Removing cache file on: " + node)
                    self.rsh.remote_py(node, "os", "system",
                        "rm -f @HA_VARLIBHBDIR@/hostcache")
                else:
                    self.debug("NOT Removing cache file on: " + node)

    def StartaCM(self, node):
        '''Start up the cluster manager on a given node'''
        self.debug("Starting %s on node %s" % (self["Name"], node))
        ret = 1

        if not self.ShouldBeStatus.has_key(node):
            self.ShouldBeStatus[node] = self["down"]

        if self.ShouldBeStatus[node] != self["down"]:
            return 1

        patterns = []
        # Technically we should always be able to notice ourselves starting
        patterns.append(self["Pat:Local_started"] % node)
        if self.upcount() == 0:
            patterns.append(self["Pat:Master_started"] % node)
        else:
            patterns.append(self["Pat:Slave_started"] % node)

        watch = LogWatcher(
            self["LogFileName"], patterns, timeout=self["StartTime"]+10)
        watch.setwatch()

        self.install_config(node)

        self.ShouldBeStatus[node] = "any"
        if self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
            self.log("%s was already started" % (node))
            return 1

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: " + node)
            self.rsh.remote_py(node, "os", "system",
                "rm -f @HA_VARLIBHBDIR@/hostcache")

        if self.rsh(node, self["StartCmd"]) != 0:
            self.log("Warn: Start command failed on node %s" % (node))
            return None

        self.ShouldBeStatus[node] = self["up"]
        watch_result = watch.lookforall()

        if watch.unmatched:
            for regex in watch.unmatched:
                self.log("Warn: Startup pattern not found: %s" % (regex))

        if watch_result:
            #self.debug("Found match: " + repr(watch_result))
            self.cluster_stable(self["DeadTime"])
            return 1

        if self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
            return 1

        self.log("Warn: Start failed for node %s" % (node))
        return None

    def StartaCMnoBlock(self, node):
        '''Start up the cluster manager on a given node in non-blocking mode'''
        self.debug("Starting %s on node %s" % (self["Name"], node))

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: " + node)
            self.rsh.remote_py(node, "os", "system",
                "rm -f @HA_VARLIBHBDIR@/hostcache")

        self.rsh.noBlock(node, self["StartCmd"])
        self.ShouldBeStatus[node] = self["up"]
        return 1

    def StopaCM(self, node):
        '''Stop the cluster manager on a given node'''
        self.debug("Stopping %s on node %s" % (self["Name"], node))

        if self.ShouldBeStatus[node] != self["up"]:
            return 1

        if self.rsh(node, self["StopCmd"]) == 0:
            self.ShouldBeStatus[node] = self["down"]
            self.cluster_stable(self["DeadTime"])
            return 1
        else:
            self.log("Could not stop %s on node %s" % (self["Name"], node))

        return None

    def StopaCMnoBlock(self, node):
        '''Stop the cluster manager on a given node in non-blocking mode'''
        self.debug("Stopping %s on node %s" % (self["Name"], node))
        self.rsh.noBlock(node, self["StopCmd"])
        self.ShouldBeStatus[node] = self["down"]
        return 1

    def cluster_stable(self, timeout = None):
        time.sleep(self["StableTime"])
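        # Base-class stub: we have just slept for self["StableTime"] and
        # unconditionally report the cluster as stable; the timeout argument
        # is ignored here.  Concrete cluster managers are expected to
        # override this with a real stability check.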
        return 1

    def node_stable(self, node):
        return 1

    def RereadCM(self, node):
        '''Force the cluster manager on a given node to reread its config
        This may be a no-op on certain cluster managers.
        '''
        rc = self.rsh(node, self["RereadCmd"])
        if rc == 0:
            return 1
        else:
            self.log("Could not force %s on node %s to reread its config"
                % (self["Name"], node))
        return None

    def StataCM(self, node):
        '''Report the status of the cluster manager on a given node'''

        out = self.rsh.readaline(node, self["StatusCmd"])
        ret = (string.find(out, 'stopped') == -1)

        try:
            if ret:
                if self.ShouldBeStatus[node] == self["down"]:
                    self.log(
                        "Node status for %s is %s but we think it should be %s"
                        % (node, self["up"], self.ShouldBeStatus[node]))
            else:
                if self.ShouldBeStatus[node] == self["up"]:
                    self.log(
                        "Node status for %s is %s but we think it should be %s"
                        % (node, self["down"], self.ShouldBeStatus[node]))
        except KeyError:
            pass

        if ret:
            self.ShouldBeStatus[node] = self["up"]
        else:
            self.ShouldBeStatus[node] = self["down"]
        return ret

    def startall(self, nodelist=None):
        '''Start the cluster manager on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''
        ret = 1
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.ShouldBeStatus[node] == self["down"]:
                if not self.StartaCM(node):
                    ret = 0
        return ret

    def stopall(self, nodelist=None):
        '''Stop the cluster managers on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''
        ret = 1
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.ShouldBeStatus[node] == self["up"]:
                if not self.StopaCM(node):
                    ret = 0
        return ret

    def rereadall(self, nodelist=None):
        '''Force the cluster managers on every node in the cluster
        to reread their config files.  We can do it on a subset of the
        cluster if nodelist is not None.
        '''
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.ShouldBeStatus[node] == self["up"]:
                self.RereadCM(node)

    def statall(self, nodelist=None):
        '''Return the status of the cluster managers in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
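        The result is a dictionary mapping each node name to self["up"]
        or self["down"], e.g. {"node1": "up", "node2": "down"}
        (hypothetical node names).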
        '''
        result = {}
        if not nodelist:
            nodelist = self.Env["nodes"]
        for node in nodelist:
            if self.StataCM(node):
                result[node] = self["up"]
            else:
                result[node] = self["down"]
        return result

    def isolate_node(self, node):
        '''isolate the communication between the nodes'''
        rc = self.rsh(node, self["BreakCommCmd"])
        if rc == 0:
            return 1
        else:
            self.log("Could not break the communication between the nodes from node: %s" % node)
        return None

    def unisolate_node(self, node):
        '''fix the communication between the nodes'''
        rc = self.rsh(node, self["FixCommCmd"])
        if rc == 0:
            return 1
        else:
            self.log("Could not fix the communication between the nodes from node: %s" % node)
        return None

    def reducecomm_node(self, node):
        '''reduce the communication between the nodes'''
        rc = self.rsh(node, self["ReduceCommCmd"] % (self.Env["XmitLoss"], self.Env["RecvLoss"]))
        if rc == 0:
            return 1
        else:
            self.log("Could not reduce the communication between the nodes from node: %s" % node)
        return None

    def savecomm_node(self, node):
        '''save the current communication between the nodes'''
        rc = 0
        if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0:
            rc = self.rsh(node, self["SaveFileCmd"]);
        if rc == 0:
            return 1
        else:
            self.log("Could not save the communication between the nodes from node: %s" % node)
        return None

    def restorecomm_node(self, node):
        '''restore the saved communication between the nodes'''
        rc = 0
        if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0:
            rc = self.rsh(node, self["RestoreCommCmd"]);
        if rc == 0:
            return 1
        else:
            self.log("Could not restore the communication between the nodes from node: %s" % node)
        return None

    def SyncTestConfigs(self):
        '''Synchronize test configurations throughout the cluster.
        This one's a no-op for FailSafe, since it does that by itself.
        '''
        fromdir = self["TestConfigDir"]

        if not os.access(fromdir, os.F_OK | os.R_OK | os.W_OK):
            raise ValueError("Directory [" + fromdir + "] not accessible (rwx)")

        for node in self.Env["nodes"]:
            if node == self.OurNode:
                continue
            self.log("Syncing test configurations on " + node)
            # Perhaps I ought to use rsync...
            self.rsh.cp("-r", fromdir, node + ":" + fromdir)

    def SetClusterConfig(self, configpath="default", nodelist=None):
        '''Activate the named test configuration throughout the cluster.
        It would be useful to implement this :-)
        '''
        pass
        return 1

    def ResourceGroups(self):
        "Return a list of resource type/instance pairs for the cluster"
        raise ValueError("Abstract Class member (ResourceGroups)")

    def InternalCommConfig(self):
        "Return a list of paths: each path consists of a tuple"
        raise ValueError("Abstract Class member (InternalCommConfig)")

    def HasQuorum(self, node_list):
        "Return TRUE if the cluster currently has quorum"
        # If we are auditing a partition, then one side will
        # have quorum and the other not.
        # So the caller needs to tell us which we are checking
        # If no value for node_list is specified...
        # assume all nodes
        raise ValueError("Abstract Class member (HasQuorum)")

    def Components(self):
        raise ValueError("Abstract Class member (Components)")

    def oprofileStart(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStart(n)
        elif node in self.Env["oprofile"]:
            self.debug("Enabling oprofile on %s" % node)
            self.rsh.remote_py(node, "os", "system", "opcontrol --init")
            self.rsh.remote_py(node, "os", "system", "opcontrol --setup --no-vmlinux --separate=lib --callgraph=10 --image=all")
            self.rsh.remote_py(node, "os", "system", "opcontrol --start")
            self.rsh.remote_py(node, "os", "system", "opcontrol --reset")

    def oprofileSave(self, test, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileSave(test, n)
        elif node in self.Env["oprofile"]:
            self.rsh.remote_py(node, "os", "system", "opcontrol --dump")
            self.rsh.remote_py(node, "os", "system", "opcontrol --save=cts.%d" % test)
            # Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
            if None:
                self.rsh.remote_py(node, "os", "system", "opcontrol --reset")
            else:
                self.oprofileStop(node)
                self.oprofileStart(node)

    def oprofileStop(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStop(n)
        elif node in self.Env["oprofile"]:
            self.debug("Stopping oprofile on %s" % node)
            self.rsh.remote_py(node, "os", "system", "opcontrol --reset")
            self.rsh.remote_py(node, "os", "system", "opcontrol --shutdown 2>&1 > /dev/null")

class Resource:
    '''
    This is an HA resource (not a resource group).
    A resource group is just an ordered list of Resource objects.
    '''

    def __init__(self, cm, rsctype=None, instance=None):
        self.CM = cm
        self.ResourceType = rsctype
        self.Instance = instance
        self.needs_quorum = 1

    def Type(self):
        return self.ResourceType

    def Instance(self, nodename):
        return self.Instance

    def IsRunningOn(self, nodename):
        '''
        This member function returns true if our resource is running
        on the given node in the cluster.
        It is analogous to the "status" operation on SystemV init scripts and
        heartbeat scripts.  FailSafe calls it the "exclusive" operation.
        '''
        raise ValueError("Abstract Class member (IsRunningOn)")
        return None

    def IsWorkingCorrectly(self, nodename):
        '''
        This member function returns true if our resource is operating
        correctly on the given node in the cluster.
        Heartbeat does not require this operation, but it might be called
        the Monitor operation, which is what FailSafe calls it.
        For remotely monitorable resources (like IP addresses), they *should*
        be monitored remotely for testing.
        '''
        raise ValueError("Abstract Class member (IsWorkingCorrectly)")
        return None

    def Start(self, nodename):
        '''
        This member function starts or activates the resource.
        '''
        raise ValueError("Abstract Class member (Start)")
        return None

    def Stop(self, nodename):
        '''
        This member function stops or deactivates the resource.
''' raise ValueError("Abstract Class member (Stop)") return None def __repr__(self): if (self.Instance and len(self.Instance) > 1): return "{" + self.ResourceType + "::" + self.Instance + "}" else: return "{" + self.ResourceType + "}" class Component: def kill(self, node): None class Process(Component): def __init__(self, name, dc_only, pats, dc_pats, badnews_ignore, triggersreboot, cm): self.name = str(name) self.dc_only = dc_only self.pats = pats self.dc_pats = dc_pats self.CM = cm self.badnews_ignore = badnews_ignore self.triggersreboot = triggersreboot self.KillCmd = "killall -9 " + self.name def kill(self, node): if self.CM.rsh(node, self.KillCmd) != 0: self.CM.log ("ERROR: Kill %s failed on node %s" %(self.name,node)) return None return 1 class ScenarioComponent: def __init__(self, Env): self.Env = Env def IsApplicable(self): '''Return TRUE if the current ScenarioComponent is applicable in the given LabEnvironment given to the constructor. ''' raise ValueError("Abstract Class member (IsApplicable)") def SetUp(self, CM): '''Set up the given ScenarioComponent''' raise ValueError("Abstract Class member (Setup)") def TearDown(self, CM): '''Tear down (undo) the given ScenarioComponent''' raise ValueError("Abstract Class member (Setup)") class Scenario: ( '''The basic idea of a scenario is that of an ordered list of ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn, and then after the tests have been run, they are torn down using TearDown() (in reverse order). A Scenario is applicable to a particular cluster manager iff each ScenarioComponent is applicable. A partially set up scenario is torn down if it fails during setup. ''') def __init__(self, Components): "Initialize the Scenario from the list of ScenarioComponents" for comp in Components: if not issubclass(comp.__class__, ScenarioComponent): raise ValueError("Init value must be subclass of" " ScenarioComponent") self.Components = Components def IsApplicable(self): ( '''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable() ''' ) for comp in self.Components: if not comp.IsApplicable(): return None return 1 def SetUp(self, CM): '''Set up the Scenario. Return TRUE on success.''' j=0 while j < len(self.Components): if not self.Components[j].SetUp(CM): # OOPS! We failed. Tear partial setups down. CM.log("Tearing down partial setup") self.TearDown(CM, j) return None j=j+1 return 1 def TearDown(self, CM, max=None): '''Tear Down the Scenario - in reverse order.''' if max == None: max = len(self.Components)-1 j=max while j >= 0: self.Components[j].TearDown(CM) j=j-1 class InitClusterManager(ScenarioComponent): ( '''InitClusterManager is the most basic of ScenarioComponents. This ScenarioComponent simply starts the cluster manager on all the nodes. It is fairly robust as it waits for all nodes to come up before starting as they might have been rebooted or crashed for some reason beforehand. ''') def __init__(self, Env): pass def IsApplicable(self): '''InitClusterManager is so generic it is always Applicable''' return 1 def SetUp(self, CM): '''Basic Cluster Manager startup. Start everything''' CM.prepare() # Clear out the cobwebs ;-) self.TearDown(CM) for node in CM.Env["nodes"]: CM.rsh(node, CM["DelFileCommCmd"]+ "; true") # Now start the Cluster Manager on all the nodes. 
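        # (CM.startall() below only starts nodes whose recorded
        # ShouldBeStatus is "down"; nodes already up are left alone.)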
CM.log("Starting Cluster Manager on all nodes.") return CM.startall() def TearDown(self, CM): '''Set up the given ScenarioComponent''' # Stop the cluster manager everywhere CM.log("Stopping Cluster Manager on all nodes") return CM.stopall() class PingFest(ScenarioComponent): ( '''PingFest does a flood ping to each node in the cluster from the test machine. If the LabEnvironment Parameter PingSize is set, it will be used as the size of ping packet requested (via the -s option). If it is not set, it defaults to 1024 bytes. According to the manual page for ping: Outputs packets as fast as they come back or one hundred times per second, whichever is more. For every ECHO_REQUEST sent a period ``.'' is printed, while for every ECHO_REPLY received a backspace is printed. This provides a rapid display of how many packets are being dropped. Only the super-user may use this option. This can be very hard on a net- work and should be used with caution. ''' ) def __init__(self, Env): self.Env = Env def IsApplicable(self): '''PingFests are always applicable ;-) ''' return 1 def SetUp(self, CM): '''Start the PingFest!''' self.PingSize=1024 if CM.Env.has_key("PingSize"): self.PingSize=CM.Env["PingSize"] CM.log("Starting %d byte flood pings" % self.PingSize) self.PingPids=[] for node in CM.Env["nodes"]: self.PingPids.append(self._pingchild(node)) CM.log("Ping PIDs: " + repr(self.PingPids)) return 1 def TearDown(self, CM): '''Stop it right now! My ears are pinging!!''' for pid in self.PingPids: if pid != None: CM.log("Stopping ping process %d" % pid) os.kill(pid, signal.SIGKILL) def _pingchild(self, node): Args = ["ping", "-qfn", "-s", str(self.PingSize), node] sys.stdin.flush() sys.stdout.flush() sys.stderr.flush() pid = os.fork() if pid < 0: self.Env.log("Cannot fork ping child") return None if pid > 0: return pid # Otherwise, we're the child process. os.execvp("ping", Args) self.Env.log("Cannot execvp ping: " + repr(Args)) sys.exit(1) class PacketLoss(ScenarioComponent): ( ''' It would be useful to do some testing of CTS with a modest amount of packet loss enabled - so we could see that everything runs like it should with a certain amount of packet loss present. ''') def IsApplicable(self): '''always Applicable''' return 1 def SetUp(self, CM): '''Reduce the reliability of communications''' if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 : return 1 for node in CM.Env["nodes"]: CM.reducecomm_node(node) CM.log("Reduce the reliability of communications") return 1 def TearDown(self, CM): '''Fix the reliability of communications''' if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 : return 1 for node in CM.Env["nodes"]: CM.unisolate_node(node) CM.log("Fix the reliability of communications") class BasicSanityCheck(ScenarioComponent): ( ''' ''') def IsApplicable(self): return self.Env["DoBSC"] def SetUp(self, CM): CM.prepare() # Clear out the cobwebs self.TearDown(CM) # Now start the Cluster Manager on all the nodes. CM.log("Starting Cluster Manager on BSC node(s).") return CM.startall() def TearDown(self, CM): CM.log("Stopping Cluster Manager on BSC node(s).") return CM.stopall() diff --git a/cts/CTSlab.py.in b/cts/CTSlab.py.in index e9402c6aa3..ed337d5ad8 100755 --- a/cts/CTSlab.py.in +++ b/cts/CTSlab.py.in @@ -1,846 +1,839 @@ #!@PYTHON@ '''CTS: Cluster Testing System: Lab environment module ''' __copyright__=''' Copyright (C) 2001,2005 Alan Robertson Licensed under the GNU GPL. 
''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. from UserDict import UserDict import sys, time, types, string, syslog, random, os, string, signal, traceback from CTS import ClusterManager from CM_hb import HeartbeatCM from CTStests import BSC_AddResource from socket import gethostbyname_ex tests = None cm = None old_handler = None DefaultFacility = "daemon" def sig_handler(signum, frame) : if cm != None: cm.log("Interrupted by signal %d"%signum) if signum == 10 and tests != None : tests.summarize() if signum == 15 : sys.exit(1) class ResetMechanism: def reset(self, node): raise ValueError("Abstract class member (reset)") class Stonith(ResetMechanism): def __init__(self, sttype="external/ssh", pName=None, pValue=None , path="@sbindir@/stonith"): self.pathname=path self.configName=pName self.configValue=pValue self.stonithtype=sttype def reset(self, node): if self.configValue == None : config=node else: config=self.configValue cmdstring = "%s -t '%s' -p '%s' '%s' 2>/dev/null" % (self.pathname , self.stonithtype, config, node) return (os.system(cmdstring) == 0) class Stonithd(ResetMechanism): def __init__(self, nodes, sttype = 'external/ssh'): self.sttype = sttype self.nodes = nodes self.query_cmd_pat = '@libdir@/heartbeat/stonithdtest/apitest 0 %s 60000 0' self.reset_cmd_pat = '@libdir@/heartbeat/stonithdtest/apitest 1 %s 60000 0' self.poweron_cmd_pat = '@libdir@/heartbeat/stonithdtest/apitest 2 %s 60000 0' self.poweroff_cmd_pat= '@libdir@/heartbeat/stonithdtest/apitest 3 %s 60000 0' self.lrmd_add_pat = '@libdir@/heartbeat/lrmadmin -A %s stonith ' + sttype + ' NULL hostlist=%s' self.lrmd_start_pat = '@libdir@/heartbeat/lrmadmin -E %s start 0 0 EVERYTIME' self.lrmd_stop_pat = '@libdir@/heartbeat/lrmadmin -E %s stop 0 0 EVERYTIME' self.lrmd_del_pat = '@libdir@/heartbeat/lrmadmin -D %s' self.rsc_id = 'my_stonithd_id' self.command = "@SSH@ -l root -n -x" self.command_noblock = "@SSH@ -f -l root -n -x" self.stonithd_started_nodes = [] self.fail_reason = '' def _remote_exec(self, node, cmnd): return (os.system("%s %s %s > /dev/null" % (self.command, node, cmnd)) == 0) def _remote_readlines(self, node, cmnd): f = os.popen("%s %s %s" % (self.command, node, cmnd)) return f.readlines() def _stonithd_started(self, node): return node in self.stonithd_started_nodes def _start_stonithd(self, node, hosts): hostlist = string.join(hosts, ',') lrmd_add_cmd = self.lrmd_add_pat % (self.rsc_id, hostlist) ret = self._remote_exec(node, lrmd_add_cmd) if not ret:return ret lrmd_start_cmd = self.lrmd_start_pat % self.rsc_id ret = self._remote_exec(node, lrmd_start_cmd) if not ret:return ret self.stonithd_started_nodes.append(node) return 1 def _stop_stonithd(self, node): lrmd_stop_cmd = self.lrmd_stop_pat % self.rsc_id ret = self._remote_exec(node, lrmd_stop_cmd) if not ret:return ret lrmd_del_cmd = self.lrmd_del_pat % self.rsc_id ret = self._remote_exec(node, 
lrmd_del_cmd)
        if not ret:return ret

        self.stonithd_started_nodes.remove(node)
        return 1

    def _do_stonith(self, init_node, target_node, action):
        stonithd_started = self._stonithd_started(init_node)
        if not stonithd_started:
            ret = self._start_stonithd(init_node, [target_node])
            if not ret:
                self.fail_reason = "failed to start stonithd on node %s" % init_node
                return ret

        command = ""
        if action == "RESET":
            command = self.reset_cmd_pat % target_node
        elif action == "POWEROFF":
            command = self.poweroff_cmd_pat % target_node
        elif action == "POWERON":
            command = self.poweron_cmd_pat % target_node
        else:
            self.fail_reason = "unknown operation type %s" % action
            return 0

        lines = self._remote_readlines(init_node, command)
        result = "".join(lines)

        if not stonithd_started:
            self._stop_stonithd(init_node)

        index = result.find("result=0")
        if index == -1:
            self.fail_reason = "unexpected stonithd status: %s" % result
            return 0
        return 1

    # Should we randomly choose a node as init_node here if init_node not specified?
    def reset(self, init_node, target_node):
        return self._do_stonith(init_node, target_node, "RESET")

    def poweron(self, init_node, target_node):
        return self._do_stonith(init_node, target_node, "POWERON")

    def poweroff(self, init_node, target_node):
        return self._do_stonith(init_node, target_node, "POWEROFF")

class Logger:
    TimeFormat = "%b %d %H:%M:%S\t"

    def __call__(self, lines):
        raise ValueError("Abstract class member (__call__)")

    def write(self, line):
        return self(line.rstrip())

    def writelines(self, lines):
        for s in lines:
            self.write(s)
        return 1

    def flush(self):
        return 1

    def isatty(self):
        return None

class SysLog(Logger):
    # http://docs.python.org/lib/module-syslog.html
    defaultsource = "CTS"
    map = {
        "kernel":   syslog.LOG_KERN,
        "user":     syslog.LOG_USER,
        "mail":     syslog.LOG_MAIL,
        "daemon":   syslog.LOG_DAEMON,
        "auth":     syslog.LOG_AUTH,
        "lpr":      syslog.LOG_LPR,
        "news":     syslog.LOG_NEWS,
        "uucp":     syslog.LOG_UUCP,
        "cron":     syslog.LOG_CRON,
        "local0":   syslog.LOG_LOCAL0,
        "local1":   syslog.LOG_LOCAL1,
        "local2":   syslog.LOG_LOCAL2,
        "local3":   syslog.LOG_LOCAL3,
        "local4":   syslog.LOG_LOCAL4,
        "local5":   syslog.LOG_LOCAL5,
        "local6":   syslog.LOG_LOCAL6,
        "local7":   syslog.LOG_LOCAL7,
    }

    def __init__(self, labinfo):
        if labinfo.has_key("syslogsource"):
            self.source = labinfo["syslogsource"]
        else:
            self.source = SysLog.defaultsource

        if labinfo.has_key("SyslogFacility"):
            self.facility = labinfo["SyslogFacility"]
        else:
            self.facility = DefaultFacility

        if SysLog.map.has_key(self.facility):
            self.facility = SysLog.map[self.facility]
        syslog.openlog(self.source, 0, self.facility)

    def setfacility(self, facility):
        self.facility = facility
        if SysLog.map.has_key(self.facility):
            self.facility = SysLog.map[self.facility]
        syslog.closelog()
        syslog.openlog(self.source, 0, self.facility)

    def __call__(self, lines):
        if isinstance(lines, types.StringType):
            syslog.syslog(lines)
        else:
            for line in lines:
                syslog.syslog(line)

    def name(self):
        return "Syslog"

class StdErrLog(Logger):
    def __init__(self, labinfo):
        pass

    def __call__(self, lines):
        t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
        if isinstance(lines, types.StringType):
            sys.__stderr__.writelines([t, lines, "\n"])
        else:
            for line in lines:
                sys.__stderr__.writelines([t, line, "\n"])
        sys.__stderr__.flush()

    def name(self):
        return "StdErrLog"

class FileLog(Logger):
    def __init__(self, labinfo, filename=None):
        if filename == None:
            filename = labinfo["LogFileName"]
        self.logfile = filename
        import os
        self.hostname = os.uname()[1] + " "
        self.source = "CTS: "

    def __call__(self, lines):
        fd = open(self.logfile, "a")
        t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
        if isinstance(lines, types.StringType):
            fd.writelines([t, self.hostname, self.source, lines, "\n"])
        else:
            for line in lines:
                fd.writelines([t, self.hostname, self.source, line, "\n"])
        fd.close()

    def name(self):
        return "FileLog"

class CtsLab(UserDict):
    '''This class defines the Lab Environment for the Cluster Test System.
    It defines those things which are expected to change from test
    environment to test environment for the same cluster manager.

    It is where you define the set of nodes that are in your test lab,
    what kind of reset mechanism you use, etc.

    This class is derived from a UserDict because we hold many
    different parameters of different kinds, and this provides
    a uniform and extensible interface useful for any kind of
    communication between the user/administrator/tester and CTS.

    At this point in time, it is the intent of this class to model static
    configuration and/or environmental data about the environment which
    doesn't change as the tests proceed.

    Well-known names (keys) are an important concept in this class.
    The HasMinimalKeys member function knows the minimal set of
    well-known names for the class.

    The following names are standard (well-known) at this time:

        nodes       An array of the nodes in the cluster
        reset       A ResetMechanism object
        logger      An array of objects that log strings...
        CMclass     The type of ClusterManager we are running
                    (This is a class object, not a class instance)
        RandSeed    Random seed.  It is a triple of bytes. (optional)
        HAdir       Base directory for HA installation

    The CTS code ignores names it doesn't know about/need.
    The individual tests have access to this information, and it is
    perfectly acceptable to provide hints, tweaks, fine-tuning
    directions or other information to the tests through this mechanism.
    '''

    def __init__(self, nodes):
        self.data = {}
        self["nodes"] = nodes
        self.MinimalKeys = ["nodes", "reset", "logger", "CMclass", "HAdir"]

    def HasMinimalKeys(self):
        'Return TRUE if our object has the minimal set of keys/values in it'
        result = 1
        for key in self.MinimalKeys:
            if not self.has_key(key):
                result = None
        return result

    def SupplyDefaults(self):
        if not self.has_key("logger"):
            self["logger"] = (SysLog(self), StdErrLog(self))
        if not self.has_key("reset"):
            self["reset"] = Stonith()
        if not self.has_key("CMclass"):
            self["CMclass"] = HeartbeatCM
        if not self.has_key("LogFileName"):
            self["LogFileName"] = "@HA_VARLOGDIR@/ha-log"
        if not self.has_key("logrestartcmd"):
            self["logrestartcmd"] = "@INITDIR@/syslog restart"
        if not self.has_key("logfacility"):
            LogFacility = DefaultFacility

        #
        # Now set up our random number generator...
        #
        self.RandomGen = random.Random()

        #  Get a random seed for the random number generator.
        if self.has_key("RandSeed"):
            randseed = self["RandSeed"]
            self.log("Random seed is: " + str(randseed))
            self.RandomGen.seed(str(randseed))
        else:
            randseed = int(time.time())
            self.log("Random seed is: " + str(randseed))
            self.RandomGen.seed(str(randseed))

    def log(self, args):
        "Log using each of the supplied logging methods"
        for logfcn in self._logfunctions:
            logfcn(string.strip(args))

    def debug(self, args):
        "Log using each of the supplied logging methods"
        for logfcn in self._logfunctions:
            if logfcn.name() != "StdErrLog":
                logfcn("debug: %s" % string.strip(args))

    def __setitem__(self, key, value):
        '''Since this function gets called whenever we modify the
        dictionary (object), we can (and do) validate those keys that we
        know how to validate.  For the most part, we know how to validate
        the "MinimalKeys" elements.
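        For example, assigning

            env["nodes"] = ["node1", "node2"]   # hypothetical names

        runs each name through gethostbyname_ex() below, so a typo in a
        node name is caught at assignment time rather than mid-test.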
''' # # List of nodes in the system # if key == "nodes": self.Nodes = {} for node in value: # I don't think I need the IP address, etc. but this validates # the node name against /etc/hosts and/or DNS, so it's a # GoodThing(tm). try: self.Nodes[node] = gethostbyname_ex(node) except: print node+" not found in DNS... aborting" raise # # Reset Mechanism # elif key == "reset": if not issubclass(value.__class__, ResetMechanism): raise ValueError("'reset' Value must be a subclass" " of ResetMechanism") # # List of Logging Mechanism(s) # elif key == "logger": if len(value) < 1: raise ValueError("Must have at least one logging mechanism") for logger in value: if not callable(logger): raise ValueError("'logger' elements must be callable") self._logfunctions = value # # Cluster Manager Class # elif key == "CMclass": if not issubclass(value, ClusterManager): raise ValueError("'CMclass' must be a subclass of" " ClusterManager") # # Initial Random seed... # #elif key == "RandSeed": # if len(value) != 3: # raise ValueError("'Randseed' must be a 3-element list/tuple") # for elem in value: # if not isinstance(elem, types.IntType): # raise ValueError("'Randseed' list must all be ints") self.data[key] = value def IsValidNode(self, node): 'Return TRUE if the given node is valid' return self.Nodes.has_key(node) def __CheckNode(self, node): "Raise a ValueError if the given node isn't valid" if not self.IsValidNode(node): raise ValueError("Invalid node [%s] in CheckNode" % node) def RandomNode(self): '''Choose a random node from the cluster''' return self.RandomGen.choice(self["nodes"]) def ResetNode(self, node): "Reset a node, (normally) using a hardware mechanism" self.__CheckNode(node) return self["reset"].reset(node) def ResetNode2(self, init_node, target_node, reasons): self.__CheckNode(target_node) stonithd = Stonithd(self["nodes"]) ret = stonithd.reset(init_node, target_node) if not ret: reasons.append(stonithd.fail_reason) return ret def usage(arg): print "Illegal argument " + arg print "usage: " + sys.argv[0] \ + " --directory config-directory" \ + " -D config-directory" \ + " --logfile system-logfile-name" \ + " --trunc (truncate logfile before starting)" \ + " -L system-logfile-name" \ + " --limit-nodes maxnumnodes" \ + " --xmit-loss lost-rate(0.0-1.0)" \ + " --recv-loss lost-rate(0.0-1.0)" \ + " --suppressmonitoring" \ + " --syslog-facility syslog-facility" \ + " --facility syslog-facility" \ + " --choose testcase-name" \ + " --test-ip-base ip" \ + " --oprofile \"whitespace separated list of nodes to oprofile\"" \ + " (-2 |"\ + " -v2 |"\ + " --crm |"\ + " --classic)"\ + " (--populate-resources | -r)" \ + " --resource-can-stop" \ + " --stonith (1 | 0 | yes | no)" \ + " --stonith-type type" \ + " --stonith-args name=value" \ + " --standby (1 | 0 | yes | no)" \ + " --fencing (1 | 0 | yes | no)" \ + " --suppress_cib_writes (1 | 0 | yes | no)" \ + " -lstests" \ + " --seed" \ + " [number-of-iterations]" sys.exit(1) # # A little test code... 
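# Typical invocation (hypothetical host names and values):
#
#   ./CTSlab.py --nodes "node1 node2 node3" -L /var/log/ha-log \
#       --stonith no --fencing yes -v2 500
#
# i.e. use the CRM cluster manager, watch /var/log/ha-log, skip real
# STONITH hardware but keep fencing enabled, and run 500 iterations.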
#

if __name__ == '__main__':
    from CTSaudits import AuditList
    from CTStests import TestList,RandomTests
    from CTS import Scenario, InitClusterManager, PingFest, PacketLoss, BasicSanityCheck

    import CM_hb

    HAdir = "@sysconfdir@/ha.d"
    LogFile = "@HA_VARLOGDIR@/ha-log-"+DefaultFacility
    DoStonith = 1
    DoStandby = 1
    DoFencing = 1
    NumIter = 500
    SuppressMonitoring = None
    Version = 1
    CIBfilename = None
    CIBResource = 0
    ClobberCIB = 0
    LimitNodes = 0
    TestCase = None
    LogFacility = None
    TruncateLog = 0
    ResCanStop = 0
    XmitLoss = "0.0"
    RecvLoss = "0.0"
    IPBase = "127.0.0.10"
    SuppressCib = 1
    DoBSC = 0
    ListTests = 0
    HaveSeed = 0
    oprofile = None
    warn_inactive = 0
    StonithType = "external/ssh"
    StonithParams = None
    StonithParams = "hostlist=dynamic".split('=')
    node_list = ''

    #
    # The values of the rest of the parameters are now properly derived from
    # the configuration files.
    #
    # Stonith is configurable because it's slow, I have a few machines which
    # don't reboot very reliably, and it can do mild damage to your machine if
    # you're using a real power switch.
    #
    # Standby is configurable because the test is very heartbeat specific
    # and I haven't written the code to set it properly yet.  Patches are
    # being accepted...

    # Set the signal handler
    signal.signal(15, sig_handler)
    signal.signal(10, sig_handler)

    # Process arguments...

    skipthis = None
    args = sys.argv[1:]
    for i in range(0, len(args)):
        if skipthis:
            skipthis = None
            continue

        elif args[i] == "-D" or args[i] == "--directory":
            skipthis = 1
            HAdir = args[i+1]

        elif args[i] == "-l" or args[i] == "--limit-nodes":
            skipthis = 1
            LimitNodes = int(args[i+1])

        elif args[i] == "-r" or args[i] == "--populate-resources":
            CIBResource = 1

        elif args[i] == "-L" or args[i] == "--logfile":
            skipthis = 1
            LogFile = args[i+1]

        elif args[i] == "--test-ip-base":
            skipthis = 1
            IPBase = args[i+1]

        elif args[i] == "--oprofile":
            skipthis = 1
            oprofile = args[i+1].split(' ')

        elif args[i] == "--trunc":
            TruncateLog = 1

        elif args[i] == "-v2":
            Version = 2

        elif args[i] == "-lstests":
            ListTests = 1

        elif args[i] == "--stonith":
            skipthis = 1
            if args[i+1] == "1" or args[i+1] == "yes":
                DoStonith = 1
            elif args[i+1] == "0" or args[i+1] == "no":
                DoStonith = 0
            else:
                usage(args[i+1])

        elif args[i] == "--stonith-type":
            StonithType = args[i+1]
            skipthis = 1

        elif args[i] == "--stonith-args":
            StonithParams = args[i+1].split('=')
            skipthis = 1

        elif args[i] == "--suppress-cib-writes":
            skipthis = 1
            if args[i+1] == "1" or args[i+1] == "yes":
                SuppressCib = 1
            elif args[i+1] == "0" or args[i+1] == "no":
                SuppressCib = 0
            else:
                usage(args[i+1])

        elif args[i] == "--bsc":
            DoBSC = 1

        elif args[i] == "--standby":
            skipthis = 1
            if args[i+1] == "1" or args[i+1] == "yes":
                DoStandby = 1
            elif args[i+1] == "0" or args[i+1] == "no":
                DoStandby = 0
            else:
                usage(args[i+1])

        elif args[i] == "--fencing":
            skipthis = 1
            if args[i+1] == "1" or args[i+1] == "yes":
                DoFencing = 1
            elif args[i+1] == "0" or args[i+1] == "no":
                DoFencing = 0
            else:
                usage(args[i+1])

        elif args[i] == "--suppressmonitoring":
            SuppressMonitoring = 1

        elif args[i] == "--resource-can-stop":
            ResCanStop = 1

        elif args[i] == "-3" or args[i] == "--ais":
            Version = 3

        elif args[i] == "-2" or args[i] == "--crm":
            Version = 2

        elif args[i] == "-1" or args[i] == "--classic":
            Version = 1

        elif args[i] == "--clobber-cib" or args[i] == "-c":
            ClobberCIB = 1

        elif args[i] == "--cib-filename":
            skipthis = 1
            CIBfilename = args[i+1]

        elif args[i] == "--xmit-loss":
            try:
                float(args[i+1])
            except ValueError:
                print ("--xmit-loss parameter should be float")
                usage(args[i+1])
            skipthis = 1
            XmitLoss = args[i+1]

        elif args[i] == "--recv-loss":
            try:
                float(args[i+1])
            except ValueError:
print ("--recv-loss parameter should be float") usage(args[i+1]) skipthis=1 RecvLoss = args[i+1] elif args[i] == "--choose": skipthis=1 TestCase = args[i+1] elif args[i] == "--nodes": skipthis=1 node_list = args[i+1].split(' ') elif args[i] == "--syslog-facility" or args[i] == "--facility": skipthis=1 LogFacility = args[i+1] elif args[i] == "--seed": skipthis=1 Seed=args[i+1] HaveSeed = 1 elif args[i] == "--warn-inactive": warn_inactive = 1 else: NumIter=int(args[i]) if not oprofile: oprofile = [] # # This reading of HBconfig here is ugly, and I suppose ought to # be done by the Cluster manager. This would probably mean moving the # list of cluster nodes into the ClusterManager class. A good thought # for our Copious Spare Time in the future... # config = CM_hb.HBConfig(HAdir) if not node_list: node_list = config.Parameters["node"] else: config.Parameters["node"] = node_list if DoBSC: NumIter = 2 Version = 2 while len(node_list) > 1: node_list.pop(len(node_list)-1) if LogFacility == None: if config.Parameters.has_key("logfacility"): LogFacility = config.Parameters["logfacility"][0] else: LogFacility = DefaultFacility if LimitNodes > 0: if len(node_list) > LimitNodes: print("Limiting the number of nodes configured=%d (max=%d)" %(len(node_list), LimitNodes)) while len(node_list) > LimitNodes: node_list.pop(len(node_list)-1) if StonithParams[0] == "hostlist": StonithParams[1] = string.join(node_list, " ") # alt_list = [] # for node in node_list: # alt_list.append(string.lower(node)) # node_list = alt_list Environment = CtsLab(node_list) Environment["HAdir"] = HAdir Environment["ClobberCIB"] = ClobberCIB Environment["CIBfilename"] = CIBfilename Environment["CIBResource"] = CIBResource Environment["LogFileName"] = LogFile Environment["DoStonith"] = DoStonith Environment["SyslogFacility"] = LogFacility Environment["DoStandby"] = DoStandby Environment["DoFencing"] = DoFencing Environment["ResCanStop"] = ResCanStop Environment["SuppressMonitoring"] = SuppressMonitoring Environment["XmitLoss"] = XmitLoss Environment["RecvLoss"] = RecvLoss Environment["IPBase"] = IPBase Environment["SuppressCib"] = SuppressCib Environment["DoBSC"] = 0 Environment["use_logd"] = 0 Environment["logfacility"] = LogFacility Environment["oprofile"] = oprofile Environment["warn-inactive"] = warn_inactive if config.Parameters.has_key("use_logd"): Environment["use_logd"] = 1 if Version == 2: from CM_LinuxHAv2 import LinuxHAv2 Environment['CMclass']=LinuxHAv2 if Version == 3: from CM_ais import crm_ais Environment['CMclass'] = crm_ais Environment["use_logd"] = 0 if HaveSeed: Environment["RandSeed"] = Seed Environment["reset"] = Stonith(sttype=StonithType, pName=StonithParams[0], pValue=StonithParams[1]) if DoBSC: Environment["DoBSC"] = 1 Environment["ClobberCIB"] = 1 Environment["CIBResource"] = 0 Environment["logger"] = (FileLog(Environment), StdErrLog(Environment)) scenario = Scenario([ BasicSanityCheck(Environment) ]) else: scenario = Scenario( [ InitClusterManager(Environment), PacketLoss(Environment)]) Environment.SupplyDefaults() # Your basic start up the world type of test scenario... 
    # Your basic start up the world type of test scenario...

#    scenario = Scenario(
#        [ InitClusterManager(Environment)
#        , PingFest(Environment)])

    # Create the Cluster Manager object
    cm = Environment['CMclass'](Environment)
    if TruncateLog:
        cm.log("Truncating %s" % LogFile)
        lf = open(LogFile, "w");
        if lf != None:
            lf.truncate(0)
            lf.close()

    cm.log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ")
    cm.log("Version: %d" % Version)
    cm.log("HA configuration directory: " + Environment["HAdir"])
    cm.log("System log files: " + Environment["LogFileName"])
    cm.log("Enable Stonith: %d" % Environment["DoStonith"])
    cm.log("Enable Fencing: %d" % Environment["DoFencing"])
    cm.log("Enable Standby: %d" % Environment["DoStandby"])
    cm.log("Enable Resources: %d" % Environment["CIBResource"])
    if Environment.has_key("SuppressMonitoring") \
    and Environment["SuppressMonitoring"]:
        cm.log("Resource Monitoring is disabled")

    cm.ns.WaitForAllNodesToComeUp(config.Parameters["node"])

    cm.log("Cluster nodes: ")
    for node in config.Parameters["node"]:
-        (rc, lines) = cm.rsh.remote_py(node, "os", "system",
-                                       "@sbindir@/crm_uuid")
-        if not lines:
-            cm.log(" * %s: __undefined_uuid__" % node)
-        else:
-            out=lines[0]
-            out = out[:-1]
-            cm.log(" * %s: %s" % (node, out))
+        cm.log(" * %s" % (node))

    Audits = AuditList(cm)
    Tests = []

    if Environment["DoBSC"]:
        test = BSC_AddResource(cm)
        Tests.append(test)
    elif TestCase != None:
        for test in TestList(cm):
            if test.name == TestCase:
                Tests.append(test)
        if Tests == []:
            usage("--choose: No applicable/valid tests chosen")
    else:
        Tests = TestList(cm)

    if ListTests == 1 :
        cm.log("Total %d tests"%len(Tests))
        for test in Tests :
            cm.log(str(test.name));
        sys.exit(0)

    tests = RandomTests(scenario, cm, Tests, Audits)
    Environment.RandomTests = tests
    try :
        overall, detailed = tests.run(NumIter)
    except :
        cm.Env.log("Exception by %s" % sys.exc_info()[0])
        for logmethod in Environment["logger"]:
            traceback.print_exc(50, logmethod)

    tests.summarize()
    if tests.Stats["failure"] > 0:
        sys.exit(tests.Stats["failure"])

    elif tests.Stats["success"] != NumIter:
        cm.Env.log("No failure count but success != requested iterations")
        sys.exit(1)

diff --git a/doc/crm_cli.txt b/doc/crm_cli.txt
index eb54b23009..6a7f1dd47e 100644
--- a/doc/crm_cli.txt
+++ b/doc/crm_cli.txt
@@ -1,1177 +1,1256 @@
CRM CLI (command line interface) tool
======================================
Dejan_Muhamedagic,_Yan_Gao
dejan@suse.de,ygao@novell.com
-v0.5
+v0.9

The CRM (a.k.a. Pacemaker) is a Cluster Resource Manager which
implements the cluster configuration provided by the user in the
CIB (Cluster Information Base). The CIB is a set of instructions
coded in XML. Editing the CIB is a challenge, not only due to
its complexity and a wide variety of options, but also because
XML is not exactly user friendly.

.Note
**************************
I do understand that there are people capable of dealing with
XML without an intermediary.
**************************

There are currently three options to manage the CIB, listed here
in a decreasing order of user-friendliness:

- the GUI (`hb_gui`)
- a set of command line tools
- `cibadmin(8)`

The GUI is very popular and it has recently seen a lot of good
development. For some it is going to be (or remain) the first
choice in cluster management.

-The command line tools, lead by `crm_resource(8)`, are very
-powerful. They are capable of performing almost any kind of CIB
-transformation. The usage is, however, plagued by the notorious
-weakness common to all UNIX tools: a multitude of options,
-necessary for operation and yet very hard to remember. Usage is
-also inconsistent at times.
+The command line tools, led by `crm_resource(8)`, are capable of
+performing almost any kind of CIB transformation. The usage is,
+however, plagued by the notorious weakness common to all UNIX
+tools: a multitude of options, necessary for operation and yet
+very hard to remember. Usage is also inconsistent at times.

The `cibadmin` is the ultimate CIB management tool: it applies
chunks of XML written by the user or generated by another tool
to the CIB. Very difficult to use without extensive training. Or
should I say drill. May be unnerving as well, in particular due
to sometimes cryptic error messages.

== Design goals

The CLI provides a consistent and unified interface to
CIB/cluster management. It uses the command line tools where
possible and may resort to XML and `cibadmin` when there is no
other option. That is the easiest way to ensure compatibility
-between the management tools.
+between different management tools.

It may be used either as an interactive shell or for single
commands directly on the shell's command line. It is also
possible to feed it a set of commands from standard input, thus
turning it into a scripting tool.
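For example, assuming a file `myconfig.cli` holding a set of CLI
commands (the file name is arbitrary), the whole set may be
applied in one go:

        # crm < myconfig.cli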
Templates with ready-made configurations may help people to
learn about the cluster configuration or facilitate testing
procedures.

The CLI may also be used for the CIB description and generation.
A file containing a set of CLI instructions may be applied to
the CLI tool to generate a complete CIB.

The new shadow CIB feature may also be put to use. The user may
work on one of the shadow CIBs and then apply (or commit) it in
a single step to the cluster.

It should also allow deployment of raw XML which may come either
from files or network.

Several modes of operation are available to restrict the set of
features depending on the user's proficiency.

The CLI is line oriented: every command must start and finish on
the same line. It is possible to use a continuation character to
write one command in two or more lines.

The CLI must run on one of the cluster nodes.

.Note
**************************
Even though all sensible configurations (and most of those that
are not) are going to be supported by the CLI, I suspect that it
may still happen that certain XML constructs may confuse the
tool. When that happens, please file a bug report. The CLI will
not try to update the objects it does not understand. Of course,
it is always possible to edit such objects in the XML format.
**************************

== Introduction to the user interface

Arguably the most important aspect of such a program is the user
interface. We begin with an informal introduction so that the
reader may get acquainted with it and get a general feeling of
the tool. It is probably best just to give some examples:

1. Command line (one-shot) use:

        # crm resource stop www_app

2. Interactive use:

        # crm
        crm(live)# resource
        crm(live) resource# unmanage tetris_1
        crm(live) resource# up
        crm(live)# node standby node4

3. Cluster configuration (interactive mode):

        # crm
        crm(live)# configure
        crm(live) configure# help

        show [xml] [<id>]
        edit [xml] [<id>]
        delete <id> [<id>...]
+       verify          verify configuration
        erase           erase configuration
+       commit          commit changes to the CIB
        ra-list [<class>]  display available RAs
        enter           enter sub-level
        primitive       define a primitive
        group           define a group
        clone           define cloned resource
        ms              define a master-slave
        location        constrain resource to a location
        colocation      colocate resources
        order           order resources
        end
        help

        crm(live) configure# enter primitive www
        crm(live) primitive www# help

        show [xml]
        edit [xml]
        erase
        type [class:[provider:]]type  set the primitive type
        metadata        show the RA meta-data
        param-list      display parameters available for the RA
        meta-list       display available meta attributes
        param name [val]  set the parameter value
        meta name [val]   set the meta attribute value
        op op_type [...]  enter the operation
        end
        help

        crm(live) primitive www# type apache
        crm(live) primitive www# param-list
        configfile (required)
        httpd
        port
        statusurl
        options
        testregex
        crm(live) primitive www# param configfile /etc/apache2/site0.conf
        crm(live) primitive www# show
        primitive www apache
                configfile=/etc/apache2/site0.conf
        crm(live) primitive www# meta target_role stopped
        crm(live) primitive www# end
        crm(live) configure#

4. Cluster configuration (batch mode):

        # crm<

`<value>` denotes a string, `[]` means that the construct is
optional, the ellipsis (`...`) signifies that the previous
construct may be repeated, `|` means pick one of many, and the
rest are literals (strings, `:`, `=`).

=== `cib`

This level is for management of shadow CIBs. All the commands
are implemented using `cib_shadow(8)` and the `CIB_shadow`
environment variable. The user prompt always includes the name
of the currently active shadow or the live CIB.

==== `list`

List existing shadow CIBs.

Usage:
...............
list
...............

==== `new/delete`

Create a new shadow CIB or delete an existing one. On `new`, the
live cluster configuration is copied.

Usage:
...............
new <cib>
delete <cib>
...............

==== `reset`

Copy the current cluster configuration into the shadow CIB.

Usage:
...............
reset <cib>
...............

==== `use`

Choose a CIB. Leave out the CIB name to switch to the running
CIB.

Usage:
...............
use [<cib>]
...............

==== `diff`

Print differences between the current cluster configuration and
the active shadow CIB.

Usage:
...............
diff
...............

==== `commit`

Apply a shadow CIB to the cluster.

Usage:
...............
commit <cib>
...............
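Putting these commands together, a shadow CIB session could look
like this (a sketch only; `test-cib` is an arbitrary name and the
actual editing steps are omitted):

        # crm
        crm(live)# cib
        crm(live) cib# new test-cib
        crm(live) cib# use test-cib
        crm(test-cib) cib# diff
        crm(test-cib) cib# commit test-cib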
=== `resource`

At this level resources may be managed. All (or almost all)
commands are implemented with the CRM tools such as
`crm_resource(8)`.

==== `status`

Print resource status. If the resource parameter is left out,
the status of all resources is printed.

Usage:
...............
status [<rsc>]
...............

==== `start/stop`

Start/stop a resource using the `target_role` attribute.

Usage:
...............
start <rsc>
stop <rsc>
...............

==== `manage/unmanage`

Manage/unmanage a resource using the `is_managed` attribute.

Usage:
...............
manage <rsc>
unmanage <rsc>
...............

==== `migrate/unmigrate`

Migrate a resource to a different node or remove the constraint
generated by the previous migrate command. If the node is left
out, the resource is migrated by creating a constraint which
prevents it from running on the current node.

Usage:
...............
migrate <rsc> [<node>]
unmigrate <rsc>
...............

==== `param`

Show/edit/delete a parameter of a resource.

Usage:
...............
param <rsc> set <param> <value>
param <rsc> delete <param>
param <rsc> show <param>
...............
Example:
...............
param ip_0 show ip
...............

==== `meta`

Show/edit/delete a meta attribute of a resource. Currently, all
meta attributes of a resource may be managed with other commands
such as `resource stop`.

Usage:
...............
meta <rsc> set <attr> <value>
meta <rsc> delete <attr>
meta <rsc> show <attr>
...............
Example:
...............
meta ip_0 set target_role stopped
...............

==== `failcount`

Show/edit/delete the failcount of a resource.

Usage:
...............
failcount <rsc> set <node> <value>
failcount <rsc> delete <node>
failcount <rsc> show <node>
...............
Example:
...............
failcount fs_0 delete node2
...............

==== `cleanup`

Clean up resource status. Typically done after the resource has
temporarily failed. If a node is omitted, clean up on all nodes.
If there are many nodes, the command may take a while.

Usage:
...............
cleanup <rsc> [<node>]
...............

==== `refresh`

Refresh CIB from the LRM status.

Usage:
...............
refresh [<node>]
...............

==== `reprobe`

Probe for resources not started by the CRM.

Usage:
...............
reprobe [<node>]
...............

=== `node`

+Node management and status commands.
+
==== `status`

Show a node status. If the node parameter is omitted then the
status of all nodes is displayed.

Usage:
...............
status [<node>]
...............

==== `show`

Show a node definition. If the node parameter is omitted then
all nodes are shown.

Usage:
...............
show [<node>]
...............

==== `standby/online`

Set a node to standby or online status.

Usage:
...............
standby <node>
online <node>
...............

==== `attribute`

Edit node attributes. This kind of attribute should refer to
relatively static properties, such as memory size.

Usage:
...............
attribute <node> set <attr> <value>
attribute <node> delete <attr>
attribute <node> show <attr>
...............
Example:
...............
attribute node_1 set memory_size 4096
...............

==== `status-attr`

Edit node attributes which are in the CIB status section, i.e.
attributes which hold properties of a more volatile nature. One
typical example is the attribute generated by the `pingd`
utility.

Usage:
...............
status-attr <node> set <attr> <value>
status-attr <node> delete <attr>
status-attr <node> show <attr>
...............
Example:
...............
status-attr node_1 show pingd
...............

=== `configure` (batch mode)

This level enables all CIB object definition commands. The
commands from the batch mode may also be entered interactively,
but they are better suited for scripts.

The configuration is divided into three levels: resources,
constraints, and (cluster) properties. Each of these levels
supports several basic CIB objects. For resources those are:

- `primitive`
- `group`
- `clone`
- `ms` (master-slave)

There are three types of constraints:

- `location`
- `colocation`
- `order`

Finally, there are the cluster properties, which are just a set
of attributes. The attributes are managed by the `property`
command.

-Implementation: internally constructed XML.
+The changes are applied to the current CIB only on ending the
+configuration session or on using the `configure commit`
+command.
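For instance, a change entered at this level takes effect only
once committed or once the level is left (a sketch;
`stonith-enabled` is a standard cluster option):

        crm(live)# configure
        crm(live) configure# property stonith-enabled=false
        crm(live) configure# commit
        crm(live) configure# end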
==== `primitive`

The primitive command describes a resource. It may be referenced
only once in group, clone, or master-slave objects. If it's not
referenced, then it is placed as a single resource in the CIB.

Operations may be specified in three ways. "Anonymous" as a
simple list of "op" specifications. Use that if you don't want
to reference the set of operations elsewhere. That's by far the
most common way to define operations. If reusing operation sets
is desired, use the "operations" keyword along with the id to
give the operations set a name and the id-ref to reference
another set of operations.

Usage:
...............
primitive <rsc> [<class>:[<provider>:]]<type>
  [params <param>=<value> [<param>=<value>...]]
  [meta <attribute>=<value> [<attribute>=<value>...]]
  [operations id_spec
    [op op_type [<attribute>=<value>...] ...]]

id_spec :: $id=<id> | $id-ref=<id>
op_type :: start | stop | monitor
...............
Example:
...............
primitive apcfence stonith:apcsmart \
  params ttydev=/dev/ttyS0 hostlist="node1 node2" \
  op start timeout=60s \
  op monitor interval=30m timeout=60s

primitive www8 apache \
  params configfile=/etc/apache/www8.conf \
  operations $id-ref=apache_ops
...............

==== `group`

The `group` command creates a group of resources.

Usage:
...............
group <name> <rsc> [<rsc>...]
  [params <param>=<value> [<param>=<value>...]]
  [meta <attribute>=<value> [<attribute>=<value>...]]
...............
Example:
...............
group internal_www disk0 fs0 internal_ip apache \
  meta target_role=stopped
...............

==== `clone`

The `clone` command creates a resource clone. It may contain a
single primitive resource or one group of resources.

Usage:
...............
clone <name> <rsc>
  [params <param>=<value> [<param>=<value>...]]
  [meta <attribute>=<value> [<attribute>=<value>...]]
...............
Example:
...............
clone cl_fence apc_1 \
  meta clone_node_max=1 globally_unique=false
...............

==== `ms` (master/slave)

The `ms` command creates a master/slave resource type. It may
contain a single primitive resource or one group of resources.

Usage:
...............
ms <name> <rsc>
  [params <param>=<value> [<param>=<value>...]]
  [meta <attribute>=<value> [<attribute>=<value>...]]
...............
Example:
...............
ms disk1 drbd1 \
  meta notify=true globally_unique=false
...............

==== `location`

`location` defines the preference of nodes for the given
resource. The location constraints consist of one or more rules
which specify a score to be awarded if the rule matches.

Usage:
...............
location <id> <rsc>
-  rule [id_spec] [$role=<role>] <score> <expression>
-  [rule [id_spec] [$role=<role>] <score> <expression> ...]
+  rule [id_spec] [$role=<role>] <score>: <expression>
+  [rule [id_spec] [$role=<role>] <score>: <expression> ...]

id_spec :: $id=<id> | $id-ref=<id>
score :: <number> | <attribute> | [-]inf
expression :: <single_exp> [bool_op <single_exp> ...] | <date_expr>
bool_op :: or | and
single_exp :: <attribute> [type:]binary_op <value> | unary_op <attribute>
type :: string | version | number
binary_op :: lt | gt | lte | gte | eq | ne
unary_op :: defined | not_defined
date_expr :: date_op [<date_spec>] (TBD)
...............
Examples:
...............
location conn_1 internal_www \
-  rule 50 \#uname eq node1 \
-  rule pingd defined pingd
+  rule 50: #uname eq node1 \
+  rule pingd: defined pingd

location conn_2 dummy_float \
-  rule -inf not_defined pingd or pingd lte 0
+  rule -inf: not_defined pingd or pingd lte 0
...............

==== `colocation`

This constraint expresses the placement relation between two
resources.

Usage:
...............
-colocation <id> <score> <rsc>[:<role>] <rsc>[:<role>]
+colocation <id> <score>: <rsc>[:<role>] <rsc>[:<role>]
  [symmetrical=<bool>]
...............
Example:
...............
-colocation dummy_and_apache -inf apache dummy
+colocation dummy_and_apache -inf: apache dummy
...............

==== `order`

This constraint expresses the order of actions on two resources.

Usage:
...............
-order <id> score-type <first-rsc>[:<action>] <then-rsc>[:<action>]
+order <id> score-type: <first-rsc>[:<action>] <then-rsc>[:<action>]
  [symmetrical=<bool>]

score-type :: advisory | mandatory | <score>
...............
Example:
...............
-order c_apache_1 mandatory apache:start ip_1
+order c_apache_1 mandatory: apache:start ip_1
...............

==== `property`

Set the cluster (`crm_config`) options.

Usage:
...............
property [$id=<set_id>]