diff --git a/cts/CIB.py b/cts/CIB.py index 1de85fa8eb..ea8c772383 100644 --- a/cts/CIB.py +++ b/cts/CIB.py @@ -1,559 +1,559 @@ '''CTS: Cluster Testing System: CIB generator ''' __copyright__=''' Author: Andrew Beekhof Copyright (C) 2008 Andrew Beekhof ''' from UserDict import UserDict import sys, time, types, syslog, os, struct, string, signal, traceback, warnings from cts.CTSvars import * from cts.CTS import ClusterManager class CibBase: cts_cib = None cib_tmpfile = None version = "unknown" feature_set = "unknown" target = None def __init__(self, CM, tmpfile=None): self.CM = CM #self.target = self.CM.Env["nodes"][0] if not tmpfile: warnings.filterwarnings("ignore") self.cib_tmpfile=os.tmpnam() warnings.resetwarnings() else: self.cib_tmpfile = tmpfile def version(self): return self.version def NextIP(self): fields = string.split(self.CM.Env["IPBase"], '.') fields[3] = str(int(fields[3])+1) ip = string.join(fields, '.') self.CM.Env["IPBase"] = ip return ip class CIB06(CibBase): version = "transitional-0.6" coloc_template = """""" cib_template =''' %s %s %s ''' cib_option_template = ''' ''' lsb_resource = ''' ''' clustermon_location_constraint = ''' ''' resource_group_template = '''%s %s %s''' per_node_constraint_template = ''' ''' pingd_constraint_template = ''' ''' dummy_resource_template = ''' ''' clustermon_resource_template = ''' ''' master_slave_resource = ''' ''' pingd_resource_template = """ """ stonith_resource_template = """ """ bsc_template = ''' ''' def NewIP(self, name=None): template = ''' ''' ip = self.NextIP() if not name: name = "r"+ip return template % (name, name, name, name, name, ip) def NewHBIP(self, name=None): template = ''' ''' ip = self.NextIP() if not name: name = "r"+ip return template % (name, name, name, name, ip) def NewDummy(self, name): return self.dummy_resource_template % (name, name, name, name) def install(self, target): self.CM.rsh("localhost", "echo \'" + self.contents(target) + "\' > " + self.cib_tmpfile) rc = self.CM.rsh.cp(cib_file, "root@%s:%s/cib.xml" + (target, CTSvars.CRM_CONFIG_DIR)) if rc != 0: raise ValueError("Can not copy %s to %s (%d)"%(self.cib_tmpfile, target, rc)) self.CM.rsh(target, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml") self.CM.rsh("localhost", "rm -f "+self.cib_tmpfile) def contents(self, target=None): # fencing resource if self.cts_cib: return self.cts_cib nodelist = "" num_nodes = 0 for node in self.CM.Env["nodes"]: nodelist += node + " " num_nodes = num_nodes + 1 no_quorum = "stop" if num_nodes < 3: no_quorum = "ignore" self.CM.debug("Cluster only has %d nodes, ignoring quorum" % num_nodes) #make up crm config cib_options = self.cib_option_template % (self.CM.Env["DoFencing"], no_quorum) #create resources and their constraints resources = "" constraints = "" if self.CM.Env["DoBSC"] == 1: cib_options = cib_options + self.bsc_template if self.CM.Env["CIBResource"] != 1: # generate cib self.cts_cib = self.cib_template % (cib_options, resources, constraints) return self.cts_cib if self.CM.cluster_monitor == 1: resources += self.clustermon_resource_template constraints += self.clustermon_location_constraint ip1_rsc = self.NewIP() ip2_rsc = self.NewHBIP() ip3_rsc = self.NewIP() resources += self.resource_group_template % (ip1_rsc, ip2_rsc, ip3_rsc) # lsb resource resources += self.lsb_resource # Mirgator resources += self.NewDummy("migrator") constraints += self.coloc_template % ("group-with-master", "group-1", "master-1", "Master", "INFINITY") constraints += self.coloc_template % ("lsb-with-group", 
"lsb_dummy", "group-1", "Started", "INFINITY") # per node resource for node in self.CM.Env["nodes"]: per_node_resources = self.NewIP("rsc_"+node) per_node_constraint = self.per_node_constraint_template % (node, "rsc_"+node, node) resources += per_node_resources constraints += per_node_constraint # Ping the test master resources += self.pingd_resource_template % os.uname()[1] # Require conectivity to run constraints += self.pingd_constraint_template % ("master-1", "master-1", "m", "Started", "m", 1) if self.CM.Env["DoFencing"]: p_name = None p_value = None entries = string.split(self.CM.Env["stonith-params"], ',') for entry in entries: (p_name, p_value) = string.split(entry, '=') if p_name == "hostlist" and p_value == "all": p_value = string.join(self.CM.Env["nodes"], " ") stonith_resource = self.stonith_resource_template % (self.CM.Env["stonith-type"], p_name, p_value) resources += stonith_resource #master slave resource resources += self.master_slave_resource % (num_nodes, 1, 1, 1) # generate cib self.cts_cib = self.cib_template % (cib_options, resources, constraints) return self.cts_cib class CIB10(CibBase): feature_set = "3.0" version = "pacemaker-1.0" cib_template = ''' ''' def _create(self, command): fixed = "HOME=/root CIB_file="+self.cib_tmpfile+" crm --force configure " + command rc = self.CM.rsh(self.target, fixed) if rc != 0: self.CM.log("Configure call failed: "+fixed) sys.exit(1) def _show(self, command=""): output = "" (rc, result) = self.CM.rsh(self.target, "HOME=/root CIB_file="+self.cib_tmpfile+" crm configure show "+command, None, ) for line in result: output += line self.CM.debug("Generated Config: "+line) return output def NewIP(self, name=None, standard="ocf:heartbeat"): ip = self.NextIP() if not name: name = "r"+ip if not standard: standard = "" else: standard += ":" self._create('''primitive %s %sIPaddr params ip=%s cidr_netmask=32 op monitor interval=5s''' % (name, standard, ip)) return name def install(self, target): old = self.cib_tmpfile # Force a rebuild self.cts_cib = None self.cib_tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" self.contents(target) self.CM.rsh(self.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.cib_tmpfile) self.cib_tmpfile = old def contents(self, target=None): # fencing resource if self.cts_cib: return self.cts_cib if not target: self.target = self.CM.Env["nodes"][0] else: self.target = target cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''') self.CM.rsh(self.target, '''echo "%s" > %s''' % (cib_base, self.cib_tmpfile)) #self.CM.rsh.cp(self.cib_tmpfile, "root@%s:%s" % (self.target, self.cib_tmpfile)) nodelist = "" self.num_nodes = 0 for node in self.CM.Env["nodes"]: nodelist += node + " " self.num_nodes = self.num_nodes + 1 no_quorum = "stop" if self.num_nodes < 3: no_quorum = "ignore" self.CM.debug("Cluster only has %d nodes, ignoring quorum" % self.num_nodes) # The shell no longer functions when the lrmd isn't running, how wonderful # Start one here and let the cluster clean it up when the full stack starts # Just hope target has the same location for lrmd self.CM.rsh(self.target, CTSvars.CRM_DAEMON_DIR+"/lrmd", blocking=0) # Tell the shell to mind its own business, we know what we're doing self.CM.rsh(self.target, "crm options check-mode relaxed") # Now stop the shell from rejecting every update because we've not defined stonith resources yet self._create('''property stonith-enabled=false''') self._create('''property start-failure-is-fatal=false 
pe-input-series-max=5000''') self._create('''property shutdown-escalation=5min startup-fencing=false batch-limit=10''') self._create('''property no-quorum-policy=%s expected-quorum-votes=%d''' % (no_quorum, self.num_nodes)) if self.CM.Env["DoBSC"] == 1: self._create('''property ident-string="Linux-HA TEST configuration file - REMOVEME!!"''') # Add resources? if self.CM.Env["CIBResource"] == 1: self.add_resources() # Fencing resource if self.CM.Env["DoFencing"]: params = None entries = string.split(self.CM.Env["stonith-params"], ',') for entry in entries: (name, value) = string.split(entry, '=') if name == "hostlist" and value == "all": value = string.join(self.CM.Env["nodes"], " ") if params: params = ("""%s '%s="%s"' """ % (params, name, value)) else: params = ("""'%s="%s"' """ % (name, value)) if params: params = "params %s" % params else: params = "" self._create('''primitive FencingChild stonith::%s %s op monitor interval=120s timeout=300 op start interval=0 timeout=180s op stop interval=0 timeout=180s''' % (self.CM.Env["stonith-type"], params)) # Set a threshold for unreliable stonith devices such as the vmware one self._create('''clone Fencing FencingChild meta globally-unique=false migration-threshold=5''') if self.CM.cluster_monitor == 1: self._create('''primitive cluster_mon ocf:pacemaker:ClusterMon params update=10 extra_options="-r -n" user=abeekhof htmlfile=/suse/abeekhof/Export/cluster.html op start interval=0 requires=nothing op monitor interval=5s requires=nothing''') self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''') self._create('''property stonith-enabled=%s''' % (self.CM.Env["DoFencing"])) # generate cib self.cts_cib = self._show("xml") if self.cib_tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml": self.CM.rsh(self.target, "rm -f "+self.cib_tmpfile) return self.cts_cib def add_resources(self): # Group Resource r1 = self.NewIP() ip = self.NextIP() r2 = "r"+ip self._create('''primitive %s heartbeat::IPaddr params 1=%s/32 op monitor interval=5s''' % (r2, ip)) r3 = self.NewIP() self._create('''group group-1 %s %s %s''' % (r1, r2, r3)) # Per-node resources for node in self.CM.Env["nodes"]: r = self.NewIP("rsc_"+node) self._create('''location prefer-%s %s rule 100: \#uname eq %s''' % (node, r, node)) # LSB resource self._create('''primitive lsb-dummy lsb::''' +CTSvars.CTS_home+ '''/LSBDummy op monitor interval=5s''') self._create('''colocation lsb-with-group INFINITY: lsb-dummy group-1''') self._create('''order lsb-after-group mandatory: group-1 lsb-dummy symmetrical=true''') # Migrator self._create('''primitive migrator ocf:pacemaker:Dummy meta allow-migrate=1 op monitor interval=P10S''') # Ping the test master - self._create('''primitive ping-1 ocf:pacemaker:pingd params host_list=%s name=connected op monitor interval=120s''' % os.uname()[1]) + self._create('''primitive ping-1 ocf:pacemaker:ping params host_list=%s name=connected op monitor interval=120s''' % os.uname()[1]) self._create('''clone Connectivity ping-1 meta globally-unique=false''') #master slave resource self._create('''primitive stateful-1 ocf:pacemaker:Stateful op monitor interval=15s op monitor interval=16s role=Master''') self._create('''ms master-1 stateful-1 meta clone-max=%d clone-node-max=%d master-max=%d master-node-max=%d''' % (self.num_nodes, 1, 1, 1)) # Require conectivity to run the master - self._create('''location %s-is-connected %s rule -INFINITY: connected lt %d''' % ("m1", "master-1", 1)) + self._create('''location %s-is-connected %s rule -INFINITY: connected lt %d 
or not_defined connected''' % ("m1", "master-1", 1)) # Group with the master self._create('''colocation group-with-master INFINITY: group-1 master-1:Master''') self._create('''order group-after-master mandatory: master-1:promote group-1:start symmetrical=true''') class HASI(CIB10): def add_resources(self): # DLM resource self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''') self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''') # O2CB resource self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''') self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''') self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''') self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''') class ConfigFactory: def __init__(self, CM): self.CM = CM self.register("pacemaker06", CIB06, CM) self.register("pacemaker10", CIB10, CM) self.register("hae", HASI, CM) def register(self, methodName, constructor, *args, **kargs): """register a constructor""" _args = [constructor] _args.extend(args) setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs)) def unregister(self, methodName): """unregister a constructor""" delattr(self, methodName) def createConfig(self, name="pacemaker-1.0"): if name == "pacemaker-0.6": name = "pacemaker06"; elif name == "pacemaker-1.0": name = "pacemaker10"; elif name == "hasi": name = "hae"; if hasattr(self, name): return getattr(self, name)() else: self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name) return self.pacemaker10() class ConfigFactoryItem: def __init__(self, function, *args, **kargs): assert callable(function), "function should be a callable obj" self._function = function self._args = args self._kargs = kargs def __call__(self, *args, **kargs): """call function""" _args = list(self._args) _args.extend(args) _kargs = self._kargs.copy() _kargs.update(kargs) return apply(self._function,_args,_kargs) #CibFactory = ConfigFactory() diff --git a/cts/CTS.py b/cts/CTS.py index 81b05af4d2..82a56b8d84 100644 --- a/cts/CTS.py +++ b/cts/CTS.py @@ -1,1340 +1,1337 @@ '''CTS: Cluster Testing System: Main module Classes related to testing high-availability clusters... ''' __copyright__=''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import types, string, select, sys, time, re, os, struct, signal import time, syslog, random, traceback, base64, pickle, binascii, fcntl from socket import gethostbyname_ex from UserDict import UserDict from subprocess import Popen,PIPE from cts.CTSvars import * class CtsLab(UserDict): '''This class defines the Lab Environment for the Cluster Test System. 
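# A minimal, self-contained sketch of the deferred-constructor pattern that
# the ConfigFactory/ConfigFactoryItem pair in CIB.py above implements, for
# readers unfamiliar with the Python 2 apply() idiom: register() stores a
# constructor plus its fixed arguments, and calling the attribute later
# builds the object. DummyCIB/Factory are hypothetical stand-ins here, not
# CTS classes.

class FactoryItem:
    def __init__(self, constructor, *args, **kwargs):
        self._constructor = constructor
        self._args = args
        self._kwargs = kwargs

    def __call__(self, *args, **kwargs):
        # Merge registration-time and call-time arguments, then construct.
        merged_args = list(self._args) + list(args)
        merged_kwargs = dict(self._kwargs)
        merged_kwargs.update(kwargs)
        return self._constructor(*merged_args, **merged_kwargs)

class DummyCIB:
    def __init__(self, cm):
        self.cm = cm

class Factory:
    def __init__(self, cm):
        self.register("pacemaker10", DummyCIB, cm)

    def register(self, name, constructor, *args, **kwargs):
        setattr(self, name, FactoryItem(constructor, *args, **kwargs))

# Usage: Factory(cm).pacemaker10() builds a DummyCIB bound to cm, mirroring
# what ConfigFactory.createConfig("pacemaker-1.0") does above.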
It defines those things which are expected to change from test environment to test environment for the same cluster manager. It is where you define the set of nodes that are in your test lab what kind of reset mechanism you use, etc. This class is derived from a UserDict because we hold many different parameters of different kinds, and this provides provide a uniform and extensible interface useful for any kind of communication between the user/administrator/tester and CTS. At this point in time, it is the intent of this class to model static configuration and/or environmental data about the environment which doesn't change as the tests proceed. Well-known names (keys) are an important concept in this class. The HasMinimalKeys member function knows the minimal set of well-known names for the class. The following names are standard (well-known) at this time: nodes An array of the nodes in the cluster reset A ResetMechanism object logger An array of objects that log strings... CMclass The type of ClusterManager we are running (This is a class object, not a class instance) RandSeed Random seed. It is a triple of bytes. (optional) The CTS code ignores names it doesn't know about/need. The individual tests have access to this information, and it is perfectly acceptable to provide hints, tweaks, fine-tuning directions or other information to the tests through this mechanism. ''' def __init__(self): self.data = {} self.rsh = RemoteExec(self) self.RandomGen = random.Random() self.Scenario = None # Get a random seed for the random number generator. self["LogWatcher"] = "any" self["LogFileName"] = "/var/log/messages" self["OutputFile"] = None self["SyslogFacility"] = None self["CMclass"] = None self["logrestartcmd"] = "/etc/init.d/syslog-ng restart 2>&1 > /dev/null" self["logger"] = ([StdErrLog(self)]) self.SeedRandom() def SeedRandom(self, seed=None): if not seed: seed = int(time.time()) if self.has_key("RandSeed"): self.log("New random seed is: " + str(seed)) else: self.log("Random seed is: " + str(seed)) self["RandSeed"] = seed self.RandomGen.seed(str(seed)) def HasMinimalKeys(self): 'Return TRUE if our object has the minimal set of keys/values in it' result = 1 for key in self.MinimalKeys: if not self.has_key(key): result = None return result def log(self, args): "Log using each of the supplied logging methods" for logfcn in self._logfunctions: logfcn(string.strip(args)) def debug(self, args): "Log using each of the supplied logging methods" for logfcn in self._logfunctions: if logfcn.name() != "StdErrLog": logfcn("debug: %s" % string.strip(args)) def dump(self): keys = [] for key in self.keys(): keys.append(key) keys.sort() for key in keys: self.debug("Environment["+key+"]:\t"+str(self[key])) def run(self, Scenario, Iterations): if not Scenario: self.log("No scenario was defined") return 1 self.log("Cluster nodes: ") for node in self["nodes"]: self.log(" * %s" % (node)) if not Scenario.SetUp(): return 1 try : Scenario.run(Iterations) except : self.log("Exception by %s" % sys.exc_info()[0]) for logmethod in self["logger"]: traceback.print_exc(50, logmethod) #ClusterManager.oprofileSave(Iterations) Scenario.TearDown() Scenario.summarize() if Scenario.Stats["failure"] > 0: return Scenario.Stats["failure"] elif Scenario.Stats["success"] != Iterations: self.log("No failure count but success != requested iterations") return 1 return 0 def __setitem__(self, key, value): '''Since this function gets called whenever we modify the dictionary (object), we can (and do) validate those keys that we know how 
to validate. For the most part, we know how to validate the "MinimalKeys" elements. ''' # # List of nodes in the system # if key == "nodes": self.Nodes = {} for node in value: # I don't think I need the IP address, etc. but this validates # the node name against /etc/hosts and/or DNS, so it's a # GoodThing(tm). try: self.Nodes[node] = gethostbyname_ex(node) except: print node+" not found in DNS... aborting" raise # # List of Logging Mechanism(s) # elif key == "logger": if len(value) < 1: raise ValueError("Must have at least one logging mechanism") for logger in value: if not callable(logger): raise ValueError("'logger' elements must be callable") self._logfunctions = value # # Cluster Manager Class # elif key == "CMclass": if value and not issubclass(value, ClusterManager): raise ValueError("'CMclass' must be a subclass of" " ClusterManager") # # Initial Random seed... # #elif key == "RandSeed": # if len(value) != 3: # raise ValueError("'Randseed' must be a 3-element list/tuple") # for elem in value: # if not isinstance(elem, types.IntType): # raise ValueError("'Randseed' list must all be ints") self.data[key] = value def IsValidNode(self, node): 'Return TRUE if the given node is valid' return self.Nodes.has_key(node) def __CheckNode(self, node): "Raise a ValueError if the given node isn't valid" if not self.IsValidNode(node): raise ValueError("Invalid node [%s] in CheckNode" % node) def RandomNode(self): '''Choose a random node from the cluster''' return self.RandomGen.choice(self["nodes"]) class Logger: TimeFormat = "%b %d %H:%M:%S\t" def __call__(self, lines): raise ValueError("Abstract class member (__call__)") def write(self, line): return self(line.rstrip()) def writelines(self, lines): for s in lines: self.write(s) return 1 def flush(self): return 1 def isatty(self): return None class SysLog(Logger): # http://docs.python.org/lib/module-syslog.html defaultsource="CTS" map = { "kernel": syslog.LOG_KERN, "user": syslog.LOG_USER, "mail": syslog.LOG_MAIL, "daemon": syslog.LOG_DAEMON, "auth": syslog.LOG_AUTH, "lpr": syslog.LOG_LPR, "news": syslog.LOG_NEWS, "uucp": syslog.LOG_UUCP, "cron": syslog.LOG_CRON, "local0": syslog.LOG_LOCAL0, "local1": syslog.LOG_LOCAL1, "local2": syslog.LOG_LOCAL2, "local3": syslog.LOG_LOCAL3, "local4": syslog.LOG_LOCAL4, "local5": syslog.LOG_LOCAL5, "local6": syslog.LOG_LOCAL6, "local7": syslog.LOG_LOCAL7, } def __init__(self, labinfo): if labinfo.has_key("syslogsource"): self.source=labinfo["syslogsource"] else: self.source=SysLog.defaultsource self.facility="daemon" if labinfo.has_key("SyslogFacility") and labinfo["SyslogFacility"]: if SysLog.map.has_key(labinfo["SyslogFacility"]): self.facility=labinfo["SyslogFacility"] else: raise ValueError("%s: bad syslog facility"%labinfo["SyslogFacility"]) self.facility=SysLog.map[self.facility] syslog.openlog(self.source, 0, self.facility) def setfacility(self, facility): self.facility = facility if SysLog.map.has_key(self.facility): self.facility=SysLog.map[self.facility] syslog.closelog() syslog.openlog(self.source, 0, self.facility) def __call__(self, lines): if isinstance(lines, types.StringType): syslog.syslog(lines) else: for line in lines: syslog.syslog(line) def name(self): return "Syslog" class StdErrLog(Logger): def __init__(self, labinfo): pass def __call__(self, lines): t = time.strftime(Logger.TimeFormat, time.localtime(time.time())) if isinstance(lines, types.StringType): sys.__stderr__.writelines([t, lines, "\n"]) else: for line in lines: sys.__stderr__.writelines([t, line, "\n"]) sys.__stderr__.flush() 
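# Anything placed in Env["logger"] only has to be callable -- CtsLab's
# __setitem__ above rejects non-callable entries. A version-neutral sketch of
# a custom sink satisfying the same contract as StdErrLog/SysLog/FileLog
# (the ListLog name is illustrative, not part of CTS):

import time

class ListLog:
    '''Collect log lines in memory, e.g. for unit-testing CTS itself.'''
    def __init__(self):
        self.lines = []

    def __call__(self, lines):
        stamp = time.strftime("%b %d %H:%M:%S\t", time.localtime(time.time()))
        if isinstance(lines, str):
            lines = [lines]
        for line in lines:
            self.lines.append(stamp + line)

    def name(self):
        # CtsLab.debug() skips any sink whose name() is "StdErrLog", so
        # every logger is expected to answer name().
        return "ListLog"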
def name(self): return "StdErrLog" class FileLog(Logger): def __init__(self, labinfo, filename=None): if filename == None: filename=labinfo["LogFileName"] self.logfile=filename import os self.hostname = os.uname()[1]+" " self.source = "CTS: " def __call__(self, lines): fd = open(self.logfile, "a") t = time.strftime(Logger.TimeFormat, time.localtime(time.time())) if isinstance(lines, types.StringType): fd.writelines([t, self.hostname, self.source, lines, "\n"]) else: for line in lines: fd.writelines([t, self.hostname, self.source, line, "\n"]) fd.close() def name(self): return "FileLog" class RemoteExec: '''This is an abstract remote execution class. It runs a command on another machine - somehow. The somehow is up to us. This particular class uses ssh. Most of the work is done by fork/exec of ssh or scp. ''' def __init__(self, Env=None, silent=False): self.Env = Env self.silent = silent # -n: no stdin, -x: no X11 self.Command = "ssh -l root -n -x" # -B: batch mode, -q: no stats (quiet) self.CpCommand = "scp -B -q" self.OurNode=string.lower(os.uname()[1]) def enable_qarsh(self): # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/ self.log("Using QARSH for connections to cluster nodes") self.Command = "qarsh -l root" self.CpCommand = "qacp" def _fixcmd(self, cmd): return re.sub("\'", "'\\''", cmd) def _cmd(self, *args): '''Compute the string that will run the given command on the given remote system''' args= args[0] sysname = args[0] command = args[1] #print "sysname: %s, us: %s" % (sysname, self.OurNode) if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost": ret = command else: ret = self.Command + " " + sysname + " '" + self._fixcmd(command) + "'" #print ("About to run %s\n" % ret) return ret def log(self, args): if not self.silent: if not self.Env: print (args) else: self.Env.log(args) def debug(self, args): if not self.silent: if not self.Env: print (args) else: self.Env.debug(args) def __call__(self, node, command, stdout=0, blocking=1): '''Run the given command on the given remote system If you call this class like a function, this is the function that gets called. It just runs it roughly as though it were a system() call on the remote machine. The first argument is name of the machine to run it on. 
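# The _fixcmd()/_cmd() pair above wraps the remote command in single quotes
# and rewrites embedded single quotes as '\'' so the shell on the far side
# sees the command intact. A standalone sketch of the same quoting rule
# (quote_for_ssh is an illustrative name, not a CTS function):

def quote_for_ssh(cmd):
    # Close the quoted string, emit an escaped literal ', then reopen it.
    return "'" + cmd.replace("'", "'\\''") + "'"

# Example: quote_for_ssh("echo 'hi'") yields 'echo '\''hi'\''' -- which a
# remote POSIX shell parses back to: echo 'hi'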
''' rc = 0 result = None if not blocking: proc = Popen(self._cmd([node, command]), stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) self.debug("cmd: async: target=%s, rc=%d: %s" % (node, proc.pid, command)) if proc.pid > 0: return 0 return -1 proc = Popen(self._cmd([node, command]), stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) if stdout == 1: result = proc.stdout.readline() else: result = proc.stdout.readlines() proc.stdout.close() rc = proc.wait() if not self.silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command)) if stdout == 1: return result if proc.stderr: errors = proc.stderr.readlines() proc.stderr.close() for err in errors: self.debug("cmd: stderr: %s" % err) if stdout == 0: if result: for line in result: self.debug("cmd: stdout: %s" % line) return rc return (rc, result) def cp(self, *args): '''Perform a remote copy''' cpstring = self.CpCommand for arg in args: cpstring = cpstring + " \'" + arg + "\'" rc = os.system(cpstring) self.debug("cmd: rc=%d: %s" % (rc, cpstring)) return rc has_log_watcher = {} log_watcher_bin = "/tmp/cts_log_watcher.py" log_watcher = """ import sys, os, fcntl ''' Remote logfile reader for CTS Reads a specified number of lines from the supplied offset Returns the current offset Contains logic for handling truncation ''' limit = 5 offset = 0 prefix = '' filename = '/var/log/messages' skipthis=None args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == '-l' or args[i] == '--limit': skipthis=1 limit = int(args[i+1]) elif args[i] == '-f' or args[i] == '--filename': skipthis=1 filename = args[i+1] elif args[i] == '-o' or args[i] == '--offset': skipthis=1 offset = args[i+1] elif args[i] == '-p' or args[i] == '--prefix': skipthis=1 prefix = args[i+1] logfile=open(filename, 'r') logfile.seek(0, os.SEEK_END) newsize=logfile.tell() if offset != 'EOF': offset = int(offset) if newsize >= offset: logfile.seek(offset) else: print prefix + 'File truncated' logfile.seek(0) # Don't block when we reach EOF fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) count = limit while count > 0: count -= 1 line = logfile.readline() if line: print line.strip() else: break print prefix + 'Last read: %d %d' % (logfile.tell(), limit - count) logfile.close() """ class SearchObj: def __init__(self, Env, filename, host=None): self.Env = Env self.host = host self.filename = filename self.cache = [] self.offset = "EOF" self.rsh = RemoteExec(Env, silent=True) if host == None: host = "localhost" global has_log_watcher if not has_log_watcher.has_key(host): global log_watcher global log_watcher_bin self.debug("Installing %s on %s" % (log_watcher_bin, host)) self.rsh(host, '''echo "%s" > %s''' % (log_watcher, log_watcher_bin)) has_log_watcher[host] = 1 self.next() def __str__(self): if self.host: return "%s:%s" % (self.host, self.filename) return self.filename def log(self, args): message = "lw: %s: %s" % (self, args) if not self.Env: print (message) else: self.Env.log(message) def debug(self, args): message = "lw: %s: %s" % (self, args) if not self.Env: print (message) else: self.Env.debug(message) def next(self): cache = [] if not len(self.cache): global log_watcher_bin (rc, lines) = self.rsh( self.host, "python %s -p CTSwatcher: -f %s -o %s -l 50" % (log_watcher_bin, self.filename, self.offset), stdout=None) for line in lines: - if self.offset == "EOF": - self.offset = "0" - self.debug("First line: %s" % line) - match = re.search("^CTSwatcher:Last read: (\d+)", line) if match: - offset = 
match.group(1) - if offset != self.offset: - self.offset = offset - # self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset)) + last_offset = self.offset + self.offset = match.group(1) + if last_offset == "EOF": + self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset)) + elif re.search("^CTSwatcher:", line): self.debug("Got control line: "+ line) else: cache.append(line) return cache class LogWatcher(RemoteExec): '''This class watches logs for messages that fit certain regular expressions. Watching logs for events isn't the ideal way to do business, but it's better than nothing :-) On the other hand, this class is really pretty cool ;-) The way you use this class is as follows: Construct a LogWatcher object Call setwatch() when you want to start watching the log Call look() to scan the log looking for the patterns ''' def __init__(self, Env, log, regexes, name="Anon", timeout=10, debug_level=None): '''This is the constructor for the LogWatcher class. It takes a log name to watch, and a list of regular expressions to watch for." ''' RemoteExec.__init__(self, Env) # Validate our arguments. Better sooner than later ;-) for regex in regexes: assert re.compile(regex) self.name = name self.regexes = regexes self.filename = log self.debug_level = debug_level self.whichmatch = -1 self.unmatched = None self.file_list = [] self.line_cache = [] for regex in self.regexes: self.debug("Looking for regex: "+regex) self.Timeout = int(timeout) self.returnonlymatch = None if not os.access(log, os.R_OK): raise ValueError("File [" + log + "] not accessible (r)") def debug(self, args): message = "lw: %s: %s" % (self.name, args) if not self.Env: print (message) else: self.Env.debug(message) def setwatch(self): '''Mark the place to start watching the log from. ''' if self.Env["LogWatcher"] == "remote": for node in self.Env["nodes"]: self.file_list.append(SearchObj(self.Env, self.filename, node)) else: self.file_list.append(SearchObj(self.Env, self.filename)) def __del__(self): if self.debug_level > 1: self.debug("Destroy") def ReturnOnlyMatch(self, onlymatch=1): '''Specify one or more subgroups of the match to return rather than the whole string http://www.python.org/doc/2.5.2/lib/match-objects.html ''' self.returnonlymatch = onlymatch def __get_line(self): if not len(self.line_cache): if not len(self.file_list): raise ValueError("No sources to read from") for f in self.file_list: lines = f.next() self.line_cache.extend(lines) if self.line_cache: line = self.line_cache[0] self.line_cache.remove(line) return line return None def look(self, timeout=None): '''Examine the log looking for the given patterns. It starts looking from the place marked by setwatch(). This function looks in the file in the fashion of tail -f. It properly recovers from log file truncation, but not from removing and recreating the log. It would be nice if it recovered from this as well :-) We return the first line which matches any of our patterns. 
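# This hunk is the substance of the CTS.py change: next() now records the
# previous offset before overwriting it, so the "Got %d lines" debug message
# fires once -- on the first read after starting at EOF -- instead of the old
# per-line "First line" bookkeeping. A simplified, standalone sketch of the
# control-line protocol that the remote cts_log_watcher.py script and
# SearchObj.next() agree on (function name is illustrative):

import re

def parse_watcher_output(lines, offset):
    '''Split watcher output into real log lines plus a new file offset.'''
    cache = []
    for line in lines:
        match = re.search(r"^CTSwatcher:Last read: (\d+)", line)
        if match:
            offset = match.group(1)   # file position to resume from next time
        elif re.search(r"^CTSwatcher:", line):
            pass                      # other control output, e.g. truncation notice
        else:
            cache.append(line)        # an actual log line
    return cache, offset

# Example: starting from offset "EOF", a first batch of
# ["CTSwatcher:Last read: 4242 0"] yields ([], "4242"); later reads resume
# from byte 4242 of the remote log.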
''' if timeout == None: timeout = self.Timeout done=time.time()+timeout+1 if self.debug_level > 2: self.debug("starting single search: timeout=%d" % timeout) while (timeout <= 0 or time.time() <= done): line = self.__get_line() if line: which=-1 if re.search("CTS:", line): continue if self.debug_level > 2: self.debug("Processing: "+ line) for regex in self.regexes: which=which+1 if self.debug_level > 2: self.debug("Comparing line to: "+ regex) #matchobj = re.search(string.lower(regex), string.lower(line)) matchobj = re.search(regex, line) if matchobj: self.whichmatch=which if self.returnonlymatch: return matchobj.group(self.returnonlymatch) else: self.debug("Matched: "+line) if self.debug_level > 1: self.debug("With: "+ regex) return line elif timeout > 0: time.sleep(5) #time.sleep(0.025) else: self.debug("End of file") return None self.debug("Timeout") return None def lookforall(self, timeout=None, allow_multiple_matches=None): '''Examine the log looking for ALL of the given patterns. It starts looking from the place marked by setwatch(). We return when the timeout is reached, or when we have found ALL of the regexes that were part of the watch ''' if timeout == None: timeout = self.Timeout save_regexes = self.regexes returnresult = [] self.debug("starting search: timeout=%d" % timeout) for regex in self.regexes: if self.debug_level > 2: self.debug("Looking for regex: "+regex) while (len(self.regexes) > 0): oneresult = self.look(timeout) if not oneresult: self.unmatched = self.regexes self.matched = returnresult self.regexes = save_regexes return None returnresult.append(oneresult) if not allow_multiple_matches: del self.regexes[self.whichmatch] else: # Allow multiple regexes to match a single line tmp_regexes = self.regexes self.regexes = [] which = 0 for regex in tmp_regexes: matchobj = re.search(regex, oneresult) if not matchobj: self.regexes.append(regex) self.unmatched = None self.matched = returnresult self.regexes = save_regexes return returnresult class NodeStatus: def __init__(self, Env): self.Env = Env def IsNodeBooted(self, node): '''Return TRUE if the given node is booted (responds to pings)''' return self.Env.rsh("localhost", "ping -nq -c1 -w1 %s" % node) == 0 def IsSshdUp(self, node): #return self.rsh(node, "true") == 0; rc = self.Env.rsh(node, "true") return rc == 0 def WaitForNodeToComeUp(self, node, Timeout=300): '''Return TRUE when given node comes up, or None/FALSE if timeout''' timeout=Timeout anytimeouts=0 while timeout > 0: if self.IsNodeBooted(node) and self.IsSshdUp(node): if anytimeouts: # Fudge to wait for the system to finish coming up time.sleep(30) self.Env.debug("Node %s now up" % node) return 1 time.sleep(30) if (not anytimeouts): self.Env.debug("Waiting for node %s to come up" % node) anytimeouts=1 timeout = timeout - 1 self.Env.log("%s did not come up within %d tries" % (node, Timeout)) answer = raw_input('Continue? [nY]') if answer and answer == "n": raise ValueError("%s did not come up within %d tries" % (node, Timeout)) def WaitForAllNodesToComeUp(self, nodes, timeout=300): '''Return TRUE when all nodes come up, or FALSE if timeout''' for node in nodes: if not self.WaitForNodeToComeUp(node, timeout): return None return 1 class ClusterManager(UserDict): '''The Cluster Manager class. This is an subclass of the Python dictionary class. (this is because it contains lots of {name,value} pairs, not because it's behavior is that terribly similar to a dictionary in other ways.) 
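# A hedged sketch of the call sequence the LogWatcher docstring above
# describes; the env, log path and regexes below are illustrative
# placeholders, not values CTS ships with:
#
#     watch = LogWatcher(env, "/var/log/messages",
#                        ["crmd.*State transition", "pengine.*quorum"],
#                        name="example", timeout=60)
#     watch.setwatch()                  # remember the current end of the log(s)
#     ...trigger some cluster activity...
#     if watch.lookforall() is None:    # block until all regexes match, or timeout
#         print("unmatched: %s" % repr(watch.unmatched))
#
# look() returns the first line matching any one regex and records which via
# watch.whichmatch; lookforall() removes each regex as it matches and, on
# timeout, leaves the leftovers in watch.unmatched.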
This is an abstract class which class implements high-level operations on the cluster and/or its cluster managers. Actual cluster managers classes are subclassed from this type. One of the things we do is track the state we think every node should be in. ''' def __InitialConditions(self): #if os.geteuid() != 0: # raise ValueError("Must Be Root!") None def _finalConditions(self): for key in self.keys(): if self[key] == None: raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.") def __init__(self, Environment, randseed=None): self.Env = Environment self.__InitialConditions() self.clear_cache = 0 self.TestLoggingLevel=0 self.data = { "up" : "up", # Status meaning up "down" : "down", # Status meaning down "StonithCmd" : "stonith -t baytech -p '10.10.10.100 admin admin' %s", "DeadTime" : 30, # Max time to detect dead node... "StartTime" : 90, # Max time to start up # # These next values need to be overridden in the derived class. # "Name" : None, "StartCmd" : None, "StopCmd" : None, "StatusCmd" : None, #"RereadCmd" : None, "BreakCommCmd" : None, "FixCommCmd" : None, #"TestConfigDir" : None, "LogFileName" : None, #"Pat:Master_started" : None, #"Pat:Slave_started" : None, "Pat:We_stopped" : None, "Pat:They_stopped" : None, "BadRegexes" : None, # A set of "bad news" regexes # to apply to the log } self.rsh = self.Env.rsh self.ShouldBeStatus={} self.ns = NodeStatus(self.Env) self.OurNode=string.lower(os.uname()[1]) def key_for_node(self, node): return node def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] def log(self, args): self.Env.log(args) def debug(self, args): self.Env.debug(args) def prepare(self): '''Finish the Initialization process. Prepare to test...''' for node in self.Env["nodes"]: if self.StataCM(node): self.ShouldBeStatus[node]="up" else: self.ShouldBeStatus[node]="down" self.unisolate_node(node) def upcount(self): '''How many nodes are up?''' count=0 for node in self.Env["nodes"]: if self.ShouldBeStatus[node]=="up": count=count+1 return count def install_config(self, node): return None def clear_all_caches(self): if self.clear_cache: for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "down": self.debug("Removing cache file on: "+node) self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache") else: self.debug("NOT Removing cache file on: "+node) def StartaCM(self, node): '''Start up the cluster manager on a given node''' self.debug("Starting %s on node %s" %(self["Name"], node)) ret = 1 if not self.ShouldBeStatus.has_key(node): self.ShouldBeStatus[node] = "down" if self.ShouldBeStatus[node] != "down": return 1 patterns = [] # Technically we should always be able to notice ourselves starting patterns.append(self["Pat:Local_started"] % node) if self.upcount() == 0: patterns.append(self["Pat:Master_started"] % node) else: patterns.append(self["Pat:Slave_started"] % node) watch = LogWatcher( self.Env, self["LogFileName"], patterns, "StartaCM", self["StartTime"]+10) watch.setwatch() self.install_config(node) self.ShouldBeStatus[node] = "any" if self.StataCM(node) and self.cluster_stable(self["DeadTime"]): self.log ("%s was already started" %(node)) return 1 # Clear out the host cache so autojoin can be exercised if self.clear_cache: self.debug("Removing cache file on: "+node) self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache") if not(self.Env["valgrind-tests"]): startCmd = self["StartCmd"] else: if self.Env["valgrind-prefix"]: prefix = self.Env["valgrind-prefix"] else: 
prefix = "cts" startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % ( self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"]) if self.rsh(node, startCmd) != 0: self.log ("Warn: Start command failed on node %s" %(node)) return None self.ShouldBeStatus[node]="up" watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.log ("Warn: Startup pattern not found: %s" %(regex)) if watch_result: #self.debug("Found match: "+ repr(watch_result)) self.cluster_stable(self["DeadTime"]) return 1 if self.StataCM(node) and self.cluster_stable(self["DeadTime"]): return 1 self.log ("Warn: Start failed for node %s" %(node)) return None def StartaCMnoBlock(self, node): '''Start up the cluster manager on a given node with none-block mode''' self.debug("Starting %s on node %s" %(self["Name"], node)) # Clear out the host cache so autojoin can be exercised if self.clear_cache: self.debug("Removing cache file on: "+node) self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache") if not(self.Env["valgrind-tests"]): startCmd = self["StartCmd"] else: if self.Env["valgrind-prefix"]: prefix = self.Env["valgrind-prefix"] else: prefix = "cts" startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % ( self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"]) self.rsh(node, startCmd, blocking=0) self.ShouldBeStatus[node]="up" return 1 def StopaCM(self, node): '''Stop the cluster manager on a given node''' self.debug("Stopping %s on node %s" %(self["Name"], node)) if self.ShouldBeStatus[node] != "up": return 1 if self.rsh(node, self["StopCmd"]) == 0: self.ShouldBeStatus[node]="down" self.cluster_stable(self["DeadTime"]) return 1 else: self.log ("Could not stop %s on node %s" %(self["Name"], node)) return None def StopaCMnoBlock(self, node): '''Stop the cluster manager on a given node with none-block mode''' self.debug("Stopping %s on node %s" %(self["Name"], node)) self.rsh(node, self["StopCmd"], blocking=0) self.ShouldBeStatus[node]="down" return 1 def cluster_stable(self, timeout = None): time.sleep(self["StableTime"]) return 1 def node_stable(self, node): return 1 def RereadCM(self, node): '''Force the cluster manager on a given node to reread its config This may be a no-op on certain cluster managers. ''' rc=self.rsh(node, self["RereadCmd"]) if rc == 0: return 1 else: self.log ("Could not force %s on node %s to reread its config" % (self["Name"], node)) return None def StataCM(self, node): '''Report the status of the cluster manager on a given node''' out=self.rsh(node, self["StatusCmd"], 1) ret= (string.find(out, 'stopped') == -1) try: if ret: if self.ShouldBeStatus[node] == "down": self.log( "Node status for %s is %s but we think it should be %s" % (node, "up", self.ShouldBeStatus[node])) else: if self.ShouldBeStatus[node] == "up": self.log( "Node status for %s is %s but we think it should be %s" % (node, "down", self.ShouldBeStatus[node])) except KeyError: pass if ret: self.ShouldBeStatus[node]="up" else: self.ShouldBeStatus[node]="down" return ret def startall(self, nodelist=None): '''Start the cluster manager on every node in the cluster. We can do it on a subset of the cluster if nodelist is not None. 
''' ret = 1 map = {} if not nodelist: nodelist=self.Env["nodes"] for node in nodelist: if self.ShouldBeStatus[node] == "down": if not self.StartaCM(node): ret = 0 return ret def stopall(self, nodelist=None): '''Stop the cluster managers on every node in the cluster. We can do it on a subset of the cluster if nodelist is not None. ''' ret = 1 map = {} if not nodelist: nodelist=self.Env["nodes"] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": if not self.StopaCM(node): ret = 0 return ret def rereadall(self, nodelist=None): '''Force the cluster managers on every node in the cluster to reread their config files. We can do it on a subset of the cluster if nodelist is not None. ''' map = {} if not nodelist: nodelist=self.Env["nodes"] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": self.RereadCM(node) def statall(self, nodelist=None): '''Return the status of the cluster managers in the cluster. We can do it on a subset of the cluster if nodelist is not None. ''' result={} if not nodelist: nodelist=self.Env["nodes"] for node in nodelist: if self.StataCM(node): result[node] = "up" else: result[node] = "down" return result def isolate_node(self, target, nodes=None): '''isolate the communication between the nodes''' if not nodes: nodes = self.Env["nodes"] for node in nodes: if node != target: rc = self.rsh(target, self["BreakCommCmd"] % self.key_for_node(node)) if rc != 0: self.log("Could not break the communication between %s and %s: %d" % (target, node, rc)) return None else: self.debug("Communication cut between %s and %s" % (target, node)) return 1 def unisolate_node(self, target, nodes=None): '''fix the communication between the nodes''' if not nodes: nodes = self.Env["nodes"] for node in nodes: if node != target: restored = 0 # Limit the amount of time we have asynchronous connectivity for # Restore both sides as simultaneously as possible self.rsh(target, self["FixCommCmd"] % self.key_for_node(node), blocking=0) self.rsh(node, self["FixCommCmd"] % self.key_for_node(target), blocking=0) self.debug("Communication restored between %s and %s" % (target, node)) def reducecomm_node(self,node): '''reduce the communication between the nodes''' rc = self.rsh(node, self["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"])) if rc == 0: return 1 else: self.log("Could not reduce the communication between the nodes from node: %s" % node) return None def restorecomm_node(self,node): '''restore the saved communication between the nodes''' rc = 0 if float(self.Env["XmitLoss"])!=0 or float(self.Env["RecvLoss"])!=0 : rc = self.rsh(node, self["RestoreCommCmd"]); if rc == 0: return 1 else: self.log("Could not restore the communication between the nodes from node: %s" % node) return None def HasQuorum(self, node_list): "Return TRUE if the cluster currently has quorum" # If we are auditing a partition, then one side will # have quorum and the other not. # So the caller needs to tell us which we are checking # If no value for node_list is specified... 
assume all nodes
        raise ValueError("Abstract Class member (HasQuorum)")

    def Components(self):
        raise ValueError("Abstract Class member (Components)")

    def oprofileStart(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStart(n)

        elif node in self.Env["oprofile"]:
            self.debug("Enabling oprofile on %s" % node)
            self.rsh(node, "opcontrol --init")
            self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
            self.rsh(node, "opcontrol --start")
            self.rsh(node, "opcontrol --reset")

    def oprofileSave(self, test, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileSave(test, n)

        elif node in self.Env["oprofile"]:
            self.rsh(node, "opcontrol --dump")
            self.rsh(node, "opcontrol --save=cts.%d" % test)
            # Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
            if None:  # NOTE: never true, so the reset branch below is dead code
                self.rsh(node, "opcontrol --reset")
            else:
                self.oprofileStop(node)
                self.oprofileStart(node)

    def oprofileStop(self, node=None):
        if not node:
            for n in self.Env["oprofile"]:
                self.oprofileStop(n)

        elif node in self.Env["oprofile"]:
            self.debug("Stopping oprofile on %s" % node)
            self.rsh(node, "opcontrol --reset")
            self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")

class Resource:
    '''
    This is an HA resource (not a resource group).
    A resource group is just an ordered list of Resource objects.
    '''

    def __init__(self, cm, rsctype=None, instance=None):
        self.CM = cm
        self.ResourceType = rsctype
        self.Instance = instance
        self.needs_quorum = 1

    def Type(self):
        return self.ResourceType

    def Instance(self, nodename):
        # NOTE: the Instance attribute assigned in __init__ shadows this
        # method on instances, so this accessor is effectively unreachable.
        return self.Instance

    def IsRunningOn(self, nodename):
        '''
        This member function returns true if our resource is running
        on the given node in the cluster.
        It is analogous to the "status" operation on SystemV init scripts and
        heartbeat scripts.  FailSafe calls it the "exclusive" operation.
        '''
        raise ValueError("Abstract Class member (IsRunningOn)")
        return None

    def IsWorkingCorrectly(self, nodename):
        '''
        This member function returns true if our resource is operating
        correctly on the given node in the cluster.
        Heartbeat does not require this operation, but it might be called
        the Monitor operation, which is what FailSafe calls it.
        For remotely monitorable resources (like IP addresses), they *should*
        be monitored remotely for testing.
        '''
        raise ValueError("Abstract Class member (IsWorkingCorrectly)")
        return None

    def Start(self, nodename):
        '''
        This member function starts or activates the resource.
        '''
        raise ValueError("Abstract Class member (Start)")
        return None

    def Stop(self, nodename):
        '''
        This member function stops or deactivates the resource.
''' raise ValueError("Abstract Class member (Stop)") return None def __repr__(self): if (self.Instance and len(self.Instance) > 1): return "{" + self.ResourceType + "::" + self.Instance + "}" else: return "{" + self.ResourceType + "}" class Component: def kill(self, node): None class Process(Component): def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], triggersreboot=0): self.name = str(name) self.dc_only = dc_only self.pats = pats self.dc_pats = dc_pats self.CM = cm self.badnews_ignore = badnews_ignore self.triggersreboot = triggersreboot if process: self.proc = str(process) else: self.proc = str(name) self.KillCmd = "killall -9 " + self.proc def kill(self, node): if self.CM.rsh(node, self.KillCmd) != 0: self.CM.log ("ERROR: Kill %s failed on node %s" %(self.name,node)) return None return 1 diff --git a/cts/CTStests.py b/cts/CTStests.py index a76e10848b..becf5bba69 100644 --- a/cts/CTStests.py +++ b/cts/CTStests.py @@ -1,2173 +1,2187 @@ '''CTS: Cluster Testing System: Tests module There are a few things we want to do here: ''' __copyright__=''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. Add RecourceRecover testcase Zhao Kai ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. # import time, os, re, types, string, tempfile, sys from stat import * from cts import CTS from cts.CTSaudits import * AllTestClasses = [ ] class CTSTest: ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. 
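# Component/Process above are how CTS models the individual cluster daemons
# it kills during failure tests: Process.kill() simply runs
# "killall -9 <proc>" on the target node and reports failure via CM.log().
# A hedged, self-contained sketch of the same idea (KillableProcess is an
# illustrative name, and run() stands in for ClusterManager.rsh):

import subprocess

class KillableProcess:
    def __init__(self, name, process=None):
        self.name = name
        self.kill_cmd = "killall -9 " + (process or name)

    def kill(self, run=subprocess.call):
        # run() must return 0 on success, matching rsh()/os.system() semantics.
        if run(self.kill_cmd, shell=True) != 0:
            print("ERROR: Kill %s failed" % self.name)
            return None
        return 1

# Example: KillableProcess("crmd").kill() -- on a real cluster node this
# would forcibly kill crmd so the recovery path can be observed.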
''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Audits = [] self.timeout=120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_experimental = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def log_mark(self, msg): self.CM.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" %(self.name,key) self.CM.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.CM.log(("Test %s" % self.name).ljust(35) +" FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.CM.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return CTS.LogWatcher(self.CM.Env, self.CM["LogFileName"], patterns, name, timeout) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match=watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.CM.log(prefix + " " + match) errcount=errcount+1 else: break else: self.CM.log("Too many errors!") return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.CM.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.CM.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.CM.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.CM.Env["experimental-tests"]: return 0 elif self.CM.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.CM.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.CM.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] ################################################################### class StopTest(CTSTest): ################################################################### '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name="Stop" def __call__(self, node): '''Perform the 'stop' test. 
''' self.incr("calls") if self.CM.ShouldBeStatus[node] != "up": return self.skipped() patterns = [] # Technically we should always be able to notice ourselves stopping patterns.append(self.CM["Pat:We_stopped"] % node) #if self.CM.Env["use_logd"]: # patterns.append(self.CM["Pat:Logd_stopped"] % node) # Any active node needs to notice this one left # NOTE: This wont work if we have multiple partitions for other in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[other] == "up" and other != node: patterns.append(self.CM["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) #self.debug("Checking %s will notice %s left"%(other, node)) watch = self.create_watch(patterns, self.CM["DeadTime"]) watch.setwatch() if node == self.CM.OurNode: self.incr("us") else: if self.CM.upcount() <= 1: self.incr("all") else: self.incr("them") self.CM.StopaCM(node) watch_result = watch.lookforall() failreason=None UnmatchedList = "||" if watch.unmatched: (rc, output) = self.CM.rsh(node, "/bin/ps axf", None) for line in output: self.CM.debug(line) for regex in watch.unmatched: self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex)) UnmatchedList += regex + "||"; failreason="Missing shutdown pattern" self.CM.cluster_stable(self.CM["DeadTime"]) if not watch.unmatched or self.CM.upcount() == 0: return self.success() if len(watch.unmatched) >= self.CM.upcount(): return self.failure("no match against (%s)" % UnmatchedList) if failreason == None: return self.success() else: return self.failure(failreason) # # We don't register StopTest because it's better when called by # another test... # ################################################################### class StartTest(CTSTest): ################################################################### '''Start (activate) the cluster manager on a node''' def __init__(self, cm, debug=None): CTSTest.__init__(self,cm) self.name="start" self.debug = debug def __call__(self, node): '''Perform the 'start' test. ''' self.incr("calls") if self.CM.upcount() == 0: self.incr("us") else: self.incr("them") if self.CM.ShouldBeStatus[node] != "down": return self.skipped() elif self.CM.StartaCM(node): return self.success() else: return self.failure("Startup %s on node %s failed" %(self.CM["Name"], node)) # # We don't register StartTest because it's better when called by # another test... # ################################################################### class FlipTest(CTSTest): ################################################################### '''If it's running, stop it. If it's stopped start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Flip" self.start = StartTest(cm) self.stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") if self.CM.ShouldBeStatus[node] == "up": self.incr("stopped") ret = self.stop(node) type="up->down" # Give the cluster time to recognize it's gone... 
time.sleep(self.CM["StableTime"]) elif self.CM.ShouldBeStatus[node] == "down": self.incr("started") ret = self.start(node) type="down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) ################################################################### class RestartTest(CTSTest): ################################################################### '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Restart" self.start = StartTest(cm) self.stop = StopTest(cm) self.benchmark = 1 def __call__(self, node): '''Perform the 'restart' test. ''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) ################################################################### class StonithdTest(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self, cm) self.name="Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.CM.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") watchpats = [] watchpats.append("Forcing node %s to be terminated" % node) watchpats.append("Scheduling Node %s for STONITH" % node) watchpats.append("Executing .* fencing operation") watchpats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node) if not self.CM.is_node_dc(node): # Won't be found if the DC is shot (and there's no equivalent message from stonithd) watchpats.append("tengine_stonith_callback: .*: OK ") # TODO else: look for the notification on a peer once implimented if self.CM.Env["at-boot"] == 0: self.CM.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node]="down" else: self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"])) watchpats.append("%s crmd: .* S_STARTING -> S_PENDING" % node) watchpats.append("%s crmd: .* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"]) watch.setwatch() self.CM.rsh(node, "crm_attribute --node %s --type status --attr-name terminate --attr-value true" % node) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") if matched: self.CM.debug("Found: "+ repr(matched)) else: self.CM.log("Patterns not found: " + repr(watch.unmatched)) self.CM.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.CM.debug("Waiting STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600) self.CM.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.CM["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ "Executing .* fencing operation" ] def is_applicable(self): if not 
###################################################################
class StartOnebyOne(CTSTest):
###################################################################
    '''Start all the nodes ~ one by one'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "StartOnebyOne"
        self.stopall = SimulStopLite(cm)
        self.start = StartTest(cm)
        self.ns = CTS.NodeStatus(cm.Env)

    def __call__(self, dummy):
        '''Perform the 'StartOnebyOne' test. '''
        self.incr("calls")

        # We ignore the "node" parameter...

        # Shut down all the nodes...
        ret = self.stopall(None)
        if not ret:
            return self.failure("Test setup failed")

        failed = []
        self.set_timer()
        for node in self.CM.Env["nodes"]:
            if not self.start(node):
                failed.append(node)

        if len(failed) > 0:
            return self.failure("Some node failed to start: " + repr(failed))

        return self.success()

# Register StartOnebyOne as a good test to run
AllTestClasses.append(StartOnebyOne)

###################################################################
class SimulStart(CTSTest):
###################################################################
    '''Start all the nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "SimulStart"
        self.stopall = SimulStopLite(cm)
        self.startall = SimulStartLite(cm)

    def __call__(self, dummy):
        '''Perform the 'SimulStart' test. '''
        self.incr("calls")

        # We ignore the "node" parameter...

        # Shut down all the nodes...
        ret = self.stopall(None)
        if not ret:
            return self.failure("Setup failed")

        self.CM.clear_all_caches()

        if not self.startall(None):
            return self.failure("Startall failed")

        return self.success()

# Register SimulStart as a good test to run
AllTestClasses.append(SimulStart)

###################################################################
class SimulStop(CTSTest):
###################################################################
    '''Stop all the nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "SimulStop"
        self.startall = SimulStartLite(cm)
        self.stopall = SimulStopLite(cm)

    def __call__(self, dummy):
        '''Perform the 'SimulStop' test. '''
        self.incr("calls")

        # We ignore the "node" parameter...

        # Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        if not self.stopall(None):
            return self.failure("Stopall failed")

        return self.success()

# Register SimulStop as a good test to run
AllTestClasses.append(SimulStop)

###################################################################
class StopOnebyOne(CTSTest):
###################################################################
    '''Stop all the nodes in order'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "StopOnebyOne"
        self.startall = SimulStartLite(cm)
        self.stop = StopTest(cm)

    def __call__(self, dummy):
        '''Perform the 'StopOnebyOne' test. '''
        self.incr("calls")

        # We ignore the "node" parameter...

        # Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        failed = []
        self.set_timer()
        for node in self.CM.Env["nodes"]:
            if not self.stop(node):
                failed.append(node)

        if len(failed) > 0:
            return self.failure("Some node failed to stop: " + repr(failed))

        self.CM.clear_all_caches()
        return self.success()

# Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)

###################################################################
class RestartOnebyOne(CTSTest):
###################################################################
    '''Restart all the nodes in order'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "RestartOnebyOne"
        self.startall = SimulStartLite(cm)

    def __call__(self, dummy):
        '''Perform the 'RestartOnebyOne' test. '''
        self.incr("calls")

        # We ignore the "node" parameter...

        # Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        did_fail = []
        self.set_timer()
        self.restart = RestartTest(self.CM)
        for node in self.CM.Env["nodes"]:
            if not self.restart(node):
                did_fail.append(node)

        if did_fail:
            return self.failure("Could not restart %d nodes: %s"
                                %(len(did_fail), repr(did_fail)))

        return self.success()

# Register RestartOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)

###################################################################
class PartialStart(CTSTest):
###################################################################
    '''Start a node - but tell it to stop before it finishes starting up'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "PartialStart"
        self.startall = SimulStartLite(cm)
        self.stopall = SimulStopLite(cm)
        #self.is_unsafe = 1

    def __call__(self, node):
        '''Perform the 'PartialStart' test. '''
        self.incr("calls")

        ret = self.stopall(None)
        if not ret:
            return self.failure("Setup failed")

        # FIXME!  This should use the CM class to get the pattern
        #         then it would be applicable in general
        watchpats = []
        watchpats.append("Starting crmd")
        watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
        watch.setwatch()

        self.CM.StartaCMnoBlock(node)
        ret = watch.lookforall()
        if not ret:
            self.CM.log("Patterns not found: " + repr(watch.unmatched))
            return self.failure("Setup of %s failed" % node)

        ret = self.stopall(None)
        if not ret:
            return self.failure("%s did not stop in time" % node)

        return self.success()

# Register PartialStart as a good test to run
AllTestClasses.append(PartialStart)

#######################################################################
class StandbyTest(CTSTest):
#######################################################################
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "Standby"
        self.benchmark = 1

        self.start = StartTest(cm)
        self.startall = SimulStartLite(cm)

    # make sure the node is active
    # set the node to standby mode
    # check resources, no resources should be running on the node
    # set the node to active mode
    # check resources, resources should have been migrated back (SHOULD THEY?)
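    # Added walk-through (not in the original): the assertions in __call__
    # below boil down to this hypothetical timeline:
    #   t0: StandbyStatus(node) == "off"   -> record active_resources(node)
    #   t1: SetStandbyMode(node, "on")     -> active_resources(node) == []
    #   t2: SetStandbyMode(node, "off")    -> standby status back to "off"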
    def __call__(self, node):
        self.incr("calls")
        ret = self.startall(None)
        if not ret:
            return self.failure("Start all nodes failed")

        self.CM.debug("Make sure node %s is active" % node)
        if self.CM.StandbyStatus(node) != "off":
            if not self.CM.SetStandbyMode(node, "off"):
                return self.failure("can't set node %s to active mode" % node)

        self.CM.cluster_stable()

        status = self.CM.StandbyStatus(node)
        if status != "off":
            return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))

        self.CM.debug("Getting resources running on node %s" % node)
        rsc_on_node = self.CM.active_resources(node)

        self.CM.debug("Setting node %s to standby mode" % node)
        if not self.CM.SetStandbyMode(node, "on"):
            return self.failure("can't set node %s to standby mode" % node)

        self.set_timer("on")
        time.sleep(1)  # Allow time for the update to be applied and cause something
        self.CM.cluster_stable()

        status = self.CM.StandbyStatus(node)
        if status != "on":
            return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
        self.log_timer("on")

        self.CM.debug("Checking resources")
        bad_run = self.CM.active_resources(node)
        if len(bad_run) > 0:
            rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
            self.CM.debug("Setting node %s to active mode" % node)
            self.CM.SetStandbyMode(node, "off")
            return rc

        self.CM.debug("Setting node %s to active mode" % node)
        if not self.CM.SetStandbyMode(node, "off"):
            return self.failure("can't set node %s to active mode" % node)

        self.set_timer("off")
        self.CM.cluster_stable()

        status = self.CM.StandbyStatus(node)
        if status != "off":
            return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
        self.log_timer("off")

        return self.success()

AllTestClasses.append(StandbyTest)

#######################################################################
class ValgrindTest(CTSTest):
#######################################################################
    '''Check for memory leaks'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "Valgrind"
        self.stopall = SimulStopLite(cm)
        self.startall = SimulStartLite(cm)
        self.is_valgrind = 1
        self.is_loop = 1

    def setup(self, node):
        self.incr("calls")
        ret = self.stopall(None)
        if not ret:
            return self.failure("Stop all nodes failed")

        # Enable valgrind
        self.logPat = "/tmp/%s-*.valgrind" % self.name

        self.CM.Env["valgrind-prefix"] = self.name

        self.CM.rsh(node, "rm -f %s" % self.logPat, None)

        ret = self.startall(None)
        if not ret:
            return self.failure("Start all nodes failed")

        for node in self.CM.Env["nodes"]:
            (rc, output) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
            for line in output:
                self.CM.debug(line)

        return self.success()

    def teardown(self, node):
        # Disable valgrind
        self.CM.Env["valgrind-prefix"] = None

        # Return all nodes to normal
        ret = self.stopall(None)
        if not ret:
            return self.failure("Stop all nodes failed")

        return self.success()

    def find_leaks(self):
        # Check for leaks
        leaked = []
        self.stop = StopTest(self.CM)

        for node in self.CM.Env["nodes"]:
            (rc, ps_out) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
            rc = self.stop(node)
            if not rc:
                self.failure("Couldn't shut down %s" % node)

            rc = self.CM.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e ERROR.*SUMMARY:.*[1-9].*errors %s" % self.logPat, 0)
            if rc != 1:
                leaked.append(node)
                self.failure("Valgrind errors detected on %s" % node)
                for line in ps_out:
                    self.CM.log(line)
                (rc, output) = self.CM.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logPat, None)
                for line in output:
                    self.CM.log(line)
                (rc, output) = self.CM.rsh(node, "cat %s" % self.logPat, None)
                for line in output:
                    self.CM.debug(line)

            self.CM.rsh(node, "rm -f %s" % self.logPat, None)

        return leaked

    def __call__(self, node):
        leaked = self.find_leaks()
        if len(leaked) > 0:
            return self.failure("Nodes %s leaked" % repr(leaked))

        return self.success()

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ]

#######################################################################
class StandbyLoopTest(ValgrindTest):
#######################################################################
    '''Check for memory leaks by putting a node in and out of standby for an hour'''
    def __init__(self, cm):
        ValgrindTest.__init__(self,cm)
        self.name = "StandbyLoop"

    def __call__(self, node):
        lpc = 0
        delay = 2
        failed = 0
        done = time.time() + self.CM.Env["loop-minutes"]*60
        while time.time() <= done and not failed:
            lpc = lpc + 1

            time.sleep(delay)
            if not self.CM.SetStandbyMode(node, "on"):
                self.failure("can't set node %s to standby mode" % node)
                failed = lpc

            time.sleep(delay)
            if not self.CM.SetStandbyMode(node, "off"):
                self.failure("can't set node %s to active mode" % node)
                failed = lpc

        leaked = self.find_leaks()
        if failed:
            return self.failure("Iteration %d failed" % failed)
        elif len(leaked) > 0:
            return self.failure("Nodes %s leaked" % repr(leaked))

        return self.success()

AllTestClasses.append(StandbyLoopTest)

##############################################################################
class BandwidthTest(CTSTest):
##############################################################################
    #        Tests should not be cluster-manager-specific
    #        If you need to find out cluster manager configuration to do this, then
    #        it should be added to the generic cluster manager API.
    '''Test the bandwidth which heartbeat uses'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "Bandwidth"
        self.start = StartTest(cm)
        self.__setitem__("min",0)
        self.__setitem__("max",0)
        self.__setitem__("totalbandwidth",0)
        self.tempfile = tempfile.mktemp(".cts")
        self.startall = SimulStartLite(cm)

    def __call__(self, node):
        '''Perform the Bandwidth test'''
        self.incr("calls")

        if self.CM.upcount() < 1:
            return self.skipped()

        Path = self.CM.InternalCommConfig()
        if "ip" not in Path["mediatype"]:
            return self.skipped()

        port = Path["port"][0]
        port = int(port)

        ret = self.startall(None)
        if not ret:
            return self.failure("Test setup failed")
        time.sleep(5)  # We get extra messages right after startup.

        fstmpfile = "/var/run/band_estimate"
        dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
                  % (port, fstmpfile)

        rc = self.CM.rsh(node, dumpcmd)
        if rc == 0:
            farfile = "root@%s:%s" % (node, fstmpfile)
            self.CM.rsh.cp(farfile, self.tempfile)
            Bandwidth = self.countbandwidth(self.tempfile)
            if not Bandwidth:
                self.CM.log("Could not compute bandwidth.")
                return self.success()
            intband = int(Bandwidth + 0.5)
            self.CM.log("...bandwidth: %d bits/sec" % intband)
            self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
            if self.Stats["min"] == 0:
                self.Stats["min"] = Bandwidth
            if Bandwidth > self.Stats["max"]:
                self.Stats["max"] = Bandwidth
            if Bandwidth < self.Stats["min"]:
                self.Stats["min"] = Bandwidth
            self.CM.rsh(node, "rm -f %s" % fstmpfile)
            os.unlink(self.tempfile)
            return self.success()
        else:
            return self.failure("no response from tcpdump command [%d]!" % rc)
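    # Added note (not in the original): countbandwidth() below implements a
    # simple estimate over ~100 sniffed heartbeat packets:
    #   bits/sec = 8 * (sum of UDP "length:" fields)
    #              / (timestamp of last packet - timestamp of first packet)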
    def countbandwidth(self, file):
        fp = open(file, "r")
        fp.seek(0)
        count = 0
        sum = 0

        while 1:
            line = fp.readline()
            if not line:
                return None

            if re.search("udp",line) or re.search("UDP,", line):
                count = count + 1
                linesplit = string.split(line," ")
                for j in range(len(linesplit)-1):
                    if linesplit[j] == "udp": break
                    if linesplit[j] == "length:": break

                try:
                    sum = sum + int(linesplit[j+1])
                except ValueError:
                    self.CM.log("Invalid tcpdump line: %s" % line)
                    return None
                T1 = linesplit[0]
                timesplit = string.split(T1,":")
                time2split = string.split(timesplit[2],".")
                time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
                break

        while count < 100:
            line = fp.readline()
            if not line:
                return None

            if re.search("udp",line) or re.search("UDP,", line):
                count = count+1
                linessplit = string.split(line," ")
                for j in range(len(linessplit)-1):
                    if linessplit[j] == "udp": break
                    if linessplit[j] == "length:": break  # was linesplit[j]: stale reference to the first loop's list

                try:
                    sum = int(linessplit[j+1]) + sum
                except ValueError:
                    self.CM.log("Invalid tcpdump line: %s" % line)
                    return None

        T2 = linessplit[0]
        timesplit = string.split(T2,":")
        time2split = string.split(timesplit[2],".")
        time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
        time = time2-time1
        if (time <= 0):
            return 0
        return (sum*8)/time

    def is_applicable(self):
        '''BandwidthTest never applicable'''
        return 0

AllTestClasses.append(BandwidthTest)

###################################################################
class ResourceRecover(CTSTest):
###################################################################
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "ResourceRecover"
        self.start = StartTest(cm)
        self.startall = SimulStartLite(cm)
        self.max = 30
        self.rid = None
        #self.is_unsafe = 1
        self.benchmark = 1

        # these are the values used for the new LRM API call
        self.action = "asyncmon"
        self.interval = 0

    def __call__(self, node):
        '''Perform the 'ResourceRecover' test. '''
        self.incr("calls")

        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        resourcelist = self.CM.active_resources(node)
        # if the resource list is empty, there is nothing to recover
        if len(resourcelist) == 0:
            self.CM.log("No active resources on %s" % node)
            return self.skipped()

        self.rid = self.CM.Env.RandomGen.choice(resourcelist)

        rsc = None
        (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
        for line in lines:
            if re.search("^Resource", line):
                tmp = AuditResource(self.CM, line)
                if tmp.id == self.rid:
                    rsc = tmp
                    # Handle anonymous clones that get renamed
                    self.rid = rsc.clone_id
                    break

        if not rsc:
            return self.failure("Could not find %s in the resource list" % self.rid)

        self.CM.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))

        pats = []
        pats.append("Updating failcount for %s on .* after .* %s" % (self.rid, self.action))

        if rsc.managed():
            pats.append("crmd:.* Performing .* op=%s_stop_0" % self.rid)
            if rsc.unique():
                pats.append("crmd:.* Performing .* op=%s_start_0" % self.rid)
                pats.append("crmd:.* LRM operation %s_start_0.*confirmed.*ok" % self.rid)
            else:
                # Anonymous clones may get restarted with a different clone number
                pats.append("crmd:.* Performing .* op=.*_start_0")
                pats.append("crmd:.* LRM operation .*_start_0.*confirmed.*ok")

        watch = self.create_watch(pats, 60)
        watch.setwatch()

        self.CM.rsh(node, "crm_resource -F -r %s -H %s &>/dev/null" % (self.rid, node))

        self.set_timer("recover")
        watch.lookforall()
        self.log_timer("recover")

        self.CM.cluster_stable()
        recovered = self.CM.ResourceLocation(self.rid)

        if watch.unmatched:
            return self.failure("Patterns not found: %s" % repr(watch.unmatched))

        elif rsc.unique() and len(recovered) > 1:
            return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))

        elif len(recovered) > 0:
            self.CM.debug("%s is running on: %s" %(self.rid, repr(recovered)))

        elif rsc.managed():
            return self.failure("%s was not recovered and is inactive" % self.rid)

        return self.success()

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        return [ """Updating failcount for %s""" % self.rid,
                 """Unknown operation: fail""",
                 """ERROR: sending stonithRA op to stonithd failed.""",
                 """ERROR: process_lrm_event: LRM operation %s_%s_%d""" % (self.rid, self.action, self.interval),
                 """ERROR: process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
                 ]

AllTestClasses.append(ResourceRecover)
###################################################################
class ComponentFail(CTSTest):
###################################################################
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "ComponentFail"
        self.startall = SimulStartLite(cm)
        self.complist = cm.Components()
        self.patterns = []
        self.okerrpatterns = []
        self.is_unsafe = 1

    def __call__(self, node):
        '''Perform the 'ComponentFail' test. '''
        self.incr("calls")
        self.patterns = []
        self.okerrpatterns = []

        # start all nodes
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        if not self.CM.cluster_stable(self.CM["StableTime"]):
            return self.failure("Setup failed - unstable")

        node_is_dc = self.CM.is_node_dc(node, None)

        # select a component to kill
        chosen = self.CM.Env.RandomGen.choice(self.complist)
        while chosen.dc_only == 1 and node_is_dc == 0:
            chosen = self.CM.Env.RandomGen.choice(self.complist)

        self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
        self.incr(chosen.name)

        if chosen.name != "aisexec":
            if self.CM["Name"] != "crm-lha" or chosen.name != "pengine":
                self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
                self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))

        self.patterns.extend(chosen.pats)
        if node_is_dc:
            self.patterns.extend(chosen.dc_pats)

        # In an ideal world, this next stuff should be in the "chosen" object as a member function
        if self.CM["Name"] == "crm-lha" and chosen.triggersreboot:
            # Make sure the node goes down and then comes back up if it should reboot...
            for other in self.CM.Env["nodes"]:
                if other != node:
                    self.patterns.append(self.CM["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
            self.patterns.append(self.CM["Pat:Slave_started"] % node)
            self.patterns.append(self.CM["Pat:Local_started"] % node)

            if chosen.dc_only:
                # Sometimes these will be in the log, and sometimes they won't...
                self.okerrpatterns.append("%s crmd:.*Process %s:.* exited" %(node, chosen.name))
                self.okerrpatterns.append("%s crmd:.*I_ERROR.*crmdManagedChildDied" %node)
                self.okerrpatterns.append("%s crmd:.*The %s subsystem terminated unexpectedly" %(node, chosen.name))
                self.okerrpatterns.append("ERROR: Client .* exited with return code")
            else:
                # Sometimes this won't be in the log...
                self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
                self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
                self.okerrpatterns.append(self.CM["Pat:ChildExit"])

        # supply a copy so self.patterns doesn't end up empty
        tmpPats = []
        tmpPats.extend(self.patterns)
        self.patterns.extend(chosen.badnews_ignore)

        # Look for STONITH ops, depending on Env["at-boot"] we might need to change the node's status
        stonithPats = []
        stonithPats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node)
        stonith = self.create_watch(stonithPats, 0)
        stonith.setwatch()

        # set the watch for stable
        watch = self.create_watch(
            tmpPats, self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
        watch.setwatch()

        # kill the component
        chosen.kill(node)

        # check to see if Heartbeat noticed
        matched = watch.lookforall(allow_multiple_matches=1)
        if matched:
            self.CM.debug("Found: "+ repr(matched))
        else:
            self.CM.log("Patterns not found: " + repr(watch.unmatched))

        if self.CM.Env["at-boot"] == 0:
            self.CM.debug("Checking if %s was shot" % node)
            shot = stonith.look(60)
            if shot:
                self.CM.debug("Found: "+ repr(shot))
                self.CM.ShouldBeStatus[node] = "down"

        self.CM.debug("Waiting for the cluster to recover")
        self.CM.cluster_stable()

        self.CM.debug("Waiting for any STONITH'd node to come back up")
        self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)

        self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
        is_stable = self.CM.cluster_stable(self.CM["StartTime"])

        if not matched:
            return self.failure("Didn't find all expected patterns")
        elif not is_stable:
            return self.failure("Cluster did not become stable")

        return self.success()

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        # Note that okerrpatterns refers to the last time we ran this test
        # The good news is that this works fine for us...
        self.okerrpatterns.extend(self.patterns)
        return self.okerrpatterns

AllTestClasses.append(ComponentFail)
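# Added sketch (not in the original source): per iteration, ComponentFail
# reduces to the following, assuming a configured ClusterManager `cm`:
#
#   chosen = cm.Env.RandomGen.choice(cm.Components())
#   chosen.kill(node)   # then watch for the ChildKilled/ChildRespawn patterns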
####################################################################
class SplitBrainTest(CTSTest):
####################################################################
    '''Test split-brain handling: break the communication paths between
       groups of nodes, then check that each partition behaves sanely and
       that the cluster reforms once the paths are healed.'''
    def __init__(self,cm):
        CTSTest.__init__(self,cm)
        self.name = "SplitBrain"
        self.start = StartTest(cm)
        self.startall = SimulStartLite(cm)
        self.is_experimental = 1

    def isolate_partition(self, partition):
        other_nodes = []
        other_nodes.extend(self.CM.Env["nodes"])

        for node in partition:
            try:
                other_nodes.remove(node)
            except ValueError:
                self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition))

        if len(other_nodes) == 0:
            return 1

        self.CM.debug("Creating partition: " + repr(partition))
        self.CM.debug("Everyone else: " + repr(other_nodes))

        for node in partition:
            if not self.CM.isolate_node(node, other_nodes):
                self.CM.log("Could not isolate %s" % node)
                return 0

        return 1

    def heal_partition(self, partition):
        other_nodes = []
        other_nodes.extend(self.CM.Env["nodes"])

        for node in partition:
            try:
                other_nodes.remove(node)
            except ValueError:
                self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]))

        if len(other_nodes) == 0:
            return 1

        self.CM.debug("Healing partition: " + repr(partition))
        self.CM.debug("Everyone else: " + repr(other_nodes))

        for node in partition:
            self.CM.unisolate_node(node, other_nodes)

    def __call__(self, node):
        '''Perform split-brain test'''
        self.incr("calls")
        self.passed = 1
        partitions = {}

        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        while 1:
            # Retry until we get multiple partitions
            partitions = {}
            p_max = len(self.CM.Env["nodes"])
            for node in self.CM.Env["nodes"]:
                p = self.CM.Env.RandomGen.randint(1, p_max)
                if not partitions.has_key(p):
                    partitions[p] = []
                partitions[p].append(node)
            p_max = len(partitions.keys())
            if p_max > 1:
                break
            # else, try again

        self.CM.debug("Created %d partitions" % p_max)
        for key in partitions.keys():
            self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))

        # Disabling STONITH to reduce test complexity for now
        self.CM.rsh(node, "crm_attribute -n stonith-enabled -v false")

        for key in partitions.keys():
            self.isolate_partition(partitions[key])

        count = 30
        while count > 0:
            if len(self.CM.find_partitions()) != p_max:
                time.sleep(10)
                count -= 1    # was missing: without it this loop could never terminate
            else:
                break
        else:
            self.failure("Expected partitions were not created")

        # Target number of partitions formed - wait for stability
        if not self.CM.cluster_stable():
            self.failure("Partitioned cluster not stable")

        # Now audit the cluster state
        self.CM.partitions_expected = p_max
        if not self.audit():
            self.failure("Audits failed")
        self.CM.partitions_expected = 1

        # And heal them again
        for key in partitions.keys():
            self.heal_partition(partitions[key])

        # Wait for a single partition to form
        count = 30
        while count > 0:
            if len(self.CM.find_partitions()) != 1:
                time.sleep(10)
                count -= 1
            else:
                break
        else:
            self.failure("Cluster did not reform")

        # Wait for it to have the right number of members
        count = 30
        while count > 0:
            members = []

            partitions = self.CM.find_partitions()
            if len(partitions) > 0:
                members = partitions[0].split()

            if len(members) != len(self.CM.Env["nodes"]):
                time.sleep(10)
                count -= 1
            else:
                break
        else:
            self.failure("Cluster did not completely reform")

        # Wait up to 20 minutes - the delay is preferable to trying
        # to continue in a messed-up state
        if not self.CM.cluster_stable(1200):
            self.failure("Reformed cluster not stable")
            answer = raw_input('Continue? [nY]')
            if answer and answer == "n":
                raise ValueError("Reformed cluster not stable")

        # Turn fencing back on
        if self.CM.Env["DoStonith"]:
            self.CM.rsh(node, "crm_attribute -D -n stonith-enabled")

        self.CM.cluster_stable()

        if self.passed:
            return self.success()
        return self.failure("See previous errors")

    def errorstoignore(self):
        '''Return list of errors which are 'normal' and should be ignored'''
        return [
            "Another DC detected:",
            "ERROR: attrd_cib_callback: .*Application of an update diff failed",
            "crmd_ha_msg_callback:.*not in our membership list",
            "CRIT:.*node.*returning after partition",
            ]

    def is_applicable(self):
        if not self.is_applicable_common():
            return 0
        return len(self.CM.Env["nodes"]) > 2

AllTestClasses.append(SplitBrainTest)

####################################################################
class Reattach(CTSTest):
####################################################################
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "Reattach"
        self.startall = SimulStartLite(cm)
        self.restart1 = RestartTest(cm)
        self.stopall = SimulStopLite(cm)
        self.is_unsafe = 0 # Handled by canrunnow()

    def setup(self, node):
-        return self.startall(None)
+        attempt=0
+        if not self.startall(None):
+            return None
+
+        # Make sure we are really _really_ stable and that all
+        # resources, including those that depend on transient node
+        # attributes, are started
+        while not self.CM.cluster_stable():
+            if attempt < 5:
+                attempt += 1
+                self.CM.debug("Not stable yet, re-testing")
+            else:
+                return None
+
+        return 1

    def canrunnow(self, node):
        '''Return TRUE if we can meaningfully run right now'''
        if self.find_ocfs2_resources(node):
            self.CM.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
            return 0
        return 1

    def __call__(self, node):
        self.incr("calls")

        pats = []
        managed = self.create_watch(["is-managed-default"], 60)
        managed.setwatch()

        self.CM.debug("Disable resource management")
        self.CM.rsh(node, "crm_attribute -n is-managed-default -v false")

        if not managed.lookforall():
            self.CM.log("Patterns not found: " + repr(managed.unmatched))
            return self.failure("Resource management not disabled")

        pats = []
        pats.append("crmd:.*Performing.*_stop_0")
        pats.append("crmd:.*Performing.*_start_0")
        pats.append("crmd:.*Performing.*_promote_0")
        pats.append("crmd:.*Performing.*_demote_0")
        pats.append("crmd:.*Performing.*_migrate_.*_0")

        watch = self.create_watch(pats, 60, "ShutdownActivity")
        watch.setwatch()

        self.CM.debug("Shutting down the cluster")
        ret = self.stopall(None)
        if not ret:
            self.CM.debug("Re-enable resource management")
            self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
            return self.failure("Couldn't shut down the cluster")

        self.CM.debug("Bringing the cluster back up")
        ret = self.startall(None)
        if not ret:
            self.CM.debug("Re-enable resource management")
            self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
            return self.failure("Couldn't restart the cluster")

        if self.local_badnews("ResourceActivity:", watch):
            self.CM.debug("Re-enable resource management")
            self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
            return self.failure("Resources stopped or started during cluster restart")

        watch = self.create_watch(pats, 60, "StartupActivity")
        watch.setwatch()

        managed = self.create_watch(["is-managed-default"], 60)
        managed.setwatch()

        self.CM.debug("Re-enable resource management")
        self.CM.rsh(node, "crm_attribute -D -n is-managed-default")

        if not managed.lookforall():
            self.CM.log("Patterns not found: " + repr(managed.unmatched))
            return self.failure("Resource management not enabled")

        self.CM.cluster_stable()

        # Ignore actions for STONITH resources
        ignore = []
        (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
        for line in lines:
            if re.search("^Resource", line):
                r = AuditResource(self.CM, line)
                if r.rclass == "stonith":
                    self.CM.debug("Ignoring: crmd:.*Performing.*op=%s_.*_0" % r.id)
                    ignore.append("crmd:.*Performing.*op=%s_.*_0" % r.id)

        if self.local_badnews("ResourceActivity:", watch, ignore):
            return self.failure("Resources stopped or started after resource management was re-enabled")

        return ret

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        return [
            "You may ignore this error if it is unmanaged.",
            "pingd: .*ERROR: send_ipc_message:",
            "pingd: .*ERROR: send_update:",
            "lrmd: .*ERROR: notify_client:",
            ]

    def is_applicable(self):
        if self.CM["Name"] == "crm-lha":
            return None
        return 1

AllTestClasses.append(Reattach)

####################################################################
class SpecialTest1(CTSTest):
####################################################################
    '''Set up a custom test to cause quorum failure issues for Andrew'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "SpecialTest1"
        self.startall = SimulStartLite(cm)
        self.restart1 = RestartTest(cm)
        self.stopall = SimulStopLite(cm)

    def __call__(self, node):
        '''Perform the 'SpecialTest1' test for Andrew. '''
        self.incr("calls")

        # Shut down all the nodes...
        ret = self.stopall(None)
        if not ret:
            return ret

        # Start the selected node
        ret = self.restart1(node)
        if not ret:
            return ret

        # Start all remaining nodes
        ret = self.startall(None)
        return ret

AllTestClasses.append(SpecialTest1)

####################################################################
class HAETest(CTSTest):
####################################################################
    '''Base class for tests of the HA Extension (HAE) stack:
       a cloned DLM (controld), o2cb and OCFS2 resources'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "HAETest"
        self.stopall = SimulStopLite(cm)
        self.startall = SimulStartLite(cm)
        self.is_loop = 1

    def setup(self, node):
        # Start all remaining nodes
        ret = self.startall(None)
        if not ret:
            return self.failure("Couldn't start all nodes")
        return self.success()

    def teardown(self, node):
        # Stop everything
        ret = self.stopall(None)
        if not ret:
            return self.failure("Couldn't stop all nodes")
        return self.success()

    def wait_on_state(self, node, resource, expected_clones, attempts=240):
        while attempts > 0:
            active = 0
            (rc, lines) = self.CM.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)

            # Hack until crm_resource does the right thing
            if rc == 0 and lines:
                active = len(lines)

            if len(lines) == expected_clones:
                return 1

            elif rc == 1:
                self.CM.debug("Resource %s is still inactive" % resource)

            elif rc == 234:
                self.CM.log("Unknown resource %s" % resource)
                return 0

            elif rc == 246:
                self.CM.log("Cluster is inactive")
                return 0

            elif rc != 0:
                self.CM.log("Call to crm_resource failed, rc=%d" % rc)
                return 0

            else:
                self.CM.debug("Resource %s is active %d times instead of %d" % (resource, active, expected_clones))

            attempts -= 1
            time.sleep(1)

        return 0

    def find_dlm(self, node):
        self.r_dlm = None

        (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
        for line in lines:
            if re.search("^Resource", line):
                r = AuditResource(self.CM, line)
                if r.rtype == "controld" and r.parent != "NA":
                    self.r_dlm = r.parent
                    self.CM.debug("Found dlm: %s" % self.r_dlm)
                    return 1
        return 0

    def find_hae_resources(self, node):
        self.r_dlm = None
        self.r_o2cb = None
        self.r_ocfs2 = []

        if self.find_dlm(node):
            self.find_ocfs2_resources(node)
    def is_applicable(self):
        if not self.is_applicable_common():
            return 0
        if self.CM.Env["Schema"] == "hae":
            return 1
        return None

####################################################################
class HAERoleTest(HAETest):
####################################################################
    def __init__(self, cm):
        '''Lars' mount/unmount test for the HA extension. '''
        HAETest.__init__(self,cm)
        self.name = "HAERoleTest"

    def change_state(self, node, resource, target):
        rc = self.CM.rsh(node, "crm_resource -r %s -p target-role -v %s --meta" % (resource, target))
        return rc

    def __call__(self, node):
        self.incr("calls")
        lpc = 0
        failed = 0
        delay = 2
        done = time.time() + self.CM.Env["loop-minutes"]*60
        self.find_hae_resources(node)

        clone_max = len(self.CM.Env["nodes"])
        while time.time() <= done and not failed:
            lpc = lpc + 1

            self.change_state(node, self.r_dlm, "Stopped")
            if not self.wait_on_state(node, self.r_dlm, 0):
                self.failure("%s did not go down correctly" % self.r_dlm)
                failed = lpc

            self.change_state(node, self.r_dlm, "Started")
            if not self.wait_on_state(node, self.r_dlm, clone_max):
                self.failure("%s did not come up correctly" % self.r_dlm)
                failed = lpc

            if not self.wait_on_state(node, self.r_o2cb, clone_max):
                self.failure("%s did not come up correctly" % self.r_o2cb)
                failed = lpc

            for fs in self.r_ocfs2:
                if not self.wait_on_state(node, fs, clone_max):
                    self.failure("%s did not come up correctly" % fs)
                    failed = lpc

        if failed:
            return self.failure("iteration %d failed" % failed)
        return self.success()

AllTestClasses.append(HAERoleTest)

####################################################################
class HAEStandbyTest(HAETest):
####################################################################
    '''Put a node in and out of standby, checking the HAE resources each time'''
    def __init__(self, cm):
        HAETest.__init__(self,cm)
        self.name = "HAEStandbyTest"

    def change_state(self, node, resource, target):
        rc = self.CM.rsh(node, "crm_standby -l reboot -v %s" % (target))
        return rc

    def __call__(self, node):
        self.incr("calls")
        lpc = 0
        failed = 0
        done = time.time() + self.CM.Env["loop-minutes"]*60
        self.find_hae_resources(node)

        clone_max = len(self.CM.Env["nodes"])
        while time.time() <= done and not failed:
            lpc = lpc + 1

            self.change_state(node, self.r_dlm, "true")
            if not self.wait_on_state(node, self.r_dlm, clone_max-1):
                self.failure("%s did not go down correctly" % self.r_dlm)
                failed = lpc

            self.change_state(node, self.r_dlm, "false")
            if not self.wait_on_state(node, self.r_dlm, clone_max):
                self.failure("%s did not come up correctly" % self.r_dlm)
                failed = lpc

            if not self.wait_on_state(node, self.r_o2cb, clone_max):
                self.failure("%s did not come up correctly" % self.r_o2cb)
                failed = lpc

            for fs in self.r_ocfs2:
                if not self.wait_on_state(node, fs, clone_max):
                    self.failure("%s did not come up correctly" % fs)
                    failed = lpc

        if failed:
            return self.failure("iteration %d failed" % failed)
        return self.success()

AllTestClasses.append(HAEStandbyTest)

###################################################################
class NearQuorumPointTest(CTSTest):
###################################################################
    '''
    This test brings larger clusters near the quorum point (50%).
    In addition, it will test doing starts and stops at the same time.

    Here is how I think it should work:
    - loop over the nodes and decide randomly which will be up and which
      will be down.  Use a 50% probability for each of up/down.
    - figure out what to do to get into that state from the current state
    - in parallel, bring up those going up and bring down those going down.
    '''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "NearQuorumPoint"

    def __call__(self, dummy):
        '''Perform the 'NearQuorumPoint' test. '''
        self.incr("calls")
        startset = []
        stopset = []

        #decide what to do with each node
        for node in self.CM.Env["nodes"]:
            action = self.CM.Env.RandomGen.choice(["start","stop"])
            #action = self.CM.Env.RandomGen.choice(["start","stop","no change"])
            if action == "start" :
                startset.append(node)
            elif action == "stop" :
                stopset.append(node)

        self.CM.debug("start nodes:" + repr(startset))
        self.CM.debug("stop nodes:" + repr(stopset))

        #add search patterns
        watchpats = [ ]
        for node in stopset:
            if self.CM.ShouldBeStatus[node] == "up":
                watchpats.append(self.CM["Pat:We_stopped"] % node)

        for node in startset:
            if self.CM.ShouldBeStatus[node] == "down":
                #watchpats.append(self.CM["Pat:Slave_started"] % node)
                watchpats.append(self.CM["Pat:Local_started"] % node)
            else:
                for stopping in stopset:
                    if self.CM.ShouldBeStatus[stopping] == "up":
                        watchpats.append(self.CM["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))

        if len(watchpats) == 0:
            return self.skipped()

        if len(startset) != 0:
            watchpats.append(self.CM["Pat:DC_IDLE"])

        watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
        watch.setwatch()

        #begin actions
        for node in stopset:
            if self.CM.ShouldBeStatus[node] == "up":
                self.CM.StopaCMnoBlock(node)

        for node in startset:
            if self.CM.ShouldBeStatus[node] == "down":
                self.CM.StartaCMnoBlock(node)

        #get the result
        if watch.lookforall():
            self.CM.cluster_stable()
            return self.success()

        self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched))

        #get the "bad" nodes
        upnodes = []
        for node in stopset:
            if self.CM.StataCM(node) == 1:
                upnodes.append(node)

        downnodes = []
        for node in startset:
            if self.CM.StataCM(node) == 0:
                downnodes.append(node)

        if upnodes == [] and downnodes == []:
            self.CM.cluster_stable()

            # Make sure they're completely down with no residue
            for node in stopset:
                self.CM.rsh(node, self.CM["StopCmd"])

            return self.success()

        if len(upnodes) > 0:
            self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes))

        if len(downnodes) > 0:
            self.CM.log("Warn: Unstartable nodes: " + repr(downnodes))

        return self.failure()

AllTestClasses.append(NearQuorumPointTest)
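# Added sketch (not in the original source): the up/down split above is just
# one Bernoulli draw per node, i.e. roughly:
#
#   import random
#   startset = [n for n in nodes if random.random() < 0.5]
#   stopset  = [n for n in nodes if n not in startset]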
rc = self.CM.rsh(node, "mkdir -p %s" % target_dir) rc = self.CM.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.CM.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.CM.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.CM.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.CM.Env["current-version"], start) def downgrade(self, node, start=1): return self.install(node, self.CM.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.CM.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not self.CM.Env.has_key("rpm-dir"): return None if not self.CM.Env.has_key("current-version"): return None if not self.CM.Env.has_key("previous-version"): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) ################################################################### class BSC_AddResource(CTSTest): ################################################################### '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name="AddResource" self.resource_offset = 0 self.cib_cmd="""cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "crmd.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = self.create_watch(patterns, self.CM["DeadTime"]) watch.setwatch() fields = string.split(self.CM.Env["IPBase"], '.') fields[3] = str(int(fields[3])+1) ip = string.join(fields, '.') self.CM.Env["IPBase"] = ip if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.CM.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.CM["DeadTime"]): return self.failure("Unstable cluster") return self.success() def make_ip_resource(self, node, id, rclass, type, ip): self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint=""" """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.CM.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.CM.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.CM.Env["DoBSC"]: return 1 return None AllTestClasses.append(BSC_AddResource) class SimulStopLite(CTSTest): ################################################################### '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStopLite" def __call__(self, dummy): '''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.CM.debug("Setup: " + self.name) # We ignore the "node" parameter... 
watchpats = [ ] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.incr("WasStarted") watchpats.append(self.CM["Pat:We_stopped"] % node) #if self.CM.Env["use_logd"]: # watchpats.append(self.CM["Pat:Logd_stopped"] % node) if len(watchpats) == 0: self.CM.clear_all_caches() return self.success() # Stop all the nodes - at about the same time... watch = self.create_watch(watchpats, self.CM["DeadTime"]+10) watch.setwatch() self.set_timer() for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) if watch.lookforall(): self.CM.clear_all_caches() # Make sure they're completely down with no residule for node in self.CM.Env["nodes"]: self.CM.rsh(node, self.CM["StopCmd"]) return self.success() did_fail=0 up_nodes = [] for node in self.CM.Env["nodes"]: if self.CM.StataCM(node) == 1: did_fail=1 up_nodes.append(node) if did_fail: return self.failure("Active nodes exist: " + repr(up_nodes)) self.CM.log("Warn: All nodes stopped but CTS didnt detect: " + repr(watch.unmatched)) self.CM.clear_all_caches() return self.failure("Missing log message: "+repr(watch.unmatched)) def is_applicable(self): '''SimulStopLite is a setup test and never applicable''' return 0 ################################################################### class SimulStartLite(CTSTest): ################################################################### '''Start any stopped nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStartLite" def __call__(self, dummy): '''Perform the 'SimulStartList' setup work. ''' self.incr("calls") self.CM.debug("Setup: " + self.name) # We ignore the "node" parameter... watchpats = [ ] uppat = self.CM["Pat:Slave_started"] if self.CM.upcount() == 0: uppat = self.CM["Pat:Local_started"] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.incr("WasStopped") watchpats.append(uppat % node) if len(watchpats) == 0: return self.success() watchpats.append(self.CM["Pat:DC_IDLE"]) # Start all the nodes - at about the same time... 
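# Added note (not in the original source): SimulStopLite and SimulStartLite
# both return 0 from is_applicable(), so TestList() never schedules them;
# they only run as setup/teardown helpers for the other tests, e.g.:
#
#   if not SimulStartLite(cm)(None):
#       ...    # the calling test reports "Setup failed"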
###################################################################
class SimulStartLite(CTSTest):
###################################################################
    '''Start any stopped nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "SimulStartLite"

    def __call__(self, dummy):
        '''Perform the 'SimulStartLite' setup work. '''
        self.incr("calls")
        self.CM.debug("Setup: " + self.name)

        # We ignore the "node" parameter...
        watchpats = [ ]

        uppat = self.CM["Pat:Slave_started"]
        if self.CM.upcount() == 0:
            uppat = self.CM["Pat:Local_started"]

        for node in self.CM.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "down":
                self.incr("WasStopped")
                watchpats.append(uppat % node)

        if len(watchpats) == 0:
            return self.success()

        watchpats.append(self.CM["Pat:DC_IDLE"])

        # Start all the nodes - at about the same time...
        watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
        watch.setwatch()

        self.set_timer()
        for node in self.CM.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "down":
                self.CM.StartaCMnoBlock(node)

        if watch.lookforall():
            for attempt in (1, 2, 3, 4, 5):
                if self.CM.cluster_stable():
                    return self.success()
            return self.failure("Cluster did not stabilize")

        did_fail = 0
        unstable = []
        for node in self.CM.Env["nodes"]:
            if self.CM.StataCM(node) == 0:
                did_fail = 1
                unstable.append(node)

        if did_fail:
            return self.failure("Unstarted nodes exist: " + repr(unstable))

        unstable = []
        for node in self.CM.Env["nodes"]:
            if not self.CM.node_stable(node):
                did_fail = 1
                unstable.append(node)

        if did_fail:
            return self.failure("Unstable cluster nodes exist: " + repr(unstable))

        self.CM.log("ERROR: All nodes started but CTS didn't detect: "
                    + repr(watch.unmatched))
        return self.failure()

    def is_applicable(self):
        '''SimulStartLite is a setup test and never applicable'''
        return 0

def TestList(cm, audits):
    result = []
    for testclass in AllTestClasses:
        bound_test = testclass(cm)
        if bound_test.is_applicable():
            bound_test.Audits = audits
            result.append(bound_test)
    return result

# vim:ts=4:sw=4:et:
diff --git a/cts/Makefile.am b/cts/Makefile.am
index 918153f64b..fde2fc7d98 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -1,45 +1,44 @@
#
# heartbeat: Linux-HA heartbeat code
#
# Copyright (C) 2001 Michael Moerz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
CLEANFILES           = LSBDummy
EXTRA_DIST           = $(cts_SCRIPTS) $(cts_DATA)

ctsdir    = $(datadir)/$(PACKAGE)/tests/cts
ctslibdir = $(pythondir)/cts

ctslib_PYTHON = __init__.py     \
                CTSvars.py      \
                CM_lha.py       \
                CM_ais.py       \
                CTS.py          \
                CTSaudits.py    \
+               CTSlab.py       \
                CTStests.py     \
                CTSscenarios.py \
                CIB.py

cts_DATA = README cts.supp

-cts_SCRIPTS = CTSlab.py \
-              extracttests.py \
+cts_SCRIPTS = extracttests.py \
               cluster_test \
-              LSBDummy \
-              OCFIPraTest.py
+              LSBDummy
diff --git a/lib/ais/plugin.c b/lib/ais/plugin.c
index 6509df975b..e099b6229e 100644
--- a/lib/ais/plugin.c
+++ b/lib/ais/plugin.c
@@ -1,1657 +1,1657 @@
/* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef AIS_COROSYNC # include # include # include #endif #include #include #include "plugin.h" #include "utils.h" #include #include #include #include #include #include #include #include #include struct corosync_api_v1 *pcmk_api = NULL; uint32_t plugin_has_votes = 0; -uint32_t plugin_expected_votes = 1024; +uint32_t plugin_expected_votes = 2; int use_mgmtd = 0; int plugin_log_level = LOG_DEBUG; char *local_uname = NULL; int local_uname_len = 0; char *local_cname = NULL; int local_cname_len = 0; uint32_t local_nodeid = 0; char *ipc_channel_name = NULL; static uint64_t local_born_on = 0; uint64_t membership_seq = 0; pthread_t pcmk_wait_thread; gboolean wait_active = TRUE; gboolean have_reliable_membership_id = FALSE; GHashTable *ipc_client_list = NULL; GHashTable *membership_list = NULL; GHashTable *membership_notify_list = NULL; #define MAX_RESPAWN 100 #define LOOPBACK_ID 16777343 #define crm_flag_none 0x00000000 #define crm_flag_members 0x00000001 struct crm_identify_msg_s { coroipc_request_header_t header __attribute__((aligned(8))); uint32_t id; uint32_t pid; int32_t votes; uint32_t processes; char uname[256]; char version[256]; uint64_t born_on; } __attribute__((packed)); static crm_child_t pcmk_children[] = { { 0, crm_proc_none, crm_flag_none, 0, 0, FALSE, "none", NULL, NULL, NULL, NULL }, { 0, crm_proc_ais, crm_flag_none, 0, 0, FALSE, "ais", NULL, NULL, NULL, NULL }, { 0, crm_proc_lrmd, crm_flag_none, 3, 0, TRUE, "lrmd", NULL, CRM_DAEMON_DIR"/lrmd", NULL, NULL }, { 0, crm_proc_cib, crm_flag_members, 2, 0, TRUE, "cib", CRM_DAEMON_USER, CRM_DAEMON_DIR"/cib", NULL, NULL }, { 0, crm_proc_crmd, crm_flag_members, 6, 0, TRUE, "crmd", CRM_DAEMON_USER, CRM_DAEMON_DIR"/crmd", NULL, NULL }, { 0, crm_proc_attrd, crm_flag_none, 4, 0, TRUE, "attrd", CRM_DAEMON_USER, CRM_DAEMON_DIR"/attrd", NULL, NULL }, { 0, crm_proc_stonithd, crm_flag_none, 0, 0, TRUE, "stonithd", NULL, "/bin/false", NULL, NULL }, { 0, crm_proc_pe, crm_flag_none, 5, 0, TRUE, "pengine", CRM_DAEMON_USER, CRM_DAEMON_DIR"/pengine", NULL, NULL }, { 0, crm_proc_mgmtd, crm_flag_none, 7, 0, TRUE, "mgmtd", NULL, CRM_DAEMON_DIR"/mgmtd", NULL, NULL }, { 0, crm_proc_stonith_ng, crm_flag_none, 1, 0, TRUE, "stonith-ng", NULL, CRM_DAEMON_DIR"/stonithd", NULL, NULL }, }; void send_cluster_id(void); int send_cluster_msg_raw(const AIS_Message *ais_msg); char *pcmk_generate_membership_data(void); gboolean check_message_sanity(const AIS_Message *msg, const char *data); #ifdef AIS_COROSYNC typedef const void ais_void_ptr; int pcmk_shutdown(void); void pcmk_peer_update(enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id); #else typedef void ais_void_ptr; extern totempg_groups_handle openais_group_handle; int pcmk_shutdown(struct objdb_iface_ver0 *objdb); void pcmk_peer_update(enum totem_configuration_type configuration_type, unsigned int *member_list, int member_list_entries, unsigned int *left_list, int left_list_entries, unsigned int *joined_list, int joined_list_entries, struct memb_ring_id *ring_id); #endif int 
pcmk_startup (struct corosync_api_v1 *corosync_api); int pcmk_config_init(struct corosync_api_v1 *corosync_api); int pcmk_ipc_exit (void *conn); int pcmk_ipc_connect (void *conn); void pcmk_ipc(void *conn, ais_void_ptr *msg); void pcmk_exec_dump(void); void pcmk_cluster_swab(void *msg); void pcmk_cluster_callback(ais_void_ptr *message, unsigned int nodeid); void pcmk_nodeid(void *conn, ais_void_ptr *msg); void pcmk_nodes(void *conn, ais_void_ptr *msg); void pcmk_notify(void *conn, ais_void_ptr *msg); void pcmk_remove_member(void *conn, ais_void_ptr *msg); void pcmk_quorum(void *conn, ais_void_ptr *msg); void pcmk_cluster_id_swab(void *msg); void pcmk_cluster_id_callback(ais_void_ptr *message, unsigned int nodeid); void ais_remove_peer(char *node_id); static struct corosync_lib_handler pcmk_lib_service[] = { { /* 0 */ .lib_handler_fn = pcmk_ipc, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 1 */ .lib_handler_fn = pcmk_nodes, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 2 */ .lib_handler_fn = pcmk_notify, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 3 */ .lib_handler_fn = pcmk_nodeid, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (struct crm_ais_nodeid_resp_s), .response_id = crm_class_nodeid, #endif }, { /* 4 */ .lib_handler_fn = pcmk_remove_member, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 5 */ .lib_handler_fn = pcmk_quorum, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, }; static struct corosync_exec_handler pcmk_exec_service[] = { { /* 0 */ .exec_handler_fn = pcmk_cluster_callback, .exec_endian_convert_fn = pcmk_cluster_swab }, { /* 1 */ .exec_handler_fn = pcmk_cluster_id_callback, .exec_endian_convert_fn = pcmk_cluster_id_swab } }; /* * Exports the interface for the service */ struct corosync_service_engine pcmk_service_handler = { .name = (unsigned char *)"Pacemaker Cluster Manager "PACKAGE_VERSION, .id = PCMK_SERVICE_ID, .private_data_size = 0, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, .lib_init_fn = pcmk_ipc_connect, .lib_exit_fn = pcmk_ipc_exit, .exec_init_fn = pcmk_startup, .exec_exit_fn = pcmk_shutdown, .config_init_fn = pcmk_config_init, #ifdef AIS_COROSYNC .priority = 50, .lib_engine = pcmk_lib_service, .lib_engine_count = sizeof (pcmk_lib_service) / sizeof (struct corosync_lib_handler), .exec_engine = pcmk_exec_service, .exec_engine_count = sizeof (pcmk_exec_service) / sizeof (struct corosync_exec_handler), #else .lib_service = pcmk_lib_service, .lib_service_count = sizeof (pcmk_lib_service) / sizeof (struct corosync_lib_handler), .exec_service = pcmk_exec_service, .exec_service_count = sizeof (pcmk_exec_service) / sizeof (struct corosync_exec_handler), #endif .confchg_fn = pcmk_peer_update, .exec_dump_fn = pcmk_exec_dump, /* void (*sync_init) (void); */ /* int (*sync_process) (void); */ /* void (*sync_activate) (void); */ /* void (*sync_abort) 
(void); */ }; /* * Dynamic Loader definition */ struct corosync_service_engine *pcmk_get_handler_ver0 (void); #ifdef AIS_COROSYNC struct corosync_service_engine_iface_ver0 pcmk_service_handler_iface = { .corosync_get_service_engine_ver0 = pcmk_get_handler_ver0 }; #else struct openais_service_handler_iface_ver0 pcmk_service_handler_iface = { .openais_get_service_handler_ver0 = pcmk_get_handler_ver0 }; #endif static struct lcr_iface openais_pcmk_ver0[1] = { { .name = "pacemaker", .version = 0, .versions_replace = 0, .versions_replace_count = 0, .dependencies = 0, .dependency_count = 0, .constructor = NULL, .destructor = NULL, .interfaces = NULL } }; static struct lcr_comp pcmk_comp_ver0 = { .iface_count = 1, .ifaces = openais_pcmk_ver0 }; struct corosync_service_engine *pcmk_get_handler_ver0 (void) { return (&pcmk_service_handler); } __attribute__ ((constructor)) static void register_this_component (void) { lcr_interfaces_set (&openais_pcmk_ver0[0], &pcmk_service_handler_iface); lcr_component_register (&pcmk_comp_ver0); } static int plugin_has_quorum(void) { if((plugin_expected_votes >> 1) < plugin_has_votes) { return 1; } return 0; } static void update_expected_votes(int value) { if(value < plugin_has_votes) { /* Never drop below the number of connected nodes */ ais_info("Cannot update expected quorum votes %d -> %d:" " value cannot be less than the current number of votes", plugin_expected_votes, value); } else if(plugin_expected_votes != value) { ais_info("Expected quorum votes %d -> %d", plugin_expected_votes, value); plugin_expected_votes = value; } } /* Create our own local copy of the config so we can navigate it */ static void process_ais_conf(void) { char *value = NULL; gboolean any_log = FALSE; hdb_handle_t top_handle = 0; hdb_handle_t local_handle = 0; ais_info("Reading configuration"); top_handle = config_find_init(pcmk_api, "logging"); local_handle = config_find_next(pcmk_api, "logging", top_handle); get_config_opt(pcmk_api, local_handle, "debug", &value, "on"); if(ais_get_boolean(value)) { plugin_log_level = LOG_DEBUG; pcmk_env.debug = "1"; } else { plugin_log_level = LOG_INFO; pcmk_env.debug = "0"; } get_config_opt(pcmk_api, local_handle, "to_file", &value, "off"); if(ais_get_boolean(value)) { get_config_opt(pcmk_api, local_handle, "logfile", &value, NULL); if(value == NULL) { ais_err("Logging to a file requested but no log file specified"); } else { struct passwd *superuser = getpwnam("root"); struct passwd *user = getpwnam(CRM_DAEMON_USER); AIS_CHECK(user != NULL, ais_err("Cluster user %s does not exist", CRM_DAEMON_USER)); AIS_CHECK(superuser != NULL, ais_err("Superuser %s does not exist", "root")); pcmk_env.logfile = value; if(superuser && user) { /* Ensure the file has the correct permissions */ FILE *logfile = fopen(value, "a"); int logfd = fileno(logfile); fchown(logfd, superuser->pw_uid, user->pw_gid); fchmod(logfd, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP); fprintf(logfile, "Set r/w permissions for "CRM_DAEMON_USER"\n"); fflush(logfile); fsync(fileno(logfile)); fclose(logfile); any_log = TRUE; } } } get_config_opt(pcmk_api, local_handle, "to_syslog", &value, "on"); if(any_log && ais_get_boolean(value) == FALSE) { ais_info("User configured file based logging and explicitly disabled syslog."); value = "none"; } else { if(ais_get_boolean(value) == FALSE) { ais_err("Please enable some sort of logging, either 'to_file: on' or 'to_syslog: on'."); ais_err("If you use file logging, be sure to also define a value for 'logfile'"); } get_config_opt(pcmk_api, local_handle, "syslog_facility",
/* Create our own local copy of the config so we can navigate it */
static void process_ais_conf(void)
{
    char *value = NULL;
    gboolean any_log = FALSE;
    hdb_handle_t top_handle = 0;
    hdb_handle_t local_handle = 0;

    ais_info("Reading configuration");
    top_handle = config_find_init(pcmk_api, "logging");
    local_handle = config_find_next(pcmk_api, "logging", top_handle);

    get_config_opt(pcmk_api, local_handle, "debug", &value, "on");
    if(ais_get_boolean(value)) {
        plugin_log_level = LOG_DEBUG;
        pcmk_env.debug = "1";
    } else {
        plugin_log_level = LOG_INFO;
        pcmk_env.debug = "0";
    }

    get_config_opt(pcmk_api, local_handle, "to_file", &value, "off");
    if(ais_get_boolean(value)) {
        get_config_opt(pcmk_api, local_handle, "logfile", &value, NULL);
        if(value == NULL) {
            ais_err("Logging to a file requested but no log file specified");
        } else {
            struct passwd *superuser = getpwnam("root");
            struct passwd *user = getpwnam(CRM_DAEMON_USER);

            AIS_CHECK(user != NULL,
                      ais_err("Cluster user %s does not exist", CRM_DAEMON_USER));
            AIS_CHECK(superuser != NULL,
                      ais_err("Superuser %s does not exist", "root"));

            pcmk_env.logfile = value;
            if(superuser && user) {
                /* Ensure the file has the correct permissions */
                FILE *logfile = fopen(value, "a");
                int logfd = fileno(logfile);

                fchown(logfd, superuser->pw_uid, user->pw_gid);
                fchmod(logfd, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP);

                fprintf(logfile, "Set r/w permissions for "CRM_DAEMON_USER"\n");
                fflush(logfile);
                fsync(fileno(logfile));
                fclose(logfile);
                any_log = TRUE;
            }
        }
    }

    get_config_opt(pcmk_api, local_handle, "to_syslog", &value, "on");
    if(any_log && ais_get_boolean(value) == FALSE) {
        ais_info("User configured file based logging and explicitly disabled syslog.");
        value = "none";
    } else {
        if(ais_get_boolean(value) == FALSE) {
            ais_err("Please enable some sort of logging, either 'to_file: on' or 'to_syslog: on'.");
            ais_err("If you use file logging, be sure to also define a value for 'logfile'");
        }
        get_config_opt(pcmk_api, local_handle, "syslog_facility", &value, "daemon");
    }
    pcmk_env.syslog = value;

    config_find_done(pcmk_api, local_handle);

    top_handle = config_find_init(pcmk_api, "service");
    local_handle = config_find_next(pcmk_api, "service", top_handle);
    while(local_handle) {
        value = NULL;
        pcmk_api->object_key_get(local_handle, "name", strlen("name"), (void**)&value, NULL);
        if(ais_str_eq("pacemaker", value)) {
            break;
        }
        local_handle = config_find_next(pcmk_api, "service", top_handle);
    }

    get_config_opt(pcmk_api, local_handle, "clustername", &local_cname, "pcmk");
    local_cname_len = strlen(local_cname);

    get_config_opt(pcmk_api, local_handle, "use_logd", &value, "no");
    pcmk_env.use_logd = value;

    get_config_opt(pcmk_api, local_handle, "use_mgmtd", &value, "no");
    if(ais_get_boolean(value) == FALSE) {
        int lpc = 0;
        for (; lpc < SIZEOF(pcmk_children); lpc++) {
            if(crm_proc_mgmtd & pcmk_children[lpc].flag) {
                /* Disable mgmtd startup */
                pcmk_children[lpc].start_seq = 0;
                break;
            }
        }
    }

    config_find_done(pcmk_api, local_handle);
}

int pcmk_config_init(struct corosync_api_v1 *unused)
{
    return 0;
}

static void *pcmk_wait_dispatch (void *arg)
{
    struct timespec waitsleep = {
        .tv_sec = 1,
        .tv_nsec = 0
    };

    while(wait_active) {
        int lpc = 0;
        for (; lpc < SIZEOF(pcmk_children); lpc++) {
            if(pcmk_children[lpc].pid > 0) {
                int status;
                pid_t pid = wait4(
                    pcmk_children[lpc].pid, &status, WNOHANG, NULL);

                if(pid == 0) {
                    continue;

                } else if(pid < 0) {
                    ais_perror("Call to wait4(%s) failed", pcmk_children[lpc].name);
                    continue;
                }

                /* cleanup */
                pcmk_children[lpc].pid = 0;
                pcmk_children[lpc].conn = NULL;
                pcmk_children[lpc].async_conn = NULL;

                if(WIFSIGNALED(status)) {
                    int sig = WTERMSIG(status);
                    ais_err("Child process %s terminated with signal %d"
                            " (pid=%d, core=%s)",
                            pcmk_children[lpc].name, sig, pid,
                            WCOREDUMP(status)?"true":"false");

                } else if (WIFEXITED(status)) {
                    int rc = WEXITSTATUS(status);
                    do_ais_log(rc==0?LOG_NOTICE:LOG_ERR,
                               "Child process %s exited (pid=%d, rc=%d)",
                               pcmk_children[lpc].name, pid, rc);
                    if(rc == 100) {
                        ais_notice("Child process %s no longer wishes"
                                   " to be respawned",
                                   pcmk_children[lpc].name);
                        pcmk_children[lpc].respawn = FALSE;
                    }
                }

                pcmk_children[lpc].respawn_count += 1;
                if(pcmk_children[lpc].respawn_count > MAX_RESPAWN) {
                    ais_err("Child respawn count exceeded by %s",
                            pcmk_children[lpc].name);
                    pcmk_children[lpc].respawn = FALSE;
                }

                if(pcmk_children[lpc].respawn) {
                    ais_notice("Respawning failed child process: %s",
                               pcmk_children[lpc].name);
                    spawn_child(&(pcmk_children[lpc]));
                } else {
                    send_cluster_id();
                }
            }
        }
        sched_yield ();
        nanosleep (&waitsleep, 0);
    }
    return 0;
}
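/*
 * A standalone sketch of the respawn decision the dispatch loop above makes
 * for a child that exited normally: rc 100 is the agreed "do not respawn me"
 * exit code, and MAX_RESPAWN caps the number of restart attempts. The helper
 * name and signature are invented for illustration, not part of this patch.
 */
static int should_respawn(int exit_rc, int respawn_count, int max_respawn)
{
    if(exit_rc == 100) {
        return 0;   /* child explicitly asked not to be respawned */
    }
    if(respawn_count + 1 > max_respawn) {
        return 0;   /* respawn budget exhausted */
    }
    return 1;       /* otherwise restart the failed child */
}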
static uint32_t pcmk_update_nodeid(void)
{
    int last = local_nodeid;

#if AIS_COROSYNC
    local_nodeid = pcmk_api->totem_nodeid_get();
#else
    local_nodeid = totempg_my_nodeid_get();
#endif

    if(last != local_nodeid) {
        if(last == 0) {
            ais_info("Local node id: %u", local_nodeid);

        } else {
            char *last_s = NULL;
            ais_malloc0(last_s, 32);
            ais_warn("Detected local node id change: %u -> %u", last, local_nodeid);
            snprintf(last_s, 31, "%u", last);
            ais_remove_peer(last_s);
            ais_free(last_s);
        }
        update_member(local_nodeid, 0, 0, 1, 0, local_uname, CRM_NODE_MEMBER, NULL);
    }

    return local_nodeid;
}

int pcmk_startup(struct corosync_api_v1 *init_with)
{
    int rc = 0;
    int lpc = 0;
    int start_seq = 1;
    struct utsname us;
    struct rlimit cores;
    static int max = SIZEOF(pcmk_children);

    struct passwd *pwentry = getpwnam(CRM_DAEMON_USER);

    pcmk_api = init_with;

#ifdef AIS_WHITETANK
    log_init ("crm");
#endif

    pcmk_env.debug    = "0";
    pcmk_env.logfile  = NULL;
    pcmk_env.use_logd = "false";
    pcmk_env.syslog   = "daemon";

    process_ais_conf();

    membership_list = g_hash_table_new_full(
        g_direct_hash, g_direct_equal, NULL, destroy_ais_node);
    membership_notify_list = g_hash_table_new(g_direct_hash, g_direct_equal);
    ipc_client_list = g_hash_table_new(g_direct_hash, g_direct_equal);

    ais_info("CRM: Initialized");
    log_printf(LOG_INFO, "Logging: Initialized %s\n", __PRETTY_FUNCTION__);

    rc = getrlimit(RLIMIT_CORE, &cores);
    if(rc < 0) {
        ais_perror("Cannot determine current maximum core size.");
    }

    if(cores.rlim_max <= 0) {
        cores.rlim_max = RLIM_INFINITY;

        rc = setrlimit(RLIMIT_CORE, &cores);
        if(rc < 0) {
            ais_perror("Core file generation will remain disabled."
                       " Core files are an important diagnostic tool,"
                       " please consider enabling them by default.");
        }

    } else {
        ais_info("Maximum core file size is: %lu", cores.rlim_max);
        if(system("echo 1 > /proc/sys/kernel/core_uses_pid") != 0) {
            ais_perror("Could not enable /proc/sys/kernel/core_uses_pid");
        }
    }

    AIS_CHECK(pwentry != NULL,
              ais_err("Cluster user %s does not exist", CRM_DAEMON_USER);
              return TRUE);

    mkdir(CRM_STATE_DIR, 0750);
    chown(CRM_STATE_DIR, pwentry->pw_uid, pwentry->pw_gid);

    mkdir(HA_STATE_DIR"/heartbeat", 0755);        /* Used by RAs - Leave owned by root */
    mkdir(HA_STATE_DIR"/heartbeat/rsctmp", 0755); /* Used by RAs - Leave owned by root */

    rc = uname(&us);
    AIS_ASSERT(rc == 0);
    local_uname = ais_strdup(us.nodename);
    local_uname_len = strlen(local_uname);

    ais_info("Service: %d", PCMK_SERVICE_ID);
    ais_info("Local hostname: %s", local_uname);
    pcmk_update_nodeid();

    pthread_create (&pcmk_wait_thread, NULL, pcmk_wait_dispatch, NULL);
    for (start_seq = 1; start_seq < max; start_seq++) {
        /* don't start anything with start_seq < 1 */
        for (lpc = 0; lpc < max; lpc++) {
            if(start_seq == pcmk_children[lpc].start_seq) {
                spawn_child(&(pcmk_children[lpc]));
            }
        }
    }

    return 0;
}

/*
static void ais_print_node(const char *prefix, struct totem_ip_address *host)
{
    int len = 0;
    char *buffer = NULL;

    ais_malloc0(buffer, INET6_ADDRSTRLEN+1);

    inet_ntop(host->family, host->addr, buffer, INET6_ADDRSTRLEN);

    len = strlen(buffer);
    ais_info("%s: %.*s", prefix, len, buffer);
    ais_free(buffer);
}
*/

#if 0
/* copied here for reference from exec/totempg.c */
char *totempg_ifaces_print (unsigned int nodeid)
{
    static char iface_string[256 * INTERFACE_MAX];
    char one_iface[64];
    struct totem_ip_address interfaces[INTERFACE_MAX];
    char **status;
    unsigned int iface_count;
    unsigned int i;
    int res;

    iface_string[0] = '\0';

    res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count);
    if (res == -1) {
        return ("no interface found for nodeid");
    }

    for (i = 0; i < iface_count; i++) {
        sprintf (one_iface, "r(%d) ip(%s), ", i, totemip_print (&interfaces[i]));
        strcat (iface_string, one_iface);
    }
    return (iface_string);
}
#endif

static void ais_mark_unseen_peer_dead(
    gpointer key, gpointer value, gpointer user_data)
{
    int *changed = user_data;
    crm_node_t *node = value;

    if(node->last_seen != membership_seq
       && ais_str_eq(CRM_NODE_LOST, node->state) == FALSE) {
        ais_info("Node %s was not seen in the previous transition", node->uname);
        *changed += update_member(node->id, 0, membership_seq, node->votes,
                                  node->processes, node->uname, CRM_NODE_LOST, NULL);
    }
}

void pcmk_peer_update (
    enum totem_configuration_type configuration_type,
#ifdef AIS_COROSYNC
    const unsigned int *member_list, size_t member_list_entries,
    const unsigned int *left_list, size_t left_list_entries,
    const unsigned int *joined_list, size_t joined_list_entries,
    const struct memb_ring_id *ring_id
#else
    unsigned int *member_list, int member_list_entries,
    unsigned int *left_list, int
left_list_entries, unsigned int *joined_list, int joined_list_entries, struct memb_ring_id *ring_id #endif ) { int lpc = 0; int changed = 0; int do_update = 0; AIS_ASSERT(ring_id != NULL); switch(configuration_type) { case TOTEM_CONFIGURATION_REGULAR: do_update = 1; break; case TOTEM_CONFIGURATION_TRANSITIONAL: break; } membership_seq = ring_id->seq; ais_notice("%s membership event on ring %lld: memb=%ld, new=%ld, lost=%ld", do_update?"Stable":"Transitional", ring_id->seq, (long)member_list_entries, (long)joined_list_entries, (long)left_list_entries); if(do_update == 0) { for(lpc = 0; lpc < joined_list_entries; lpc++) { const char *prefix = "new: "; uint32_t nodeid = joined_list[lpc]; ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } for(lpc = 0; lpc < member_list_entries; lpc++) { const char *prefix = "memb:"; uint32_t nodeid = member_list[lpc]; ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } for(lpc = 0; lpc < left_list_entries; lpc++) { const char *prefix = "lost:"; uint32_t nodeid = left_list[lpc]; ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } return; } for(lpc = 0; lpc < joined_list_entries; lpc++) { const char *prefix = "NEW: "; uint32_t nodeid = joined_list[lpc]; crm_node_t *node = NULL; changed += update_member( nodeid, 0, membership_seq, -1, 0, NULL, CRM_NODE_MEMBER, NULL); ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); node = g_hash_table_lookup(membership_list, GUINT_TO_POINTER(nodeid)); if(node->addr == NULL) { const char *addr = totempg_ifaces_print(nodeid); node->addr = ais_strdup(addr); ais_debug("Node %u has address %s", nodeid, node->addr); } } for(lpc = 0; lpc < member_list_entries; lpc++) { const char *prefix = "MEMB:"; uint32_t nodeid = member_list[lpc]; changed += update_member( nodeid, 0, membership_seq, -1, 0, NULL, CRM_NODE_MEMBER, NULL); ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } for(lpc = 0; lpc < left_list_entries; lpc++) { const char *prefix = "LOST:"; uint32_t nodeid = left_list[lpc]; changed += update_member( nodeid, 0, membership_seq, -1, 0, NULL, CRM_NODE_LOST, NULL); ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } if(changed && joined_list_entries == 0 && left_list_entries == 0) { ais_err("Something strange happened: %d", changed); changed = 0; } ais_debug_2("Reaping unseen nodes..."); g_hash_table_foreach(membership_list, ais_mark_unseen_peer_dead, &changed); if(member_list_entries > 1) { /* Used to set born-on in send_cluster_id()) * We need to wait until we have at least one peer since first * membership id is based on the one before we stopped and isn't reliable */ have_reliable_membership_id = TRUE; } if(changed) { ais_debug("%d nodes changed", changed); pcmk_update_nodeid(); send_member_notification(); } send_cluster_id(); } int pcmk_ipc_exit (void *conn) { int lpc = 0; const char *client = NULL; void *async_conn = conn; for (; lpc < SIZEOF(pcmk_children); lpc++) { if(pcmk_children[lpc].conn == conn) { if(wait_active == FALSE) { /* Make sure the shutdown loop exits */ pcmk_children[lpc].pid = 0; } pcmk_children[lpc].conn = NULL; pcmk_children[lpc].async_conn = NULL; client = pcmk_children[lpc].name; break; } } g_hash_table_remove(membership_notify_list, async_conn); g_hash_table_remove(ipc_client_list, async_conn); do_ais_log(client?LOG_INFO:(LOG_DEBUG+1), "Client %s (conn=%p, async-conn=%p) left", client?client:"unknown-transient", conn, async_conn); return (0); } int pcmk_ipc_connect (void *conn) { /* OpenAIS hasn't finished setting up the connection at this 
point * Sending messages now messes up the protocol! */ return (0); } /* * Executive message handlers */ void pcmk_cluster_swab(void *msg) { AIS_Message *ais_msg = msg; ais_debug_3("Performing endian conversion..."); ais_msg->id = swab32 (ais_msg->id); ais_msg->size = swab32 (ais_msg->size); ais_msg->is_compressed = swab32 (ais_msg->is_compressed); ais_msg->compressed_size = swab32 (ais_msg->compressed_size); ais_msg->host.id = swab32 (ais_msg->host.id); ais_msg->host.pid = swab32 (ais_msg->host.pid); ais_msg->host.type = swab32 (ais_msg->host.type); ais_msg->host.size = swab32 (ais_msg->host.size); ais_msg->host.local = swab32 (ais_msg->host.local); ais_msg->sender.id = swab32 (ais_msg->sender.id); ais_msg->sender.pid = swab32 (ais_msg->sender.pid); ais_msg->sender.type = swab32 (ais_msg->sender.type); ais_msg->sender.size = swab32 (ais_msg->sender.size); ais_msg->sender.local = swab32 (ais_msg->sender.local); } void pcmk_cluster_callback ( ais_void_ptr *message, unsigned int nodeid) { const AIS_Message *ais_msg = message; ais_debug_2("Message from node %u (%s)", nodeid, nodeid==local_nodeid?"local":"remote"); /* Shouldn't be required... update_member( ais_msg->sender.id, membership_seq, -1, 0, ais_msg->sender.uname, NULL); */ if(ais_msg->host.size == 0 || ais_str_eq(ais_msg->host.uname, local_uname)) { route_ais_message(ais_msg, FALSE); } else { ais_debug_3("Discarding Msg[%d] (dest=%s:%s, from=%s:%s)", ais_msg->id, ais_dest(&(ais_msg->host)), msg_type2text(ais_msg->host.type), ais_dest(&(ais_msg->sender)), msg_type2text(ais_msg->sender.type)); } } void pcmk_cluster_id_swab(void *msg) { struct crm_identify_msg_s *ais_msg = msg; ais_debug_3("Performing endian conversion..."); ais_msg->id = swab32 (ais_msg->id); ais_msg->pid = swab32 (ais_msg->pid); ais_msg->votes = swab32 (ais_msg->votes); ais_msg->processes = swab32 (ais_msg->processes); } void pcmk_cluster_id_callback (ais_void_ptr *message, unsigned int nodeid) { int changed = 0; const struct crm_identify_msg_s *msg = message; if(nodeid != msg->id) { ais_err("Invalid message: Node %u claimed to be node %d", nodeid, msg->id); return; } ais_debug("Node update: %s (%s)", msg->uname, msg->version); changed = update_member( nodeid, msg->born_on, membership_seq, msg->votes, msg->processes, msg->uname, NULL, msg->version); if(changed) { send_member_notification(); } } struct res_overlay { coroipc_response_header_t header __attribute((aligned(8))); char buf[4096]; }; struct res_overlay *res_overlay = NULL; static void send_ipc_ack(void *conn) { if(res_overlay == NULL) { ais_malloc0(res_overlay, sizeof(struct res_overlay)); } res_overlay->header.id = CRM_MESSAGE_IPC_ACK; res_overlay->header.size = sizeof (coroipc_response_header_t); res_overlay->header.error = CS_OK; #ifdef AIS_COROSYNC pcmk_api->ipc_response_send (conn, res_overlay, res_overlay->header.size); #else openais_response_send (conn, res_overlay, res_overlay->header.size); #endif } /* local callbacks */ void pcmk_ipc(void *conn, ais_void_ptr *msg) { AIS_Message *mutable; int type = 0, size = 0; gboolean transient = TRUE; const AIS_Message *ais_msg = (const AIS_Message*)msg; void *async_conn = conn; ais_debug_2("Message from client %p", conn); if(check_message_sanity(msg, ((const AIS_Message*)msg)->data) == FALSE) { /* The message is corrupted - ignore */ send_ipc_ack(conn); msg = NULL; return; } /* Make a copy of the message here and ACK it * The message is only valid until a response is sent * but the response must also be sent _before_ we send anything else */ mutable = 
ais_msg_copy(ais_msg);
    AIS_ASSERT(check_message_sanity(mutable, mutable->data));

    size = mutable->header.size;
    /* ais_malloc0(ais_msg, size); */
    /* memcpy(ais_msg, msg, size); */

    type = mutable->sender.type;
    ais_debug_3("type: %d local: %d conn: %p host type: %d ais: %d sender pid: %d child pid: %d size: %d",
                type, mutable->host.local, pcmk_children[type].conn, mutable->host.type,
                crm_msg_ais, mutable->sender.pid, pcmk_children[type].pid,
                ((int)SIZEOF(pcmk_children)));

    if(type > crm_msg_none && type < SIZEOF(pcmk_children)) {
        /* known child process */
        transient = FALSE;
    }

    /* If this check fails, the order of pcmk_children probably
     * doesn't match that of the crm_ais_msg_types enum
     */
    AIS_CHECK(transient || mutable->sender.pid == pcmk_children[type].pid,
              ais_err("Sender: %d, child[%d]: %d",
                      mutable->sender.pid, type, pcmk_children[type].pid);
              return);

    if(transient == FALSE
       && type > crm_msg_none
       && mutable->host.local
       && pcmk_children[type].conn == NULL
       && mutable->host.type == crm_msg_ais) {
        AIS_CHECK(mutable->sender.type != mutable->sender.pid,
                  ais_err("Pid=%d, type=%d", mutable->sender.pid, mutable->sender.type));

        ais_info("Recorded connection %p for %s/%d",
                 conn, pcmk_children[type].name, pcmk_children[type].pid);
        pcmk_children[type].conn = conn;
        pcmk_children[type].async_conn = async_conn;

        /* Make sure they have the latest membership */
        if(pcmk_children[type].flags & crm_flag_members) {
            char *update = pcmk_generate_membership_data();
            g_hash_table_replace(membership_notify_list, async_conn, async_conn);
            ais_info("Sending membership update "U64T" to %s",
                     membership_seq, pcmk_children[type].name);
            send_client_msg(async_conn, crm_class_members, crm_msg_none, update);
        }

    } else if(transient) {
        AIS_CHECK(mutable->sender.type == mutable->sender.pid,
                  ais_err("Pid=%d, type=%d", mutable->sender.pid, mutable->sender.type));
        g_hash_table_replace(ipc_client_list, async_conn,
                             GUINT_TO_POINTER(mutable->sender.pid));
    }

    mutable->sender.id = local_nodeid;
    mutable->sender.size = local_uname_len;
    memset(mutable->sender.uname, 0, MAX_NAME);
    memcpy(mutable->sender.uname, local_uname, mutable->sender.size);

    route_ais_message(mutable, TRUE);
    send_ipc_ack(conn); msg = NULL;
    ais_free(mutable);
}
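/*
 * Illustrative sketch of how pcmk_ipc() above classifies an IPC sender: known
 * Pacemaker children use their crm_ais_msg_types value as the sender type
 * (indexing pcmk_children[]), while transient clients advertise their pid in
 * the type field instead. The helper is invented for illustration, assuming
 * crm_msg_none is the zero entry of the enum as the range check above implies.
 */
static int is_known_child(int sender_type, int n_children /* SIZEOF(pcmk_children) */)
{
    return sender_type > crm_msg_none && sender_type < n_children;
}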
int pcmk_shutdown (
#ifdef AIS_COROSYNC
    void
#else
    struct objdb_iface_ver0 *objdb
#endif
    )
{
    int lpc = 0;
    static int phase = 0;
    static int max_wait = 0;
    static time_t next_log = 0;
    static int max = SIZEOF(pcmk_children);

    if(phase == 0) {
        ais_notice("Shutting down Pacemaker");
        phase = max;
    }

    wait_active = FALSE; /* stop the wait loop */

    for (; phase > 0; phase--) {
        /* don't stop anything with start_seq < 1 */
        for (lpc = max - 1; lpc >= 0; lpc--) {
            if(phase != pcmk_children[lpc].start_seq) {
                continue;
            }

#ifdef AIS_WHITETANK
          retry:
#endif
            if(pcmk_children[lpc].pid) {
                pid_t pid = 0;
                int status = 0;
                time_t now = time(NULL);

                if(pcmk_children[lpc].respawn) {
                    max_wait = 5; /* 5 * 30s = 2.5 minutes... plenty once the crmd is gone */
                    next_log = now + 30;
                    pcmk_children[lpc].respawn = FALSE;
                    stop_child(&(pcmk_children[lpc]), SIGTERM);
                }

                pid = wait4(pcmk_children[lpc].pid, &status, WNOHANG, NULL);
                if(pid < 0) {
                    ais_perror("Call to wait4(%s/%d) failed - treating it as stopped",
                               pcmk_children[lpc].name, pcmk_children[lpc].pid);

                } else if(pid == 0) {
                    if(now >= next_log) {
                        max_wait--;
                        next_log = now + 30;
                        ais_notice("Still waiting for %s (pid=%d, seq=%d) to terminate...",
                                   pcmk_children[lpc].name, pcmk_children[lpc].pid,
                                   pcmk_children[lpc].start_seq);
                        if(max_wait <= 0 && phase < pcmk_children[crm_msg_crmd].start_seq) {
                            ais_err("Child %s taking too long to terminate, sending SIGKILL",
                                    pcmk_children[lpc].name);
                            stop_child(&(pcmk_children[lpc]), SIGKILL);
                        }
                    }
#ifdef AIS_WHITETANK
                    {
                        struct timespec waitsleep = {
                            .tv_sec = 1,
                            .tv_nsec = 0
                        };

                        sched_yield ();
                        nanosleep (&waitsleep, 0);
                        goto retry;
                    }
#else
                    /* Return control to corosync */
                    return -1;
#endif
                }
            }

            /* cleanup */
            ais_notice("%s confirmed stopped", pcmk_children[lpc].name);
            pcmk_children[lpc].async_conn = NULL;
            pcmk_children[lpc].conn = NULL;
            pcmk_children[lpc].pid = 0;
        }
    }

    send_cluster_id();
    ais_notice("Shutdown complete");
    /* TODO: Add back the logsys flush call once it's written */

#ifdef AIS_WHITETANK
    /* Bug bnc#482847, bnc#482905
     *
     * All cluster services are now down, we could allow OpenAIS to continue
     * unloading plugins, but it's kinda new at that and there are a bunch of
     * race conditions that get exercised.
     *
     * Take the easy way out for now (on whitetank) and eventually fix for
     * CoroSync which is where everyone wants to be eventually anyway
     */
    ais_notice("Forcing clean exit of OpenAIS");
    exit(0);
#endif
    return 0;
}

struct member_loop_data {
    char *string;
};

void member_loop_fn(gpointer key, gpointer value, gpointer user_data)
{
    crm_node_t *node = value;
    struct member_loop_data *data = user_data;

    ais_debug_2("Dumping node %u", node->id);
    data->string = append_member(data->string, node);
    plugin_has_votes += node->votes;
}

char *pcmk_generate_membership_data(void)
{
    int size = 0;
    struct member_loop_data data;

    size = 256;
    ais_malloc0(data.string, size);

    snprintf(data.string, size,
             "<nodes id=\""U64T"\" quorate=\"%s\" expected=\"%u\" actual=\"%u\">",
             membership_seq, plugin_has_quorum()?"true":"false",
             plugin_expected_votes, plugin_has_votes);

    plugin_has_votes = 0;
    g_hash_table_foreach(membership_list, member_loop_fn, &data);
    if(plugin_has_votes > plugin_expected_votes) {
        update_expected_votes(plugin_has_votes);
    }

    size = strlen(data.string);
    data.string = realloc(data.string, size + 9); /* 9 = "</nodes>" + nul */
    sprintf(data.string + size, "</nodes>");
    return data.string;
}

void pcmk_nodes(void *conn, ais_void_ptr *msg)
{
    char *data = pcmk_generate_membership_data();
    void *async_conn = conn;

    /* send the ACK before we send any other messages
     * - but after we no longer need to access the message
     */
    send_ipc_ack(conn); msg = NULL;

    if(async_conn) {
        send_client_msg(async_conn, crm_class_members, crm_msg_none, data);
    }
    ais_free(data);
}

void pcmk_remove_member(void *conn, ais_void_ptr *msg)
{
    const AIS_Message *ais_msg = msg;
    char *data = get_ais_data(ais_msg);

    send_ipc_ack(conn); msg = NULL;

    if(data != NULL) {
        char *bcast = ais_concat("remove-peer", data, ':');

        send_cluster_msg(crm_msg_ais, NULL, bcast);
        ais_info("Sent: %s", bcast);
        ais_free(bcast);
    }
    ais_free(data);
}

static void send_quorum_details(void *conn)
{
    int size = 256;
    char *data = NULL;
    ais_malloc0(data, size);

    snprintf(data, size,
             "<quorum id=\""U64T"\" quorate=\"%s\" expected=\"%u\" actual=\"%u\"/>",
             membership_seq, plugin_has_quorum()?"true":"false",
             plugin_expected_votes, plugin_has_votes);

    send_client_msg(conn, crm_class_quorum,
crm_msg_none, data); ais_free(data); } void pcmk_quorum(void *conn, ais_void_ptr *msg) { char *dummy = NULL; const AIS_Message *ais_msg = msg; char *data = get_ais_data(ais_msg); send_ipc_ack(conn); msg = NULL; /* Make sure the current number of votes is accurate */ dummy = pcmk_generate_membership_data(); ais_free(dummy); /* Calls without data just want the current quorum details */ if(data != NULL && strlen(data) > 0) { int value = ais_get_int(data, NULL); update_expected_votes(value); } send_quorum_details(conn); ais_free(data); } void pcmk_notify(void *conn, ais_void_ptr *msg) { const AIS_Message *ais_msg = msg; char *data = get_ais_data(ais_msg); void *async_conn = conn; int enable = 0; int sender = ais_msg->sender.pid; send_ipc_ack(conn); msg = NULL; if(ais_str_eq("true", data)) { enable = 1; } ais_info("%s node notifications for child %d (%p)", enable?"Enabling":"Disabling", sender, async_conn); if(enable) { g_hash_table_replace(membership_notify_list, async_conn, async_conn); } else { g_hash_table_remove(membership_notify_list, async_conn); } ais_free(data); } void pcmk_nodeid(void *conn, ais_void_ptr *msg) { static int counter = 0; struct crm_ais_nodeid_resp_s resp; ais_debug_2("Sending local nodeid: %d to %p[%d]", local_nodeid, conn, counter); resp.header.id = crm_class_nodeid; resp.header.size = sizeof (struct crm_ais_nodeid_resp_s); resp.header.error = CS_OK; resp.id = local_nodeid; resp.counter = counter++; memset(resp.uname, 0, MAX_NAME); memcpy(resp.uname, local_uname, local_uname_len); memset(resp.cname, 0, MAX_NAME); memcpy(resp.cname, local_cname, local_cname_len); #ifdef AIS_COROSYNC pcmk_api->ipc_response_send (conn, &resp, resp.header.size); #else openais_response_send (conn, &resp, resp.header.size); #endif } static gboolean ghash_send_update(gpointer key, gpointer value, gpointer data) { if(send_client_msg(value, crm_class_members, crm_msg_none, data) != 0) { /* remove it */ return TRUE; } return FALSE; } void send_member_notification(void) { char *update = pcmk_generate_membership_data(); ais_info("Sending membership update "U64T" to %d children", membership_seq, g_hash_table_size(membership_notify_list)); g_hash_table_foreach_remove(membership_notify_list, ghash_send_update, update); ais_free(update); } gboolean check_message_sanity(const AIS_Message *msg, const char *data) { gboolean sane = TRUE; gboolean repaired = FALSE; int dest = msg->host.type; int tmp_size = msg->header.size - sizeof(AIS_Message); if(sane && msg->header.size == 0) { ais_err("Message with no size"); sane = FALSE; } if(sane && msg->header.error != CS_OK) { ais_err("Message header contains an error: %d", msg->header.error); sane = FALSE; } AIS_CHECK(msg->header.size > sizeof(AIS_Message), ais_err("Message %d size too small: %d < %zu", msg->header.id, msg->header.size, sizeof(AIS_Message)); return FALSE); if(sane && ais_data_len(msg) != tmp_size) { ais_warn("Message payload size is incorrect: expected %d, got %d", ais_data_len(msg), tmp_size); sane = TRUE; } if(sane && ais_data_len(msg) == 0) { ais_err("Message with no payload"); sane = FALSE; } if(sane && data && msg->is_compressed == FALSE) { int str_size = strlen(data) + 1; if(ais_data_len(msg) != str_size) { int lpc = 0; ais_err("Message payload is corrupted: expected %d bytes, got %d", ais_data_len(msg), str_size); sane = FALSE; for(lpc = (str_size - 10); lpc < msg->size; lpc++) { if(lpc < 0) { lpc = 0; } ais_debug_2("bad_data[%d]: %d / '%c'", lpc, data[lpc], data[lpc]); } } } if(sane == FALSE) { AIS_CHECK(sane, ais_err("Invalid message %d: 
(dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)",
                          msg->id, ais_dest(&(msg->host)), msg_type2text(dest),
                          ais_dest(&(msg->sender)), msg_type2text(msg->sender.type),
                          msg->sender.pid, msg->is_compressed,
                          ais_data_len(msg), msg->header.size));

    } else if(repaired) {
        ais_err("Repaired message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)",
                msg->id, ais_dest(&(msg->host)), msg_type2text(dest),
                ais_dest(&(msg->sender)), msg_type2text(msg->sender.type),
                msg->sender.pid, msg->is_compressed,
                ais_data_len(msg), msg->header.size);

    } else {
        ais_debug_3("Verified message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)",
                    msg->id, ais_dest(&(msg->host)), msg_type2text(dest),
                    ais_dest(&(msg->sender)), msg_type2text(msg->sender.type),
                    msg->sender.pid, msg->is_compressed,
                    ais_data_len(msg), msg->header.size);
    }

    return sane;
}

static int delivered_transient = 0;
static void deliver_transient_msg(gpointer key, gpointer value, gpointer user_data)
{
    int pid = GPOINTER_TO_INT(value);
    AIS_Message *mutable = user_data;

    if(pid == mutable->host.type) {
        int rc = send_client_ipc(key, mutable);
        delivered_transient++;

        ais_info("Sent message to %s.%d (rc=%d)",
                 ais_dest(&(mutable->host)), pid, rc);
        if(rc != 0) {
            ais_warn("Sending message to %s.%d failed (rc=%d)",
                     ais_dest(&(mutable->host)), pid, rc);
            log_ais_message(LOG_DEBUG, mutable);
        }
    }
}

gboolean route_ais_message(const AIS_Message *msg, gboolean local_origin)
{
    int rc = 0;
    int dest = msg->host.type;
    const char *reason = "unknown";
    AIS_Message *mutable = ais_msg_copy(msg);
    static int service_id = SERVICE_ID_MAKE(PCMK_SERVICE_ID, 0);

    ais_debug_3("Msg[%d] (dest=%s:%s, from=%s:%s.%d, remote=%s, size=%d)",
                mutable->id, ais_dest(&(mutable->host)), msg_type2text(dest),
                ais_dest(&(mutable->sender)), msg_type2text(mutable->sender.type),
                mutable->sender.pid, local_origin?"false":"true",
                ais_data_len((mutable)));

    if(local_origin == FALSE) {
        if(mutable->host.size == 0 || ais_str_eq(local_uname, mutable->host.uname)) {
            mutable->host.local = TRUE;
        }
    }

    if(check_message_sanity(mutable, mutable->data) == FALSE) {
        /* Don't send this message to anyone */
        rc = 1;
        goto bail;
    }

    if(mutable->host.local) {
        void *conn = NULL;
        const char *lookup = NULL;

        if(dest == crm_msg_ais) {
            process_ais_message(mutable);
            goto bail;

        } else if(dest == crm_msg_lrmd) {
            /* lrmd messages are routed via the crm */
            dest = crm_msg_crmd;

        } else if(dest == crm_msg_te) {
            /* te messages are routed via the crm */
            dest = crm_msg_crmd;

        } else if(dest >= SIZEOF(pcmk_children)) {
            /* Transient client */
            delivered_transient = 0;
            g_hash_table_foreach(ipc_client_list, deliver_transient_msg, mutable);
            if(delivered_transient) {
                ais_debug_2("Sent message to %d transient clients: %d",
                            delivered_transient, dest);
                goto bail;
            } else {
                /* try the crmd */
                ais_debug_2("Sending message to transient client %d via crmd", dest);
                dest = crm_msg_crmd;
            }

        } else if(dest == 0) {
            ais_err("Invalid destination: %d", dest);
            log_ais_message(LOG_ERR, mutable);
            log_printf(LOG_ERR, "%s", get_ais_data(mutable));
            rc = 1;
            goto bail;
        }

        lookup = msg_type2text(dest);
        conn = pcmk_children[dest].async_conn;

        /* the cluster fails in weird and wonderfully obscure ways when this is not true */
        AIS_ASSERT(ais_str_eq(lookup, pcmk_children[dest].name));

        if(mutable->header.id == service_id) {
            mutable->header.id = 0; /* reset this back to zero for IPC messages */

        } else if(mutable->header.id != 0) {
            ais_err("reset header id back to zero from %d", mutable->header.id);
            mutable->header.id = 0; /* reset this back to zero for IPC messages */
        }

        reason = "ipc delivery failed";
        rc = send_client_ipc(conn, mutable);

    } else if(local_origin) {
        /* forward to other hosts */
        ais_debug_3("Forwarding to cluster");
        reason = "cluster delivery failed";
        rc = send_cluster_msg_raw(mutable);
    }

    if(rc != 0) {
        ais_warn("Sending message to %s.%s failed: %s (rc=%d)",
                 ais_dest(&(mutable->host)), msg_type2text(dest), reason, rc);
        log_ais_message(LOG_DEBUG, mutable);
    }

  bail:
    ais_free(mutable);
    return rc==0?TRUE:FALSE;
}
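/*
 * A condensed sketch of the destination remapping route_ais_message() above
 * performs for local deliveries. The helper name is invented for
 * illustration; the enum values come from the surrounding code, and the real
 * function also tries direct IPC to transient clients before falling back.
 */
static int remap_local_dest(int dest, int n_children /* SIZEOF(pcmk_children) */)
{
    if(dest == crm_msg_lrmd || dest == crm_msg_te) {
        return crm_msg_crmd;   /* lrmd and te traffic is relayed via the crmd */
    }
    if(dest >= n_children) {
        return crm_msg_crmd;   /* transient client fallback */
    }
    /* crm_msg_ais is consumed by the plugin itself; anything else goes
     * straight to the matching child's IPC connection */
    return dest;
}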
IPC messages */ } reason = "ipc delivery failed"; rc = send_client_ipc(conn, mutable); } else if(local_origin) { /* forward to other hosts */ ais_debug_3("Forwarding to cluster"); reason = "cluster delivery failed"; rc = send_cluster_msg_raw(mutable); } if(rc != 0) { ais_warn("Sending message to %s.%s failed: %s (rc=%d)", ais_dest(&(mutable->host)), msg_type2text(dest), reason, rc); log_ais_message(LOG_DEBUG, mutable); } bail: ais_free(mutable); return rc==0?TRUE:FALSE; } int send_cluster_msg_raw(const AIS_Message *ais_msg) { int rc = 0; struct iovec iovec; static uint32_t msg_id = 0; AIS_Message *mutable = ais_msg_copy(ais_msg); AIS_ASSERT(local_nodeid != 0); AIS_ASSERT(ais_msg->header.size == (sizeof(AIS_Message) + ais_data_len(ais_msg))); if(mutable->id == 0) { msg_id++; AIS_CHECK(msg_id != 0 /* detect wrap-around */, msg_id++; ais_err("Message ID wrapped around")); mutable->id = msg_id; } mutable->header.error = CS_OK; mutable->header.id = SERVICE_ID_MAKE(PCMK_SERVICE_ID, 0); mutable->sender.id = local_nodeid; mutable->sender.size = local_uname_len; memset(mutable->sender.uname, 0, MAX_NAME); memcpy(mutable->sender.uname, local_uname, mutable->sender.size); iovec.iov_base = (char *)mutable; iovec.iov_len = mutable->header.size; ais_debug_3("Sending message (size=%u)", (unsigned int)iovec.iov_len); #if AIS_COROSYNC rc = pcmk_api->totem_mcast(&iovec, 1, TOTEMPG_SAFE); #else rc = totempg_groups_mcast_joined(openais_group_handle, &iovec, 1, TOTEMPG_SAFE); #endif if(rc == 0 && mutable->is_compressed == FALSE) { ais_debug_2("Message sent: %.80s", mutable->data); } AIS_CHECK(rc == 0, ais_err("Message not sent (%d): %.120s", rc, mutable->data)); ais_free(mutable); return rc; } #define min(x,y) (x)<(y)?(x):(y) void send_cluster_id(void) { int rc = 0; int lpc = 0; int len = 0; struct iovec iovec; struct crm_identify_msg_s *msg = NULL; AIS_ASSERT(local_nodeid != 0); if(local_born_on == 0 && have_reliable_membership_id) { local_born_on = membership_seq; } ais_malloc0(msg, sizeof(struct crm_identify_msg_s)); msg->header.size = sizeof(struct crm_identify_msg_s); msg->id = local_nodeid; /* msg->header.error = CS_OK; */ msg->header.id = SERVICE_ID_MAKE(PCMK_SERVICE_ID, 1); len = min(local_uname_len, MAX_NAME-1); memset(msg->uname, 0, MAX_NAME); memcpy(msg->uname, local_uname, len); len = min(strlen(VERSION), MAX_NAME-1); memset(msg->version, 0, MAX_NAME); memcpy(msg->version, VERSION, len); msg->votes = 1; msg->pid = getpid(); msg->processes = crm_proc_ais; msg->born_on = local_born_on; for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) { if(pcmk_children[lpc].pid != 0) { msg->processes |= pcmk_children[lpc].flag; } } ais_debug("Local update: id=%u, born="U64T", seq="U64T"", local_nodeid, local_born_on, membership_seq); update_member( local_nodeid, local_born_on, membership_seq, msg->votes, msg->processes, NULL, NULL, VERSION); iovec.iov_base = (char *)msg; iovec.iov_len = msg->header.size; #if AIS_COROSYNC rc = pcmk_api->totem_mcast(&iovec, 1, TOTEMPG_SAFE); #else rc = totempg_groups_mcast_joined(openais_group_handle, &iovec, 1, TOTEMPG_SAFE); #endif AIS_CHECK(rc == 0, ais_err("Message not sent (%d)", rc)); ais_free(msg); } static gboolean ghash_send_removal(gpointer key, gpointer value, gpointer data) { send_quorum_details(value); if(send_client_msg(value, crm_class_rmpeer, crm_msg_none, data) != 0) { /* remove it */ return TRUE; } return FALSE; } void ais_remove_peer(char *node_id) { uint32_t id = ais_get_int(node_id, NULL); crm_node_t *node = g_hash_table_lookup(membership_list, 
GUINT_TO_POINTER(id)); if(node == NULL) { ais_info("Peer %u is unknown", id); } else if(ais_str_eq(CRM_NODE_MEMBER, node->state)) { ais_warn("Peer %u/%s is still active", id, node->uname); } else if(g_hash_table_remove(membership_list, GUINT_TO_POINTER(id))) { plugin_expected_votes--; ais_notice("Removed dead peer %u from the membership list", id); ais_info("Sending removal of %u to %d children", id, g_hash_table_size(membership_notify_list)); g_hash_table_foreach_remove(membership_notify_list, ghash_send_removal, node_id); } else { ais_warn("Peer %u/%s was not removed", id, node->uname); } } gboolean process_ais_message(const AIS_Message *msg) { int len = ais_data_len(msg); char *data = get_ais_data(msg); do_ais_log(LOG_DEBUG, "Msg[%d] (dest=%s:%s, from=%s:%s.%d, remote=%s, size=%d): %.90s", msg->id, ais_dest(&(msg->host)), msg_type2text(msg->host.type), ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), msg->sender.pid, msg->sender.uname==local_uname?"false":"true", ais_data_len(msg), data); if(data && len > 12 && strncmp("remove-peer:", data, 12) == 0) { char *node = data+12; ais_remove_peer(node); } ais_free(data); return TRUE; } static void member_dump_fn(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; ais_info(" node id:%u, uname=%s state=%s processes=%.16x born="U64T" seen="U64T" addr=%s version=%s", node->id, node->uname?node->uname:"-unknown-", node->state, node->processes, node->born, node->last_seen, node->addr?node->addr:"-unknown-", node->version?node->version:"-unknown-"); } void pcmk_exec_dump(void) { /* Called after SIG_USR2 */ process_ais_conf(); ais_info("Local id: %u, uname: %s, born: "U64T, local_nodeid, local_uname, local_born_on); ais_info("Membership id: "U64T", quorate: %s, expected: %u, actual: %u", membership_seq, plugin_has_quorum()?"true":"false", plugin_expected_votes, plugin_has_votes); g_hash_table_foreach(membership_list, member_dump_fn, NULL); } diff --git a/shell/modules/cibconfig.py b/shell/modules/cibconfig.py index 04d6bb13de..78d0100ae6 100644 --- a/shell/modules/cibconfig.py +++ b/shell/modules/cibconfig.py @@ -1,2274 +1,2284 @@ # Copyright (C) 2008 Dejan Muhamedagic # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

import sys
import subprocess
import copy
import xml.dom.minidom
import re

from singletonmixin import Singleton
from userprefs import Options, UserPrefs
from vars import Vars
from cliformat import *
from utils import *
from xmlutil import *
from msg import *
from parse import CliParser
from clidisplay import CliDisplay
from cibstatus import CibStatus
from idmgmt import IdMgmt

def show_unrecognized_elems(doc):
    try:
        conf = doc.getElementsByTagName("configuration")[0]
    except:
        common_warn("CIB has no configuration element")
        return
    for topnode in conf.childNodes:
        if not is_element(topnode):
            continue
        if is_defaults(topnode):
            continue
        if not topnode.tagName in cib_topnodes:
            common_warn("unrecognized CIB element %s" % topnode.tagName)
            continue
        for c in topnode.childNodes:
            if not is_element(c):
                continue
            if not c.tagName in cib_object_map:
                common_warn("unrecognized CIB element %s" % c.tagName)

#
# object sets (enables operations on sets of elements)
#
def mkset_obj(*args):
    if args and args[0] == "xml":
        obj = lambda: CibObjectSetRaw(*args[1:])
    else:
        obj = lambda: CibObjectSetCli(*args)
    return obj()

class CibObjectSet(object):
    '''
    Edit or display a set of cib objects.
    repr() for objects representation and
    save() used to store objects into internal structures
    are defined in subclasses.
    '''
    def __init__(self, *args):
        self.obj_list = []
    def _open_url(self,src):
        import urllib
        try:
            return urllib.urlopen(src)
        except:
            pass
        if src == "-":
            return sys.stdin
        try:
            return open(src)
        except:
            pass
        common_err("could not open %s" % src)
        return False
    def init_aux_lists(self):
        '''
        Before edit, initialize two auxiliary lists which will
        hold a list of objects to be removed and a list of
        objects which were created. Then, we can create a new
        object list which will match the current state of
        affairs, i.e. the object set after the last edit.
        '''
        self.remove_objs = copy.copy(self.obj_list)
        self.add_objs = []
    def recreate_obj_list(self):
        '''
        Recreate obj_list: remove deleted objects and add
        created objects.
        '''
        for obj in self.remove_objs:
            self.obj_list.remove(obj)
        self.obj_list += self.add_objs
        rmlist = []
        for obj in self.obj_list:
            if obj.invalid:
                rmlist.append(obj)
        for obj in rmlist:
            self.obj_list.remove(obj)
    def edit_save(self,s,erase = False):
        '''
        Save string s to a tmp file. Invoke editor to edit it.
        Parse/save the resulting file. In case of syntax error,
        allow user to re-edit. If erase is True, erase the CIB
        first. If no changes are done, return silently.
        '''
        tmp = str2tmp(s)
        if not tmp:
            return False
        filehash = hash(s)
        rc = False
        while True:
            if edit_file(tmp) != 0:
                break
            try: f = open(tmp,'r')
            except IOError, msg:
                common_err(msg)
                break
            s = ''.join(f)
            f.close()
            if hash(s) == filehash: # file unchanged
                rc = True
                break
            if erase:
                cib_factory.erase()
            if not self.save(s):
                if ask("Do you want to edit again?"):
                    continue
            rc = True
            break
        try: os.unlink(tmp)
        except: pass
        return rc
    def edit(self):
        if options.batch:
            common_info("edit not allowed in batch mode")
            return False
        cli_display.set_no_pretty()
        s = self.repr()
        cli_display.reset_no_pretty()
        return self.edit_save(s)
    def save_to_file(self,fname):
        if fname == "-":
            f = sys.stdout
        else:
            if not options.batch and os.access(fname,os.F_OK):
                if not ask("File %s exists. Do you want to overwrite it?"%fname):
                    return False
            try: f = open(fname,"w")
            except IOError, msg:
                common_err(msg)
                return False
        rc = True
        cli_display.set_no_pretty()
        s = self.repr()
        cli_display.reset_no_pretty()
        if s:
            f.write(s)
            f.write('\n')
        elif self.obj_list:
            rc = False
        if f != sys.stdout:
            f.close()
        return rc
    def show(self):
        s = self.repr()
        if not s:
            if self.obj_list: # objects could not be displayed
                return False
            else:
                return True
        page_string(s)
    def import_file(self,method,fname):
        if not cib_factory.is_cib_sane():
            return False
        if method == "replace":
            if options.interactive and cib_factory.has_cib_changed():
                if not ask("This operation will erase all changes. Do you want to proceed?"):
                    return False
            cib_factory.erase()
        f = self._open_url(fname)
        if not f:
            return False
        s = ''.join(f)
        if f != sys.stdin:
            f.close()
-        return self.save(s)
+        return self.save(s, method == "update")
    def repr(self):
        '''
        Return a string with objects's representations (either
        CLI or XML).
        '''
        return ''
-    def save(self,s):
+    def save(self, s, update = False):
        '''
        For each object:
            - try to find a corresponding object in obj_list
-            - if not found: create new
-            - if found: replace the object in the obj_list with
+            - if (update and not found) or found:
+              replace the object in the obj_list with
              the new object
+            - if not found: create new
        See below for specific implementations.
        '''
        pass
    def verify2(self):
        '''
        Test objects for sanity. This is about semantics.
        '''
        rc = 0
        for obj in self.obj_list:
            rc |= obj.check_sanity()
        return rc
    def lookup_cli(self,cli_list):
        for obj in self.obj_list:
            if obj.matchcli(cli_list):
                return obj
    def lookup(self,xml_obj_type,obj_id):
        for obj in self.obj_list:
            if obj.match(xml_obj_type,obj_id):
                return obj
    def drop_remaining(self):
        'Any remaining objects in obj_list are deleted.'
        l = [x.obj_id for x in self.remove_objs]
        return cib_factory.delete(*l)
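# A minimal standalone sketch (not part of the patch) of the reconciliation
# scheme the object sets above rely on: remove_objs starts as a copy of the
# current set, process() takes entries off it as they are matched, and
# whatever is left afterwards is deleted by drop_remaining(). The "update"
# flag added by this patch merely widens the match to objects outside the
# edited set. All names below are invented for the example.
def reconcile(current, incoming, update=False, everything=()):
    remove_objs = list(current)
    result = list(current)
    for obj in incoming:
        if obj in remove_objs:           # found in the edited set: keep it
            remove_objs.remove(obj)
        elif update and obj in everything:
            pass                         # update mode: modify in place, no re-add
        else:
            result.append(obj)           # genuinely new object
    for obj in remove_objs:              # anything not re-submitted is dropped
        result.remove(obj)
    return result

# e.g. reconcile(["a","b"], ["b","c"]) == ["b","c"]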
def get_comments(cli_list):
    if not cli_list:
        return []
    last = cli_list[len(cli_list)-1]
    try:
        if last[0] == "comments":
            cli_list.pop()
            return last[1]
    except: pass
    return []

class CibObjectSetCli(CibObjectSet):
    '''
    Edit or display a set of cib objects (using cli notation).
    '''
    def __init__(self, *args):
        CibObjectSet.__init__(self, *args)
        self.obj_list = cib_factory.mkobj_list("cli",*args)
    def repr(self):
        "Return a string containing cli format of all objects."
        if not self.obj_list:
            return ''
        return '\n'.join(obj.repr_cli() \
            for obj in processing_sort_cli(self.obj_list))
-    def process(self,cli_list):
+    def process(self, cli_list, update = False):
        '''
        Create new objects or update existing ones.
        '''
        comments = get_comments(cli_list)
-        obj = self.lookup_cli(cli_list)
+        myobj = obj = self.lookup_cli(cli_list)
+        if update and not obj:
+            obj = cib_factory.find_object_for_cli(cli_list)
        if obj:
-            rc = obj.update_from_cli(cli_list) != False
-            self.remove_objs.remove(obj)
+            rc = cib_factory.update_from_cli(obj,cli_list) != False
+            if myobj:
+                self.remove_objs.remove(myobj)
        else:
            obj = cib_factory.create_from_cli(cli_list)
            rc = obj != None
            if rc:
                self.add_objs.append(obj)
        if rc:
            obj.set_comment(comments)
        return rc
-    def save(self,s):
+    def save(self, s, update = False):
        '''
        Save a user supplied cli format configuration.
        On errors user is typically asked to review the
        configuration (for instance on editing). On syntax
        error (return code 1), no changes are done, but on
        semantic errors (return code 2), some changes did take
        place so object list must be updated properly.
Finally, once syntax check passed, there's no way back, all changes are applied to the current configuration. TODO: Implement undo configuration changes. ''' l = [] rc = True err_buf.start_tmp_lineno() cp = CliParser() for cli_text in lines2cli(s): err_buf.incr_lineno() cli_list = cp.parse(cli_text) if cli_list: l.append(cli_list) elif cli_list == False: rc = False err_buf.stop_tmp_lineno() # we can't proceed if there was a syntax error, but we # can ask the user to fix problems if not rc: return rc self.init_aux_lists() if l: for cli_list in processing_sort_cli(l): - if self.process(cli_list) == False: + if self.process(cli_list,update) == False: rc = False if not self.drop_remaining(): # this is tricky, we don't know what was removed! # it could happen that the user dropped a resource # which was running and therefore couldn't be removed rc = False self.recreate_obj_list() return rc cib_verify = "crm_verify -V -p" class CibObjectSetRaw(CibObjectSet): ''' Edit or display one or more CIB objects (XML). ''' def __init__(self, *args): CibObjectSet.__init__(self, *args) self.obj_list = cib_factory.mkobj_list("xml",*args) def repr(self): "Return a string containing xml of all objects." doc = cib_factory.objlist2doc(self.obj_list) s = doc.toprettyxml(user_prefs.xmlindent) doc.unlink() return s def repr_configure(self): ''' Return a string containing xml of configure and its children. ''' doc = cib_factory.objlist2doc(self.obj_list) conf_node = doc.getElementsByTagName("configuration")[0] s = conf_node.toprettyxml(user_prefs.xmlindent) doc.unlink() return s - def process(self,node): + def process(self, node, update = False): if not cib_factory.is_cib_sane(): return False - obj = self.lookup(node.tagName,node.getAttribute("id")) + myobj = obj = self.lookup(node.tagName,node.getAttribute("id")) + if update and not obj: + obj = cib_factory.find_object_for_node(node) if obj: - rc = obj.update_from_node(node) - self.remove_objs.remove(obj) + rc = cib_factory.update_from_node(obj,node) + if myobj: + self.remove_objs.remove(obj) else: new_obj = cib_factory.create_from_node(node) rc = new_obj != None if rc: self.add_objs.append(new_obj) return rc - def save(self,s): + def save(self, s, update = False): try: doc = xml.dom.minidom.parseString(s) except xml.parsers.expat.ExpatError,msg: cib_parse_err(msg,s) return False rc = True sanitize_cib(doc) show_unrecognized_elems(doc) newnodes = get_interesting_nodes(doc,[]) self.init_aux_lists() if newnodes: for node in processing_sort(newnodes): - if not self.process(node): + if not self.process(node,update): rc = False if not self.drop_remaining(): rc = False doc.unlink() self.recreate_obj_list() return rc def verify(self): if not self.obj_list: return True cli_display.set_no_pretty() rc = pipe_string(cib_verify,self.repr()) cli_display.reset_no_pretty() return rc in (0,1) def ptest(self, nograph, scores, verbosity): if not cib_factory.is_cib_sane(): return False ptest = "ptest -X -%s" % verbosity.upper() if scores: ptest = "%s -s" % ptest if user_prefs.dotty and not nograph: fd,tmpfile = mkstemp() ptest = "%s -D %s" % (ptest,tmpfile) else: tmpfile = None doc = cib_factory.objlist2doc(self.obj_list) cib = doc.childNodes[0] status = cib_status.get_status() if not status: common_err("no status section found") return False cib.appendChild(doc.importNode(status,1)) pipe_string(ptest,doc.toprettyxml()) doc.unlink() if tmpfile: p = subprocess.Popen("%s %s" % (user_prefs.dotty,tmpfile), shell=True, bufsize=0, stdin=None, stdout=None, stderr=None, close_fds=True) 
common_info("starting %s to show transition graph"%user_prefs.dotty) vars.tmpfiles.append(tmpfile) else: if not nograph: common_info("install graphviz to see a transition graph") return True # # XML generate utilities # def set_id(node,oldnode,id_hint,id_required = True): ''' Set the id attribute for the node. Procedure: - if the node already contains "id", keep it - if the old node contains "id", copy that - if neither is true, then create a new one using id_hint (exception: if not id_required, then no new id is generated) Finally, save the new id in id_store. ''' old_id = None new_id = node.getAttribute("id") if oldnode and oldnode.getAttribute("id"): old_id = oldnode.getAttribute("id") if not new_id: new_id = old_id if not new_id: if id_required: new_id = id_store.new(node,id_hint) else: id_store.save(new_id) if new_id: node.setAttribute("id",new_id) if oldnode and old_id == new_id: set_id_used_attr(oldnode) def mkxmlsimple(e,oldnode,id_hint): ''' Create an xml node from the (name,dict) pair. The name is the name of the element. The dict contains a set of attributes. ''' node = cib_factory.createElement(e[0]) for n,v in e[1]: if n == "$children": # this one's skipped continue if n == "operation": v = v.lower() if n.startswith('$'): n = n.lstrip('$') if (type(v) != type('') and type(v) != type(u'')) \ or v: # skip empty strings node.setAttribute(n,v) id_ref = node.getAttribute("id-ref") if id_ref: id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) node.setAttribute("id-ref",id_ref_2) else: set_id(node,lookup_node(node,oldnode),id_hint) return node def mkxmlnvpairs(e,oldnode,id_hint): ''' Create xml from the (name,dict) pair. The name is the name of the element. The dict contains a set of nvpairs. Stuff such as instance_attributes. NB: Other tags not containing nvpairs are fine if the dict is empty. ''' node = cib_factory.createElement(e[0]) match_node = lookup_node(node,oldnode) #if match_node: #print "found nvpairs set:",match_node.tagName,match_node.getAttribute("id") id_ref = find_value(e[1],"$id-ref") if id_ref: id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) node.setAttribute("id-ref",id_ref_2) if e[0] != "operations": return node # id_ref is the only attribute (if not operations) e[1].remove(["$id-ref",id_ref]) v = find_value(e[1],"$id") if v: node.setAttribute("id",v) e[1].remove(["$id",v]) else: if e[0] == "operations": # operations don't need no id set_id(node,match_node,id_hint,id_required = False) else: set_id(node,match_node,id_hint) try: subpfx = vars.subpfx_list[e[0]] except: subpfx = '' subpfx = subpfx and "%s_%s" % (id_hint,subpfx) or id_hint nvpair_pfx = node.getAttribute("id") or subpfx for n,v in e[1]: nvpair = cib_factory.createElement("nvpair") node.appendChild(nvpair) nvpair.setAttribute("name",n) if v != None: nvpair.setAttribute("value",v) set_id(nvpair,lookup_node(nvpair,match_node),nvpair_pfx) return node def mkxmlop(e,oldnode,id_hint): ''' Create an operation xml from the (name,dict) pair. 
''' node = cib_factory.createElement(e[0]) inst_attr = [] for n,v in e[1]: if n in olist(vars.req_op_attributes + vars.op_attributes): node.setAttribute(n,v) else: inst_attr.append([n,v]) tmp = cib_factory.createElement("operations") oldops = lookup_node(tmp,oldnode) # first find old operations oldop = lookup_node(node,oldops) set_id(node,oldop,id_hint) if inst_attr: e = ["instance_attributes",inst_attr] nia = mkxmlnvpairs(e,oldop,node.getAttribute("id")) node.appendChild(nia) return node def mkxmldate(e,oldnode,id_hint): ''' Create a date_expression xml from the (name,dict) pair. ''' node = cib_factory.createElement(e[0]) operation = find_value(e[1],"operation").lower() node.setAttribute("operation", operation) old_date = lookup_node(node,oldnode) # first find old date element set_id(node,old_date,id_hint) date_spec_attr = [] for n,v in e[1]: if n in olist(vars.date_ops) or n == "operation": continue elif n in vars.in_range_attrs: node.setAttribute(n,v) else: date_spec_attr.append([n,v]) if not date_spec_attr: return node elem = operation == "date_spec" and "date_spec" or "duration" tmp = cib_factory.createElement(elem) old_date_spec = lookup_node(tmp,old_date) # first find old date element set_id(tmp,old_date_spec,id_hint) for n,v in date_spec_attr: tmp.setAttribute(n,v) node.appendChild(tmp) return node def mkxmlrsc_set(e,oldnode,id_hint): ''' Create a resource_set xml from the (name,dict) pair. ''' node = cib_factory.createElement(e[0]) old_rsc_set = lookup_node(node,oldnode) # first find old date element set_id(node,old_rsc_set,id_hint) for ref in e[1]: if ref[0] == "resource_ref": ref_node = cib_factory.createElement(ref[0]) ref_node.setAttribute(ref[1][0],ref[1][1]) node.appendChild(ref_node) elif ref[0] in ("sequential", "action", "role"): node.setAttribute(ref[0], ref[1]) return node conv_list = { "params": "instance_attributes", "meta": "meta_attributes", "property": "cluster_property_set", "rsc_defaults": "meta_attributes", "op_defaults": "meta_attributes", "attributes": "instance_attributes", "utilization": "utilization", "operations": "operations", "op": "op", } def mkxmlnode(e,oldnode,id_hint): ''' Create xml from the (name,dict) pair. The name is the name of the element. The dict contains either a set of nvpairs or a set of attributes. The id is either generated or copied if found in the provided xml. Stuff such as instance_attributes. ''' if e[0] in conv_list: e[0] = conv_list[e[0]] if e[0] in ("instance_attributes","meta_attributes","operations","cluster_property_set","utilization"): return mkxmlnvpairs(e,oldnode,id_hint) elif e[0] == "op": return mkxmlop(e,oldnode,id_hint) elif e[0] == "date_expression": return mkxmldate(e,oldnode,id_hint) elif e[0] == "resource_set": return mkxmlrsc_set(e,oldnode,id_hint) else: return mkxmlsimple(e,oldnode,id_hint) def set_nvpair(set_node,name,value): n_id = set_node.getAttribute("id") for c in set_node.childNodes: if is_element(c) and c.getAttribute("name") == name: c.setAttribute("value",value) return np = cib_factory.createElement("nvpair") np.setAttribute("name",name) np.setAttribute("value",value) new_id = id_store.new(np,n_id) np.setAttribute("id",new_id) set_node.appendChild(np) # # cib element classes (CibObject the parent class) # class CibObject(object): ''' The top level object of the CIB. Resources and constraints. 
''' state_fmt = "%16s %-8s%-8s%-8s%-8s%-8s%-4s" set_names = {} def __init__(self,xml_obj_type,obj_id = None): if not xml_obj_type in cib_object_map: unsupported_err(xml_obj_type) return self.obj_type = cib_object_map[xml_obj_type][0] self.parent_type = cib_object_map[xml_obj_type][2] self.xml_obj_type = xml_obj_type self.origin = "" # where did it originally come from? self.nocli = False # we don't support this one self.nocli_warn = True # don't issue warnings all the time self.updated = False # was the object updated self.invalid = False # the object has been invalidated (removed) self.moved = False # the object has been moved (from/to a container) self.recreate = False # constraints to be recreated self.comment = '' # comment as text self.parent = None # object superior (group/clone/ms) self.children = [] # objects inferior if obj_id: if not self.mknode(obj_id): self = None # won't do :( else: self.obj_id = None self.node = None def dump_state(self): 'Print object status' print self.state_fmt % \ (self.obj_id,self.origin,self.updated,self.moved,self.invalid, \ self.parent and self.parent.obj_id or "", \ len(self.children)) def repr_cli_xml(self,node,format): h = cli_display.keyword("xml") l = node.toprettyxml('\t').split('\n') l = [x for x in l if x] # drop empty lines if format > 0: return "%s %s" % (h,' \\\n'.join(l)) else: return "%s %s" % (h,''.join(l)) def repr_cli(self,node = None,format = 1): ''' CLI representation for the node. repr_cli_head and repr_cli_child in subclasess. ''' if not node: node = self.node if self.nocli: return self.repr_cli_xml(node,format) l = [] l.append(self.repr_cli_head(node)) cli_add_description(node,l) for c in node.childNodes: if not is_element(c): continue s = self.repr_cli_child(c,format) if s: l.append(s) return self.cli_format(l,format) def repr_cli_child(self,c,format): if c.tagName in self.set_names: return "%s %s" % \ (cli_display.keyword(self.set_names[c.tagName]), \ cli_pairs(nvpairs2list(c))) def cli2node(self,cli,oldnode = None): ''' Convert CLI representation to a DOM node. Defined in subclasses. ''' cli_list = mk_cli_list(cli) if not cli_list: return None if not oldnode: if self.obj_type == "property": oldnode = cib_factory.topnode[cib_object_map[self.xml_obj_type][2]] else: oldnode = self.node return self.cli_list2node(cli_list,oldnode) def cli_format(self,l,format): ''' Format and add comment (if any). ''' s = cli_format(l,format) return (self.comment and format >=0) and '\n'.join([self.comment,s]) or s def set_comment(self,l): s = '\n'.join(l) if self.comment != s: self.comment = s self.modified = True def pull_comments(self): ''' Collect comments from within this node. Remove them from the parent and stuff them in self.comments as an array. ''' l = [] cnodes = [x for x in self.node.childNodes if is_comment(x)] for n in cnodes: l.append(n.data) n.parentNode.removeChild(n) # convert comments from XML node to text. Multiple comments # are concatenated with '\n'. 
if not l: self.comment = '' return s = '\n'.join(l) l = s.split('\n') for i in range(len(l)): if not l[i].startswith('#'): l[i] = '#%s' % l[i] self.comment = '\n'.join(l) def mknode(self,obj_id): if not cib_factory.is_cib_sane(): return False if id_store.id_in_use(obj_id): return False if self.xml_obj_type in vars.defaults_tags: tag = "meta_attributes" else: tag = self.xml_obj_type self.node = cib_factory.createElement(tag) self.obj_id = obj_id self.node.setAttribute("id",self.obj_id) self.origin = "user" return True def mkcopy(self): ''' Create a new object with the same obj_id and obj_type (for the purpose of CibFactory.delete_objects) ''' obj_copy = CibObject(self.xml_obj_type) obj_copy.obj_id = self.obj_id obj_copy.obj_type = self.obj_type return obj_copy def can_be_renamed(self): ''' Return False if this object can't be renamed. ''' if is_rsc_running(self.obj_id): common_err("cannot rename a running resource (%s)" % self.obj_id) return False if not is_live_cib() and self.node.tagName == "node": common_err("cannot rename nodes") return False return True def attr_exists(self,attr): if not attr in self.node.attributes.keys(): no_attribute_err(attr,self.obj_id) return False return True def cli_use_validate(self): ''' Check validity of the object, as we know it. It may happen that we don't recognize a construct, but that the object is still valid for the CRM. In that case, the object is marked as "CLI read only", i.e. we will neither convert it to CLI nor try to edit it in that format. The validation procedure: we convert xml to cli and then back to xml. If the two xml representations match then we can understand the xml. ''' if not self.node: return True if not self.attr_exists("id"): return False cli_display.set_no_pretty() cli_text = self.repr_cli(format = -1) cli_display.reset_no_pretty() if not cli_text: return False xml2 = self.cli2node(cli_text) if not xml2: return False rc = xml_cmp(self.node, xml2, show = True) xml2.unlink() return rc def check_sanity(self): ''' Right now, this is only for primitives. ''' return 0 def matchcli(self,cli_list): head = cli_list[0] return self.obj_type == head[0] \ and self.obj_id == find_value(head[1],"id") def match(self,xml_obj_type,obj_id): return self.xml_obj_type == xml_obj_type and self.obj_id == obj_id def obj_string(self): return "%s:%s" % (self.obj_type,self.obj_id) def reset_updated(self): self.updated = False self.moved = False self.recreate = False for child in self.children: child.reset_updated() def propagate_updated(self): if self.parent: self.parent.updated = self.updated self.parent.propagate_updated() - def update_links(self): - ''' - Update the structure links for the object (self.children, - self.parent). Update also the dom nodes, if necessary. - ''' - self.children = [] - if self.obj_type not in vars.container_tags: - return - for c in self.node.childNodes: - if is_child_rsc(c): - child = cib_factory.find_object_for_node(c) - if not child: - missing_obj_err(c) - continue - child.parent = self - self.children.append(child) - if not c.isSameNode(child.node): - rmnode(child.node) - child.node = c - def update_from_cli(self,cli_list): - 'Update ourselves from the cli intermediate.' - if len(cli_list) >= 2 and cli_list[1][0] == "raw": - doc = xml.dom.minidom.parseString(cli_list[1][1]) - return self.update_element(doc.childNodes[0]) - return self.update_element(self.cli2node(cli_list)) - def update_from_node(self,node): - 'Update ourselves from a doc node.' 
- return self.update_element(node) - def update_element(self,newnode): - 'Update ourselves from a doc node.' - if not newnode: - return False - if not cib_factory.is_cib_sane(): - return False - oldnode = self.node - if xml_cmp(oldnode,newnode): - newnode.unlink() - return True # the new and the old versions are equal - obj.node = node - if not cib_factory.test_element(self,newnode) or \ - not id_store.replace_xml(oldnode,newnode): - obj.node = oldnode - newnode.unlink() - return False - self.node = cib_factory.replaceNode(newnode,oldnode) - self.nocli = False # try again after update - if not self.cli_use_validate(): - self.nocli = True - self.nocli_warn = True - else: - self.nocli = False - cib_factory.adjust_children(self) - oldnode.unlink() - self.updated = True - self.propagate_updated() - return True def top_parent(self): '''Return the top parent or self''' if self.parent: return self.parent.top_parent() else: return self def find_child_in_node(self,child): for c in self.node.childNodes: if not is_element(c): continue if c.tagName == child.obj_type and \ c.getAttribute("id") == child.obj_id: return c return None def filter(self,*args): "Filter objects." if not args: return True if args[0] == "NOOBJ": return False if args[0] == "changed": return self.updated or self.origin == "user" return self.obj_id in args def mk_cli_list(cli): 'Sometimes we get a string and sometimes a list.' if type(cli) == type('') or type(cli) == type(u''): return CliParser().parse(cli) else: return cli class CibNode(CibObject): ''' Node and node's attributes. ''' set_names = { "instance_attributes": "attributes", "utilization": "utilization", } def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") uname = node.getAttribute("uname") s = cli_display.keyword(obj_type) if node_id != uname: s = '%s $id="%s"' % (s, node_id) s = '%s %s' % (s, cli_display.id(uname)) type = node.getAttribute("type") if type != vars.node_default_type: s = '%s:%s' % (s, type) return s def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] obj_id = find_value(head[1],"$id") if not obj_id: obj_id = find_value(head[1],"uname") if not obj_id: return None type = find_value(head[1],"type") if not type: type = vars.node_default_type head[1].append(["type",type]) headnode = mkxmlsimple(head,cib_factory.topnode[cib_object_map[self.xml_obj_type][2]],'node') id_hint = headnode.getAttribute("id") for e in cli_list[1:]: n = mkxmlnode(e,oldnode,id_hint) headnode.appendChild(n) remove_id_used_attributes(cib_factory.topnode[cib_object_map[self.xml_obj_type][2]]) return headnode class CibPrimitive(CibObject): ''' Primitives. ''' set_names = { "instance_attributes": "params", "meta_attributes": "meta", "utilization": "utilization", } def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") ra_type = node.getAttribute("type") ra_class = node.getAttribute("class") ra_provider = node.getAttribute("provider") s1 = s2 = '' if ra_class: s1 = "%s:"%ra_class if ra_provider: s2 = "%s:"%ra_provider s = cli_display.keyword(obj_type) id = cli_display.id(node_id) return "%s %s %s" % (s, id, ''.join((s1,s2,ra_type))) def repr_cli_child(self,c,format): if c.tagName in self.set_names: return "%s %s" % \ (cli_display.keyword(self.set_names[c.tagName]), \ cli_pairs(nvpairs2list(c))) elif c.tagName == "operations": return cli_operations(c,format) def cli_list2node(self,cli_list,oldnode): ''' Convert a CLI description to DOM node. 
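For primitives, repr_cli_head assembles the class:provider:type triple, dropping whichever parts are absent. A toy version without the cli_display colouring (the function is illustrative only):

# Toy version of the primitive header built by CibPrimitive.repr_cli_head().
def primitive_head(node):
    spec = node.getAttribute("type")
    if node.getAttribute("provider"):
        spec = "%s:%s" % (node.getAttribute("provider"), spec)
    if node.getAttribute("class"):
        spec = "%s:%s" % (node.getAttribute("class"), spec)
    return "primitive %s %s" % (node.getAttribute("id"), spec)

# <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr"/>
# renders as: primitive vip ocf:heartbeat:IPaddr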
Try to preserve as many ids as possible in case there's an old XML version. ''' head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'rsc') id_hint = headnode.getAttribute("id") operations = None for e in cli_list[1:]: n = mkxmlnode(e,oldnode,id_hint) if keyword_cmp(e[0], "operations"): operations = n if not keyword_cmp(e[0], "op"): headnode.appendChild(n) else: if not operations: operations = mkxmlnode(["operations",{}],oldnode,id_hint) headnode.appendChild(operations) operations.appendChild(n) remove_id_used_attributes(oldnode) return headnode + def add_operation(self,cli_list): + # check if there is already an op with the same interval + head = copy.copy(cli_list[0]) + name = find_value(head[1], "name") + interval = find_value(head[1], "interval") + if find_operation(self.node,name,interval): + common_err("%s already has a %s op with interval %s" % \ + (self.obj_id, name, interval)) + return None + # drop the rsc attribute + head[1].remove(["rsc",self.obj_id]) + # create an xml node + mon_node = mkxmlsimple(head, None, self.obj_id) + # get the place to append it to + try: + op_node = self.node.getElementsByTagName("operations")[0] + except: + op_node = cib_factory.createElement("operations") + self.node.appendChild(op_node) + op_node.appendChild(mon_node) + # the resource is updated + self.updated = True + self.propagate_updated() + return self def check_sanity(self): ''' Check operation timeouts and if all required parameters are defined. ''' if not self.node: # eh? common_err("%s: no xml (strange)" % self.obj_id) return user_prefs.get_check_rc() from ra import RAInfo ra_type = self.node.getAttribute("type") ra_class = self.node.getAttribute("class") ra_provider = self.node.getAttribute("provider") ra = RAInfo(ra_class,ra_type,ra_provider) if not ra.mk_ra_node(): # no RA found? ra.error("no such resource agent") return user_prefs.get_check_rc() params = [] for c in self.node.childNodes: if not is_element(c): continue if c.tagName == "instance_attributes": params += nvpairs2list(c) rc1 = ra.sanity_check_params(self.obj_id, params) actions = {} for c in self.node.childNodes: if not is_element(c): continue if c.tagName == "operations": for c2 in c.childNodes: if is_element(c2) and c2.tagName == "op": op,pl = op2list(c2) if op: actions[op] = pl rc2 = ra.sanity_check_ops(self.obj_id, actions) return rc1 | rc2 class CibContainer(CibObject): ''' Groups and clones and ms. 
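The new add_operation() refuses to add a second op with the same name and interval. A rough standalone equivalent of the find_operation() lookup it depends on, over a plain minidom element:

# Rough equivalent of find_operation(): locate an existing <op> child with
# the given name and interval under the resource's <operations> element.
def find_op(rsc_node, name, interval):
    for ops in rsc_node.getElementsByTagName("operations"):
        for op in ops.getElementsByTagName("op"):
            if op.getAttribute("name") == name and \
                    op.getAttribute("interval") == interval:
                return op
    return None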
''' set_names = { "instance_attributes": "params", "meta_attributes": "meta", } def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") children = [] for c in node.childNodes: if not is_element(c): continue if (obj_type == "group" and is_primitive(c)) or \ is_child_rsc(c): children.append(cli_display.rscref(c.getAttribute("id"))) elif obj_type in vars.clonems_tags and is_child_rsc(c): children.append(cli_display.rscref(c.getAttribute("id"))) s = cli_display.keyword(obj_type) id = cli_display.id(node_id) return "%s %s %s" % (s, id, ' '.join(children)) def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'grp') id_hint = headnode.getAttribute("id") for e in cli_list[1:]: n = mkxmlnode(e,oldnode,id_hint) headnode.appendChild(n) v = find_value(head[1],"$children") if v: for child_id in v: obj = cib_factory.find_object(child_id) if obj: n = obj.node.cloneNode(1) headnode.appendChild(n) else: no_object_err(child_id) remove_id_used_attributes(oldnode) return headnode class CibLocation(CibObject): ''' Location constraint. ''' def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") rsc = cli_display.rscref(node.getAttribute("rsc")) s = cli_display.keyword(obj_type) id = cli_display.id(node_id) s = "%s %s %s"%(s,id,rsc) pref_node = node.getAttribute("node") score = cli_display.score(get_score(node)) if pref_node: return "%s %s %s" % (s,score,pref_node) else: return s def repr_cli_child(self,c,format): if c.tagName == "rule": return "%s %s" % \ (cli_display.keyword("rule"), cli_rule(c)) def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'location') id_hint = headnode.getAttribute("id") oldrule = None for e in cli_list[1:]: if e[0] in ("expression","date_expression"): n = mkxmlnode(e,oldrule,id_hint) else: n = mkxmlnode(e,oldnode,id_hint) if keyword_cmp(e[0], "rule"): add_missing_attr(n) rule = n headnode.appendChild(n) oldrule = lookup_node(rule,oldnode,location_only=True) else: rule.appendChild(n) remove_id_used_attributes(oldnode) return headnode class CibSimpleConstraint(CibObject): ''' Colocation and order constraints. ''' def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") s = cli_display.keyword(obj_type) id = cli_display.id(node_id) score = cli_display.score(get_score(node)) if node.getElementsByTagName("resource_set"): col = rsc_set_constraint(node,obj_type) else: col = two_rsc_constraint(node,obj_type) symm = node.getAttribute("symmetrical") if symm: col.append("symmetrical=%s"%symm) return "%s %s %s %s" % (s,id,score,' '.join(col)) def repr_cli_child(self,c,format): pass # no children here def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'') id_hint = headnode.getAttribute("id") for e in cli_list[1:]: # if more than one element, it's a resource set n = mkxmlnode(e,oldnode,id_hint) headnode.appendChild(n) remove_id_used_attributes(oldnode) return headnode class CibProperty(CibObject): ''' Cluster properties. 
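For the two-resource case, the colocation renderer boils down to "id score: rsc with-rsc". A toy rendering under that assumption (resource sets and display colouring omitted; rsc and with-rsc are the pacemaker 1.0 schema attribute names):

# Toy two-resource colocation header, roughly what repr_cli_head emits
# for rsc_colocation when no resource_set children are present.
def colocation_head(node):
    return "colocation %s %s: %s %s" % (
        node.getAttribute("id"),
        node.getAttribute("score"),
        node.getAttribute("rsc"),
        node.getAttribute("with-rsc"))

# <rsc_colocation id="c1" score="INFINITY" rsc="A" with-rsc="B"/>
# -> colocation c1 INFINITY: A B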
'''
    def repr_cli_head(self,node):
        return '%s $id="%s"' % \
            (cli_display.keyword(self.obj_type), node.getAttribute("id"))
    def repr_cli_child(self,c,format):
        name = c.getAttribute("name")
        if "value" in c.attributes.keys():
            value = c.getAttribute("value")
        else:
            value = None
        return nvpair_format(name,value)
    def cli_list2node(self,cli_list,oldnode):
        head = copy.copy(cli_list[0])
        head[0] = backtrans[head[0]]
        obj_id = find_value(head[1],"$id")
        if not obj_id:
            obj_id = cib_object_map[self.xml_obj_type][3]
        headnode = mkxmlnode(head,oldnode,obj_id)
        remove_id_used_attributes(oldnode)
        return headnode
    def matchcli(self,cli_list):
        head = cli_list[0]
        return self.obj_type == head[0] \
            and self.obj_id == find_value(head[1],"$id")

# ################################################################
#
# cib update interface (cibadmin)
#
cib_piped = "cibadmin -p"
def cib_delete_element(obj):
    'Remove one element from the CIB.'
    if obj.xml_obj_type in vars.defaults_tags:
        node = cib_factory.createElement("meta_attributes")
    else:
        node = cib_factory.createElement(obj.xml_obj_type)
    node.setAttribute("id",obj.obj_id)
    rc = pipe_string("%s -D" % cib_piped, node.toxml())
    if rc != 0:
        update_err(obj.obj_id,'-D',node.toprettyxml())
    node.unlink()
    return rc
def cib_update_elements(upd_list):
    'Update a set of objects in the CIB.'
    l = [x.obj_id for x in upd_list]
    o = CibObjectSetRaw(*l)
    xml = o.repr_configure()
    rc = pipe_string("%s -U" % cib_piped, xml)
    if rc != 0:
        update_err(' '.join(l),'-U',xml)
    return rc
def cib_replace_element(obj):
    comm_node = None
    if obj.comment:
        comm_node = cib_factory.createComment(obj.comment)
        if obj.node.hasChildNodes():
            obj.node.insertBefore(comm_node, obj.node.firstChild)
        else:
            obj.node.appendChild(comm_node)
    rc = pipe_string("%s -R -o %s" % \
        (cib_piped, obj.parent_type), obj.node.toxml())
    if rc != 0:
        update_err(obj.obj_id,'-R',obj.node.toprettyxml())
    if comm_node:
        rmnode(comm_node)
    return rc
def cib_delete_moved_children(obj):
    for c in obj.children:
        if c.origin == "cib" and c.moved:
            cib_delete_element(c)
def get_cib_default(property):
    if cib_factory.is_cib_sane():
        return cib_factory.get_property(property)

# xml -> cli translations (and classes)
cib_object_map = {
    "node": ( "node", CibNode, "nodes" ),
    "primitive": ( "primitive", CibPrimitive, "resources" ),
    "group": ( "group", CibContainer, "resources" ),
    "clone": ( "clone", CibContainer, "resources" ),
    "master": ( "ms", CibContainer, "resources" ),
    "rsc_location": ( "location", CibLocation, "constraints" ),
    "rsc_colocation": ( "colocation", CibSimpleConstraint, "constraints" ),
    "rsc_order": ( "order", CibSimpleConstraint, "constraints" ),
    "cluster_property_set": ( "property", CibProperty, "crm_config", "cib-bootstrap-options" ),
    "rsc_defaults": ( "rsc_defaults", CibProperty, "rsc_defaults", "rsc-options" ),
    "op_defaults": ( "op_defaults", CibProperty, "op_defaults", "op-options" ),
}
backtrans = odict()  # generate a translation cli -> tag
for key in cib_object_map:
    backtrans[cib_object_map[key][0]] = key
cib_topnodes = []  # get a list of parents
for key in cib_object_map:
    if not cib_object_map[key][2] in cib_topnodes:
        cib_topnodes.append(cib_object_map[key][2])
cib_upgrade = "cibadmin --upgrade --force"
class CibFactory(Singleton):
    '''
    Juggle with CIB objects.
    See check_structure below for details on the internal
    cib representation.
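All three update helpers shell out to cibadmin and feed it XML on stdin (cib_piped is "cibadmin -p"). A rough subprocess-based stand-in for the pipe_string() helper they call, assuming cibadmin is on PATH:

# Rough stand-in for pipe_string("cibadmin -p -D", xml): write the element
# to cibadmin's stdin and return its exit code. Python 2 str shown; under
# Python 3 the payload would need to be bytes.
import subprocess

def pipe_to_cibadmin(args, xml_text):
    proc = subprocess.Popen(["cibadmin", "-p"] + args,
                            stdin=subprocess.PIPE)
    proc.communicate(xml_text)
    return proc.returncode

# pipe_to_cibadmin(["-D"], '<primitive id="dummy"/>')   # delete by id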
''' shadowcmd = ">/dev/null 1: common_warn("%s contains more than one %s, using first" % \ (obj.obj_id,attr_list_type)) id = node_l[0].getAttribute("id") if not id: common_err("%s reference not found" % id_ref) return id_ref # hope that user will fix that return id # verify if id_ref exists node_l = self.doc.getElementsByTagName(attr_list_type) for node in node_l: if node.getAttribute("id") == id_ref: return id_ref common_err("%s reference not found" % id_ref) return id_ref # hope that user will fix that def get_property(self,property): ''' Get the value of the given cluster property. ''' for obj in self.cib_objects: if obj.obj_type == "property" and obj.node: pl = nvpairs2list(obj.node) v = find_value(pl, property) if v: return v return None def new_object(self,obj_type,obj_id): "Create a new object of type obj_type." if id_store.id_in_use(obj_id): return None for xml_obj_type,v in cib_object_map.items(): if v[0] == obj_type: obj = v[1](xml_obj_type,obj_id) if obj.obj_id: return obj else: return None return None def mkobj_list(self,mode,*args): obj_list = [] for obj in self.cib_objects: f = lambda: obj.filter(*args) if not f(): continue if mode == "cli" and obj.nocli and obj.nocli_warn: obj.nocli_warn = False obj_cli_warn(obj.obj_id) obj_list.append(obj) return obj_list def has_cib_changed(self): return self.mkobj_list("xml","changed") or self.remove_queue def verify_constraints(self,node): ''' Check if all resources referenced in a constraint exist ''' rc = True constraint_id = node.getAttribute("id") for obj_id in referenced_resources(node): if not self.find_object(obj_id): constraint_norefobj_err(constraint_id,obj_id) rc = False return rc def verify_rsc_children(self,node): ''' Check prerequisites: a) all children must exist b) no child may have other parent than me (or should we steal children?) c) there may not be duplicate children ''' obj_id = node.getAttribute("id") if not obj_id: common_err("element %s has no id" % node.tagName) return False try: obj_type = cib_object_map[node.tagName][0] except: common_err("element %s (%s) not recognized"%(node.tagName,obj_id)) return False c_ids = get_rsc_children_ids(node) if not c_ids: return True rc = True c_dict = {} for child_id in c_ids: if not self.verify_child(child_id,obj_type,obj_id): rc = False if child_id in c_dict: common_err("in group %s child %s listed more than once"%(obj_id,child_id)) rc = False c_dict[child_id] = 1 return rc def verify_child(self,child_id,obj_type,obj_id): 'Check if child exists and obj_id is (or may become) its parent.' child = self.find_object(child_id) if not child: no_object_err(child_id) return False if child.parent and child.parent.obj_id != obj_id: common_err("%s already in use at %s"%(child_id,child.parent.obj_id)) return False if obj_type == "group" and child.obj_type != "primitive": common_err("a group may contain only primitives; %s is %s"%(child_id,child.obj_type)) return False if not child.obj_type in vars.children_tags: common_err("%s may contain a primitive or a group; %s is %s"%(obj_type,child_id,child.obj_type)) return False return True def verify_element(self,node): ''' Can we create this object given its CLI representation. This is not about syntax, we're past that, but about semantics. Right now we check if the children, if any, are fit for the parent. And if this is a constraint, if all referenced resources are present. 
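verify_rsc_children()/verify_child() enforce three containment rules: a child may have at most one parent, a group holds only primitives, and no child may be listed twice. The same checks condensed over plain tuples, purely as an illustration:

# Condensed containment rules from verify_child()/verify_rsc_children().
# children is a list of (child_id, child_type, current_parent_id) tuples.
def check_children(obj_type, obj_id, children):
    seen = {}
    for child_id, child_type, parent_id in children:
        if parent_id and parent_id != obj_id:
            return "%s already in use at %s" % (child_id, parent_id)
        if obj_type == "group" and child_type != "primitive":
            return "a group may contain only primitives; %s is %s" % \
                (child_id, child_type)
        if child_id in seen:
            return "child %s listed more than once" % child_id
        seen[child_id] = 1
    return None  # all prerequisites satisfied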
''' rc = True if not self.verify_rsc_children(node): rc = False if not self.verify_constraints(node): rc = False return rc def create_object(self,*args): return self.create_from_cli(CliParser().parse(list(args))) != None def set_property_cli(self,cli_list): head_pl = cli_list[0] obj_type = head_pl[0].lower() pset_id = find_value(head_pl[1],"$id") if pset_id: head_pl[1].remove(["$id",pset_id]) else: pset_id = cib_object_map[backtrans[obj_type]][3] obj = self.find_object(pset_id) if not obj: if not is_id_valid(pset_id): invalid_id_err(pset_id) return None obj = self.new_object(obj_type,pset_id) if not obj: return None self.topnode[obj.parent_type].appendChild(obj.node) obj.origin = "user" self.cib_objects.append(obj) for n,v in head_pl[1]: set_nvpair(obj.node,n,v) obj.updated = True return obj def add_op(self,cli_list): '''Add an op to a primitive.''' head = cli_list[0] # does the referenced primitive exist rsc_id = find_value(head[1],"rsc") - rsc_obj = cib_factory.find_object(rsc_id) + rsc_obj = self.find_object(rsc_id) if not rsc_obj: no_object_err(rsc_id) return None if rsc_obj.obj_type != "primitive": common_err("%s is not a primitive" % rsc_id) return None - # check if there is already an op with the same interval - name = find_value(head[1], "name") - interval = find_value(head[1], "interval") - if find_operation(rsc_obj.node,name,interval): - common_err("%s already has a %s op with interval %s" % \ - (rsc_id, name, interval)) - return None - # drop the rsc attribute - head[1].remove(["rsc",rsc_id]) - # create an xml node - mon_node = mkxmlsimple(head, None, rsc_id) - # get the place to append it to - try: - op_node = rsc_obj.node.getElementsByTagName("operations")[0] - except: - op_node = self.createElement("operations") - rsc_obj.node.appendChild(op_node) - op_node.appendChild(mon_node) - # the resource is updated - rsc_obj.updated = True - rsc_obj.propagate_updated() - return rsc_obj + return rsc_obj.add_operation(cli_list) def create_from_cli(self,cli): 'Create a new cib object from the cli representation.' cli_list = mk_cli_list(cli) if not cli_list: return None head = cli_list[0] obj_type = head[0].lower() obj_id = find_value(head[1],"id") if obj_id and not is_id_valid(obj_id): invalid_id_err(obj_id) return None if len(cli_list) >= 2 and cli_list[1][0] == "raw": doc = xml.dom.minidom.parseString(cli_list[1][1]) return self.create_from_node(doc.childNodes[0]) if obj_type in olist(vars.nvset_cli_names): return self.set_property_cli(cli_list) if obj_type == "op": return self.add_op(cli_list) obj = self.new_object(obj_type,obj_id) if not obj: return None node = obj.cli2node(cli_list) - obj.node = node - obj.obj_id = obj_id - if not node: - return None - if not self.test_element(obj, node): - id_store.remove_xml(node) - node.unlink() - return None - self.add_element(obj, node) - return obj + return self.add_element(obj, node) + def update_from_cli(self,obj,cli_list): + 'Update element from the cli intermediate.' + id_store.remove_xml(obj.node) + if len(cli_list) >= 2 and cli_list[1][0] == "raw": + doc = xml.dom.minidom.parseString(cli_list[1][1]) + id_store.store_xml(doc.childNodes[0]) + return self.update_element(obj,doc.childNodes[0]) + return self.update_element(obj,obj.cli2node(cli_list)) + def update_from_node(self,obj,node): + 'Update element from a doc node.' + id_store.replace_xml(obj.node,node) + return self.update_element(obj,node) + def update_element(self,obj,newnode): + 'Update element from a doc node.' 
+ if not newnode: + return False + if not self.is_cib_sane(): + id_store.replace_xml(newnode,obj.node) + return False + oldnode = obj.node + if xml_cmp(oldnode,newnode): + newnode.unlink() + return True # the new and the old versions are equal + obj.node = newnode + if not self.test_element(obj,newnode): + id_store.replace_xml(newnode,oldnode) + obj.node = oldnode + newnode.unlink() + return False + obj.node = self.replaceNode(newnode,oldnode) + obj.nocli = False # try again after update + self.adjust_children(obj) + if not obj.cli_use_validate(): + obj.nocli_warn = True + obj.nocli = True + oldnode.unlink() + obj.updated = True + obj.propagate_updated() + return True def update_moved(self,obj): 'Updated the moved flag. Mark affected constraints.' obj.moved = not obj.moved if obj.moved: for c_obj in self.related_constraints(obj): c_obj.recreate = True def adjust_children(self,obj): ''' All stuff children related: manage the nodes of children, update the list of children for the parent, update parents in the children. ''' new_children_ids = get_rsc_children_ids(obj.node) if not new_children_ids: return old_children = obj.children obj.children = [self.find_object(x) for x in new_children_ids] self._relink_orphans_to_top(old_children,obj.children) self._update_children(obj) def _relink_child_to_top(self,obj): 'Relink a child to the top node.' obj.node.parentNode.removeChild(obj.node) self.topnode[obj.parent_type].appendChild(obj.node) if obj.origin == "cib": self.update_moved(obj) obj.parent = None def _update_children(self,obj): '''For composite objects: update all children nodes. ''' # unlink all and find them in the new node for child in obj.children: oldnode = child.node child.node = obj.find_child_in_node(child) if child.children: # and children of children self._update_children(child) rmnode(oldnode) if not child.parent and child.origin == "cib": self.update_moved(child) if child.parent and child.parent != obj: child.parent.updated = True # the other parent updated child.parent = obj def _relink_orphans_to_top(self,old_children,new_children): "New orphans move to the top level for the object type." for child in old_children: if child not in new_children: self._relink_child_to_top(child) def test_element(self,obj,node): if not node.getAttribute("id"): return False if not self.verify_element(node): return False if user_prefs.is_check_always() \ and obj.check_sanity() > 1: return False return True + def update_links(self,obj): + ''' + Update the structure links for the object (obj.children, + obj.parent). Update also the dom nodes, if necessary. 
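The new update_element() has a compare, validate, swap, roll-back shape: identical trees short-circuit, a failed semantic check restores the old node and the id store, and only a successful swap marks the object updated. Schematically (the callables are parameters standing in for xml_cmp, test_element, replaceNode and the id_store bookkeeping):

# Schematic control flow of CibFactory.update_element().
def swap_node(obj, newnode, equal, validate, replace, rollback_ids):
    if newnode is None:
        return False
    if equal(obj.node, newnode):       # nothing changed: drop the new tree
        newnode.unlink()
        return True
    oldnode, obj.node = obj.node, newnode
    if not validate(obj, newnode):     # semantic checks failed: roll back
        rollback_ids(newnode, oldnode)
        obj.node = oldnode
        newnode.unlink()
        return False
    obj.node = replace(newnode, oldnode)
    oldnode.unlink()
    obj.updated = True
    return True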
+ ''' + obj.children = [] + if obj.obj_type not in vars.container_tags: + return + for c in obj.node.childNodes: + if is_child_rsc(c): + child = self.find_object_for_node(c) + if not child: + missing_obj_err(c) + continue + child.parent = obj + obj.children.append(child) + if not c.isSameNode(child.node): + rmnode(child.node) + child.node = c def add_element(self,obj,node): + obj.node = node + obj.obj_id = node.getAttribute("id") + if not self.test_element(obj, node): + id_store.remove_xml(node) + node.unlink() + return None common_debug("append child %s to %s" % \ (obj.obj_id,self.topnode[obj.parent_type].tagName)) self.topnode[obj.parent_type].appendChild(node) self.adjust_children(obj) self.redirect_children_constraints(obj) if not obj.cli_use_validate(): self.nocli_warn = True obj.nocli = True - obj.update_links() + self.update_links(obj) obj.origin = "user" self.cib_objects.append(obj) + return obj def create_from_node(self,node): 'Create a new cib object from a document node.' if not node: return None try: obj_type = cib_object_map[node.tagName][0] except: return None if is_defaults(node): node = get_rscop_defaults_meta_node(node) if not node: return None if node.ownerDocument != self.doc: node = self.doc.importNode(node,1) obj = self.new_object(obj_type, node.getAttribute("id")) if not obj: return None if not id_store.store_xml(node): return None - obj.node = node - obj.obj_id = node.getAttribute("id") - if not self.test_element(obj, node): - id_store.remove_xml(node) - node.unlink() - return None - self.add_element(obj, node) - return obj + return self.add_element(obj, node) def cib_objects_string(self, obj_list = None): l = [] if not obj_list: obj_list = self.cib_objects for obj in obj_list: l.append(obj.obj_string()) return ' '.join(l) def _remove_obj(self,obj): "Remove a cib object and its children." # remove children first # can't remove them here from obj.children! 
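create_from_node() copies elements that belong to a different minidom document before linking them in; minidom will not let a node owned by one Document be appended to another. A self-contained demonstration of that step:

# minidom nodes are owned by one Document, hence the importNode() call in
# create_from_node() before the element is appended. A true second argument
# requests a deep copy, children included.
import xml.dom.minidom as minidom

src = minidom.parseString('<primitive id="d1" class="ocf" type="Dummy"/>')
dst = minidom.parseString('<resources/>')
elem = src.documentElement
if elem.ownerDocument != dst:
    elem = dst.importNode(elem, 1)
dst.documentElement.appendChild(elem)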
common_debug("remove object %s" % obj.obj_string()) for child in obj.children: #self._remove_obj(child) # just relink, don't remove children self._relink_child_to_top(child) if obj.parent: # remove obj from its parent, if any obj.parent.children.remove(obj) id_store.remove_xml(obj.node) rmnode(obj.node) obj.invalid = True self.add_to_remove_queue(obj) self.cib_objects.remove(obj) for c_obj in self.related_constraints(obj): if is_simpleconstraint(c_obj.node) and obj.children: # the first child inherits constraints rename_rscref(c_obj,obj.obj_id,obj.children[0].obj_id) delete_rscref(c_obj,obj.obj_id) if silly_constraint(c_obj.node,obj.obj_id): # remove invalid constraints self._remove_obj(c_obj) if not self._no_constraint_rm_msg: err_buf.info("hanging %s deleted" % c_obj.obj_string()) def related_constraints(self,obj): if not is_resource(obj.node): return [] c_list = [] for obj2 in self.cib_objects: if not is_constraint(obj2.node): continue if rsc_constraint(obj.obj_id,obj2.node): c_list.append(obj2) return c_list def redirect_children_constraints(self,obj): ''' Redirect constraints to the new parent ''' for child in obj.children: for c_obj in self.related_constraints(child): self.remove_queue.append(c_obj.mkcopy()) rename_rscref(c_obj,child.obj_id,obj.obj_id) # drop useless constraints which may have been created above for c_obj in self.related_constraints(obj): if silly_constraint(c_obj.node,obj.obj_id): self._no_constraint_rm_msg = True self._remove_obj(c_obj) self._no_constraint_rm_msg = False def add_to_remove_queue(self,obj): if obj.origin == "cib": self.remove_queue.append(obj) #print self.cib_objects_string(self.remove_queue) def delete_1(self,obj): ''' Remove an object and its parent in case the object is the only child. ''' if obj.parent and len(obj.parent.children) == 1: self.delete_1(obj.parent) if obj in self.cib_objects: # don't remove parents twice self._remove_obj(obj) def delete(self,*args): 'Delete a cib object.' if not self.doc: empty_cib_err() return False rc = True l = [] for obj_id in args: obj = self.find_object(obj_id) if not obj: no_object_err(obj_id) rc = False continue if is_rsc_running(obj_id): common_warn("resource %s is running, can't delete it" % obj_id) else: l.append(obj) if l: l = processing_sort_cli(l) for obj in reversed(l): self.delete_1(obj) return rc def remove_on_rename(self,obj): ''' If the renamed object is coming from the cib, then it must be removed and a new one created. ''' if obj.origin == "cib": self.remove_queue.append(obj.mkcopy()) obj.origin = "user" def rename(self,old_id,new_id): ''' Rename a cib object. - check if the resource (if it's a resource) is stopped - check if the new id is not taken - find the object with old id - rename old id to new id in all related objects (constraints) - if the object came from the CIB, then it must be deleted and the one with the new name created - rename old id to new id in the object ''' if not self.doc: empty_cib_err() return False if id_store.id_in_use(new_id): return False obj = self.find_object(old_id) if not obj: no_object_err(old_id) return False if not obj.can_be_renamed(): return False for c_obj in self.related_constraints(obj): rename_rscref(c_obj,old_id,new_id) self.remove_on_rename(obj) rename_id(obj.node,old_id,new_id) obj.obj_id = new_id id_store.rename(old_id,new_id) obj.updated = True obj.propagate_updated() def erase(self): "Remove all cib objects." 
# remove only bottom objects and no constraints # the rest will automatically follow if not self.doc: empty_cib_err() return False erase_ok = True l = [] for obj in [obj for obj in self.cib_objects \ if not obj.children and not is_constraint(obj.node) \ and obj.obj_type != "node" ]: if is_rsc_running(obj.obj_id): common_warn("resource %s is running, can't delete it" % obj.obj_id) erase_ok = False else: l.append(obj) if not erase_ok: common_err("CIB erase aborted (nothing was deleted)") return False self._no_constraint_rm_msg = True for obj in l: self.delete(obj.obj_id) self._no_constraint_rm_msg = False remaining = 0 for obj in self.cib_objects: if obj.obj_type != "node": remaining += 1 if remaining > 0: common_err("strange, but these objects remained:") for obj in self.cib_objects: if obj.obj_type != "node": print >> sys.stderr, obj.obj_string() self.cib_objects = [] return True def erase_nodes(self): "Remove nodes only." if not self.doc: empty_cib_err() return False l = [obj for obj in self.cib_objects if obj.obj_type == "node"] for obj in l: self.delete(obj.obj_id) def refresh(self): "Refresh from the CIB." self.reset() self.initialize() user_prefs = UserPrefs.getInstance() options = Options.getInstance() err_buf = ErrorBuffer.getInstance() vars = Vars.getInstance() cib_factory = CibFactory.getInstance() cli_display = CliDisplay.getInstance() cib_status = CibStatus.getInstance() id_store = IdMgmt.getInstance() # vim:ts=4:sw=4:et: diff --git a/shell/modules/msg.py b/shell/modules/msg.py index 3ae1a4ff88..1073216a4b 100644 --- a/shell/modules/msg.py +++ b/shell/modules/msg.py @@ -1,149 +1,153 @@ # Copyright (C) 2008 Dejan Muhamedagic # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import sys from singletonmixin import Singleton from userprefs import Options, UserPrefs class ErrorBuffer(Singleton): ''' Show error messages either immediately or buffered. ''' def __init__(self): self.msg_list = [] self.mode = "immediate" self.lineno = -1 def buffer(self): self.mode = "keep" def release(self): if self.msg_list: print >> sys.stderr, '\n'.join(self.msg_list) if not options.batch: try: raw_input("Press enter to continue... 
") except EOFError: pass self.msg_list = [] self.mode = "immediate" def writemsg(self,msg): if self.mode == "immediate": if options.regression_tests: print msg else: print >> sys.stderr, msg else: self.msg_list.append(msg) def reset_lineno(self): self.lineno = 0 def incr_lineno(self): if self.lineno >= 0: self.lineno += 1 def start_tmp_lineno(self): self._save_lineno = self.lineno self.reset_lineno() def stop_tmp_lineno(self): self.lineno = self._save_lineno def add_lineno(self,s): if self.lineno > 0: return "%d: %s" % (self.lineno,s) else: return s def error(self,s): self.writemsg("ERROR: %s" % self.add_lineno(s)) def warning(self,s): self.writemsg("WARNING: %s" % self.add_lineno(s)) def info(self,s): self.writemsg("INFO: %s" % self.add_lineno(s)) def debug(self,s): if user_prefs.get_debug(): self.writemsg("DEBUG: %s" % self.add_lineno(s)) +def common_error(s): + err_buf.error(s) def common_err(s): err_buf.error(s) +def common_warning(s): + err_buf.warning(s) def common_warn(s): err_buf.warning(s) def common_info(s): err_buf.info(s) def common_debug(s): err_buf.debug(s) def no_prog_err(name): err_buf.error("%s not available, check your installation"%name) def missing_prog_warn(name): err_buf.warning("could not find any %s on the system"%name) def no_attribute_err(attr,obj_type): err_buf.error("required attribute %s not found in %s"%(attr,obj_type)) def bad_def_err(what,msg): err_buf.error("bad %s definition: %s"%(what,msg)) def unsupported_err(name): err_buf.error("%s is not supported"%name) def no_such_obj_err(name): err_buf.error("%s object is not supported"%name) def obj_cli_warn(name): err_buf.warning("object %s cannot be represented in the CLI notation"%name) def missing_obj_err(node): err_buf.error("object %s:%s missing (shouldn't have happened)"% \ (node.tagName,node.getAttribute("id"))) def constraint_norefobj_err(constraint_id,obj_id): err_buf.error("constraint %s references a resource %s which doesn't exist"% \ (constraint_id,obj_id)) def obj_exists_err(name): err_buf.error("object %s already exists"%name) def no_object_err(name): err_buf.error("object %s does not exist"%name) def invalid_id_err(obj_id): err_buf.error("%s: invalid object id"%obj_id) def id_used_err(node_id): err_buf.error("%s: id is already in use"%node_id) def skill_err(s): err_buf.error("%s: this command is not allowed at this skill level"%' '.join(s)) def syntax_err(s,token = '',context = ''): pfx = "syntax" if context: pfx = "%s in %s" %(pfx,context) if type(s) == type(''): err_buf.error("%s near <%s>"%(pfx,s)) elif token: err_buf.error("%s near <%s>: %s"%(pfx,token,' '.join(s))) else: err_buf.error("%s: %s"%(pfx,' '.join(s))) def bad_usage(cmd,args): err_buf.error("bad usage: %s %s"%(cmd,args)) def empty_cib_err(): err_buf.error("No CIB!") def cib_parse_err(msg,s): err_buf.error("%s"%msg) err_buf.info("offending string: %s" % s) def cib_no_elem_err(el_name): err_buf.error("CIB contains no '%s' element!"%el_name) def cib_ver_unsupported_err(validator,rel): err_buf.error("CIB not supported: validator '%s', release '%s'"% (validator,rel)) err_buf.error("You may try the upgrade command") def update_err(obj_id,cibadm_opt,xml): if cibadm_opt == '-U': task = "update" elif cibadm_opt == '-D': task = "delete" else: task = "replace" err_buf.error("could not %s %s"%(task,obj_id)) err_buf.info("offending xml: %s" % xml) def not_impl_info(s): err_buf.info("%s is not implemented yet" % s) user_prefs = UserPrefs.getInstance() err_buf = ErrorBuffer.getInstance() options = Options.getInstance() # vim:ts=4:sw=4:et: diff 
--git a/shell/modules/ui.py.in b/shell/modules/ui.py.in index 4aa7e6cdfd..bb939cbfc7 100644 --- a/shell/modules/ui.py.in +++ b/shell/modules/ui.py.in @@ -1,1771 +1,1778 @@ # Copyright (C) 2008 Dejan Muhamedagic # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import sys import re import os import readline import time from help import HelpSystem, cmd_help from vars import Vars from cache import WCache from levels import Levels from cibconfig import mkset_obj, CibFactory from cibstatus import CibStatus from template import LoadTemplate from ra import * from msg import * from utils import * from xmlutil import * def cmd_end(cmd,dir = ".."): "Go up one level." levels.droplevel() def cmd_exit(cmd): "Exit the crm program" cmd_end(cmd) if options.interactive and not options.batch: print "bye" try: readline.write_history_file(hist_file) except: pass for f in vars.tmpfiles: os.unlink(f) sys.exit() class UserInterface(object): ''' Stuff common to all user interface classes. ''' global_cmd_aliases = { "quit": ("bye","exit"), "end": ("cd","up"), } def __init__(self): self.cmd_table = odict() self.cmd_table["help"] = (self.help,(0,1),0) self.cmd_table["quit"] = (self.exit,(0,0),0) self.cmd_table["end"] = (self.end,(0,1),0) self.cmd_aliases = self.global_cmd_aliases.copy() def end_game(self, no_questions_asked = False): pass def help(self,cmd,topic = ''): "usage: help []" cmd_help(self.help_table,topic) def end(self,cmd,dir = ".."): "usage: end" self.end_game() cmd_end(cmd,dir) def exit(self,cmd): "usage: exit" self.end_game() cmd_exit(cmd) class CliOptions(UserInterface): ''' Manage user preferences ''' def __init__(self): UserInterface.__init__(self) self.help_table = help_sys.load_level("options") self.cmd_table["skill-level"] = (self.set_skill_level,(1,1),0,(skills_list,)) self.cmd_table["editor"] = (self.set_editor,(1,1),0) self.cmd_table["pager"] = (self.set_pager,(1,1),0) self.cmd_table["user"] = (self.set_crm_user,(0,1),0) self.cmd_table["output"] = (self.set_output,(1,1),0) self.cmd_table["colorscheme"] = (self.set_colors,(1,1),0) self.cmd_table["check-frequency"] = (self.set_check_frequency,(1,1),0) self.cmd_table["check-mode"] = (self.set_check_mode,(1,1),0) self.cmd_table["sort-elements"] = (self.set_sort_elements,(1,1),0) self.cmd_table["save"] = (self.save_options,(0,0),0) self.cmd_table["show"] = (self.show_options,(0,0),0) setup_aliases(self) def set_skill_level(self,cmd,skill_level): """usage: skill-level level: operator | administrator | expert""" return user_prefs.set_skill_level(skill_level) def set_editor(self,cmd,prog): "usage: editor " return user_prefs.set_editor(prog) def set_pager(self,cmd,prog): "usage: pager " return user_prefs.set_pager(prog) def set_crm_user(self,cmd,user = ''): "usage: user []" return user_prefs.set_crm_user(user) def set_output(self,cmd,otypes): "usage: output " return user_prefs.set_output(otypes) def 
set_colors(self,cmd,scheme):
        "usage: colorscheme "
        return user_prefs.set_colors(scheme)
    def set_check_frequency(self,cmd,freq):
        "usage: check-frequency "
        return user_prefs.set_check_freq(freq)
    def set_check_mode(self,cmd,mode):
        "usage: check-mode "
        return user_prefs.set_check_mode(mode)
    def set_sort_elements(self,cmd,opt):
        "usage: sort-elements {yes|no}"
        if not verify_boolean(opt):
            common_err("%s: bad boolean option"%opt)
            return True
        return user_prefs.set_sort_elems(opt)
    def show_options(self,cmd):
        "usage: show"
        return user_prefs.write_rc(sys.stdout)
    def save_options(self,cmd):
        "usage: save"
        return user_prefs.save_options(vars.rc_file)
    def end_game(self, no_questions_asked = False):
        if no_questions_asked and not options.interactive:
            self.save_options("save")

def listshadows():
    return stdout2list("ls @CRM_CONFIG_DIR@ | fgrep shadow. | sed 's/^shadow\.//'")
def shadowfile(name):
    return "@CRM_CONFIG_DIR@/shadow.%s" % name
def shadow2doc(name):
    return file2doc(shadowfile(name))

class CibShadow(UserInterface):
    '''
    CIB shadow management class
    '''
    extcmd = ">/dev/null &1" % self.extcmd) except os.error: no_prog_err(self.extcmd) return False return True
    def new(self,cmd,name,*args):
        "usage: new [withstatus] [force]"
        if not is_filename_sane(name):
            return False
        new_cmd = "%s -c '%s'" % (self.extcmd,name)
        for par in args:
            if not par in ("force","--force","withstatus"):
                syntax_err((cmd,name,par), context = 'new')
                return False
        if user_prefs.get_force() or "force" in args or "--force" in args:
            new_cmd = "%s --force" % new_cmd
        if ext_cmd(new_cmd) == 0:
            common_info("%s shadow CIB created"%name)
            self.use("use",name)
            if "withstatus" in args:
                cib_status.load("shadow:%s" % name)
    def delete(self,cmd,name):
        "usage: delete "
        if not is_filename_sane(name):
            return False
        if vars.cib_in_use == name:
            common_err("%s shadow CIB is in use"%name)
            return False
        if ext_cmd("%s -D '%s' --force" % (self.extcmd,name)) == 0:
            common_info("%s shadow CIB deleted"%name)
        else:
            common_err("failed to delete %s shadow CIB"%name)
            return False
    def reset(self,cmd,name):
        "usage: reset "
        if not is_filename_sane(name):
            return False
        if ext_cmd("%s -r '%s'" % (self.extcmd,name)) == 0:
            common_info("copied live CIB to %s"%name)
        else:
            common_err("failed to copy live CIB to %s"%name)
            return False
    def commit(self,cmd,name):
        "usage: commit "
        if not is_filename_sane(name):
            return False
        if ext_cmd("%s -C '%s' --force" % (self.extcmd,name)) == 0:
            common_info("committed '%s' shadow CIB to the cluster"%name)
            wcache.clear()
        else:
            common_err("failed to commit the %s shadow CIB"%name)
            return False
    def diff(self,cmd):
        "usage: diff"
        s = get_stdout(add_sudo("%s -d" % self.extcmd_stdout))
        page_string(s)
    def list(self,cmd):
        "usage: list"
        if options.regression_tests:
            for t in listshadows():
                print t
        else:
            multicolumn(listshadows())
    def _use(self,name,withstatus):
        # Choose a shadow cib for further changes. If the name
        # provided is empty, then choose the live (cluster) cib.
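_use() switches between the live CIB and a shadow copy purely through one environment variable that the pacemaker tools honour. The module reads the variable's name from vars.shadow_envvar; "CIB_shadow" below is an assumption, as is using os.environ where the original calls os.putenv/os.unsetenv:

# Sketch of the environment switch performed by _use(). The variable name
# "CIB_shadow" is assumed here; the shell takes it from vars.shadow_envvar.
import os

def select_cib(name):
    if not name or name == "live":
        os.environ.pop("CIB_shadow", None)   # back to the cluster CIB
        return ""
    os.environ["CIB_shadow"] = name          # later tools use the shadow copy
    return name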
# Don't allow ' in shadow names if not name or name == "live": os.unsetenv(vars.shadow_envvar) vars.cib_in_use = "" if withstatus: cib_status.load("live") else: os.putenv(vars.shadow_envvar,name) vars.cib_in_use = name if withstatus: cib_status.load("shadow:%s" % name) def use(self,cmd,name = '', withstatus = ''): "usage: use [] [withstatus]" # check the name argument if name and not is_filename_sane(name): return False if name and name != "live": if not os.access(shadowfile(name),os.F_OK): common_err("%s: no such shadow CIB"%name) return False if withstatus and withstatus != "withstatus": syntax_err((cmd,withstatus), context = 'use') return False # If invoked from configure # take special precautions try: prev_level = levels.previous().myname() except: prev_level = '' if prev_level != "cibconfig": self._use(name,withstatus) return True if not cib_factory.has_cib_changed(): self._use(name,withstatus) # new CIB: refresh the CIB factory cib_factory.refresh() return True saved_cib = vars.cib_in_use self._use(name,'') # don't load the status yet if not cib_factory.is_current_cib_equal(silent = True): # user made changes and now wants to switch to a # different and unequal CIB; we refuse to cooperate common_err("the requested CIB is different from the current one") if user_prefs.get_force(): common_info("CIB overwrite forced") elif not ask("All changes will be dropped. Do you want to proceed?"): self._use(saved_cib,'') # revert to the previous CIB return False self._use(name,withstatus) # now load the status too return True def listtemplates(): l = [] for f in os.listdir(vars.tmpl_dir): if os.path.isfile("%s/%s" % (vars.tmpl_dir,f)): l.append(f) return l def listconfigs(): l = [] for f in os.listdir(vars.tmpl_conf_dir): if os.path.isfile("%s/%s" % (vars.tmpl_conf_dir,f)): l.append(f) return l def check_transition(inp,state,possible_l): if not state in possible_l: common_err("input (%s) in wrong state %s" % (inp,state)) return False return True class Template(UserInterface): ''' Configuration templates. ''' def __init__(self): UserInterface.__init__(self) self.help_table = help_sys.load_level("template") self.cmd_table["new"] = (self.new,(2,),1,(null_list,templates_list,loop)) self.cmd_table["load"] = (self.load,(0,1),1,(config_list,)) self.cmd_table["edit"] = (self.edit,(0,1),1,(config_list,)) self.cmd_table["delete"] = (self.delete,(1,2),1,(config_list,)) self.cmd_table["show"] = (self.show,(0,1),0,(config_list,)) self.cmd_table["apply"] = (self.apply,(0,2),1,(config_list_method,config_list)) self.cmd_table["list"] = (self.list,(0,1),0) setup_aliases(self) self.init_dir() self.curr_conf = '' def init_dir(self): '''Create the conf directory, link to templates''' if not os.path.isdir(vars.tmpl_conf_dir): try: os.makedirs(vars.tmpl_conf_dir) except os.error,msg: common_err("makedirs: %s"%msg) return def get_depends(self,tmpl): '''return a list of required templates''' # Not used. May need it later. try: tf = open("%s/%s" % (vars.tmpl_dir, tmpl),"r") except IOError,msg: common_err("open: %s"%msg) return l = [] for s in tf: a = s.split() if len(a) >= 2 and a[0] == '%depends_on': l += a[1:] tf.close() return l def replace_params(self,s,user_data): change = False for i in range(len(s)): word = s[i] for p in user_data: # is parameter in the word? pos = word.find('%' + p) if pos < 0: continue endpos = pos + len('%' + p) # and it isn't part of another word? 
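replace_params() substitutes %name tokens word by word, quoting a value when it contains a space or completes an attr= assignment. A compact single-word version of the same substitution (the identifier-boundary check from the original is omitted for brevity):

# Compact per-word substitution in the spirit of replace_params(): values
# with spaces, or values filling an attr= assignment, get double quotes.
def subst_word(word, params):
    for name, value in params.items():
        token = '%' + name
        pos = word.find(token)
        if pos < 0:
            continue
        if ' ' in value or word[pos-1:pos] == '=':
            value = '"%s"' % value
        word = word.replace(token, value)
    return word

# subst_word('ip=%ip', {'ip': '10.0.0.1'}) == 'ip="10.0.0.1"'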
if re.match("[A-Za-z0-9]", word[endpos:endpos+1]): continue # if the value contains a space or # it is a value of an attribute # put quotes around it if user_data[p].find(' ') >= 0 or word[pos-1:pos] == '=': v = '"' + user_data[p] + '"' else: v = user_data[p] word = word.replace('%' + p, v) change = True # we did replace something if change: s[i] = word if 'opt' in s: if not change: s = [] else: s.remove('opt') return s def generate(self,l,user_data): '''replace parameters (user_data) and generate output ''' l2 = [] for piece in l: piece2 = [] for s in piece: s = self.replace_params(s,user_data) if s: piece2.append(' '.join(s)) if piece2: l2.append(' \\\n\t'.join(piece2)) return '\n'.join(l2) def process(self,config = ''): '''Create a cli configuration from the current config''' try: f = open("%s/%s" % (vars.tmpl_conf_dir, config or self.curr_conf),'r') except IOError,msg: common_err("open: %s"%msg) return '' l = [] piece = [] user_data = {} # states START = 0; PFX = 1; DATA = 2; GENERATE = 3 state = START err_buf.start_tmp_lineno() rc = True for inp in f: err_buf.incr_lineno() if inp.startswith('#'): continue if type(inp) == type(u''): inp = inp.encode('ascii') inp = inp.strip() try: s = shlex.split(inp) except ValueError, msg: common_err(msg) continue while '\n' in s: s.remove('\n') if not s: if state == GENERATE and piece: l.append(piece) piece = [] elif s[0] in ("%name","%depends_on","%suggests"): continue elif s[0] == "%pfx": if check_transition(inp,state,(START,DATA)) and len(s) == 2: pfx = s[1] state = PFX elif s[0] == "%required": if check_transition(inp,state,(PFX,)): state = DATA data_reqd = True elif s[0] == "%optional": if check_transition(inp,state,(PFX,DATA)): state = DATA data_reqd = False elif s[0] == "%%": if state != DATA: common_warn("user data in wrong state %s" % state) if len(s) < 2: common_warn("parameter name missing") elif len(s) == 2: if data_reqd: common_err("required parameter %s not set" % s[1]) rc = False elif len(s) == 3: user_data["%s:%s" % (pfx,s[1])] = s[2] else: common_err("%s: syntax error" % inp) elif s[0] == "%generate": if check_transition(inp,state,(DATA,)): state = GENERATE piece = [] elif state == GENERATE: if s: piece.append(s) else: common_err("<%s> unexpected" % inp) if piece: l.append(piece) err_buf.stop_tmp_lineno() f.close() if not rc: return '' return self.generate(l,user_data) def new(self,cmd,name,*args): "usage: new