diff --git a/GNUmakefile b/GNUmakefile index 6614d5c4ce..5a8e1a30ec 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,377 +1,375 @@ # # Copyright 2008-2018 Andrew Beekhof # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # default: $(shell test ! -e configure && echo init) $(shell test -e configure && echo core) -include Makefile PACKAGE ?= pacemaker # Force 'make dist' to be consistent with 'make export' distdir = $(PACKAGE)-$(TAG) TARFILE = $(PACKAGE)-$(SHORTTAG).tar.gz DIST_ARCHIVES = $(TARFILE) RPM_ROOT = $(shell pwd) RPM_OPTS = --define "_sourcedir $(RPM_ROOT)" \ --define "_specdir $(RPM_ROOT)" \ --define "_srcrpmdir $(RPM_ROOT)" \ MOCK_OPTIONS ?= --resultdir=$(RPM_ROOT)/mock --no-cleanup-after # Default to building Fedora-compliant spec files # SLES: /etc/SuSE-release # openSUSE: /etc/SuSE-release # RHEL: /etc/redhat-release, /etc/system-release # Fedora: /etc/fedora-release, /etc/redhat-release, /etc/system-release # CentOS: /etc/centos-release, /etc/redhat-release, /etc/system-release F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora}) ARCH ?= $(shell test -e /etc/fedora-release && rpm --eval %{_arch}) MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH)) DISTRO ?= $(shell test -e /etc/SuSE-release && echo suse; echo fedora) COMMIT ?= HEAD TAG ?= $(shell T=$$(git describe --all '$(COMMIT)' | sed -n 's|tags/\(.*\)|\1|p'); \ test -n "$${T}" && echo "$${T}" \ || git log --pretty=format:%H -n 1 '$(COMMIT)') lparen = ( rparen = ) SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*$(rparen) echo '$(TAG)' | cut -c11-;; \ *$(rparen) git log --pretty=format:%h -n 1 '$(TAG)';; esac) SHORTTAG_ABBREV = $(shell printf %s '$(SHORTTAG)' | wc -c) WITH ?= --without doc #WITH ?= --without=doc --with=gcov LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1) ifneq ($(origin VERSION), undefined) LAST_RELEASE ?= Pacemaker-$(VERSION) else LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1) endif NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}') BUILD_COUNTER ?= build.counter LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER)) COUNT = $(shell expr 1 + $(LAST_COUNT)) SPECVERSION ?= $(COUNT) # toplevel rsync destination for www targets (without trailing slash) RSYNC_DEST ?= root@www.clusterlabs.org:/var/www/html # recursive, preserve symlinks/permissions/times, verbose, compress, # don't cross filesystems, sparse, show progress RSYNC_OPTS = -rlptvzxS --progress # rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros # # Unfortunately, at least recent versions of rpm do not support mentioned # switch. To work this around, we can emulate mechanism that rpm uses # internally: unfold the flags into respective macro definitions: # # --with[out] FOO -> --define "_with[out]_FOO --with[out]-FOO" # # $(1) ... WITH string (e.g., --with pre_release --without cman) # $(2) ... options following the initial "rpmbuild" in the command # $(3) ... final arguments determined with $2 (e.g., pacemaker.spec) # # Note that if $(3) is a specfile, extra case is taken so as to reflect # pcmkversion correctly (using in-place modification). 
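#
# For example (an illustrative expansion, not the output of an actual run),
# WITH="--with pre_release --without doc" would be unfolded into roughly:
#
#   rpmbuild --define "_with_pre_release --with-pre_release" \
#            --define "_without_doc --without-doc" <other options> pacemaker.spec
#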
# # Also note that both ways to specify long option with an argument # (i.e., what getopt and, importantly, rpm itself support) can be used: # # --with FOO # --with=FOO rpmbuild-with = \ WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \ CMD='rpmbuild $(2)'; PREREL=0; \ eval set -- "$${WITH}"; \ while true; do \ case "$$1" in \ --with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \ [ "$$2" != pre_release ] || PREREL=1; shift 2;; \ --without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \ [ "$$2" != pre_release ] || PREREL=0; shift 2;; \ --) shift ; break ;; \ *) echo "cannot parse WITH: $$1"; exit 1;; \ esac; \ done; \ case "$(3)" in \ *.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \ && sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \ || sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \ esac; \ CMD="$${CMD} $(3)"; \ eval "$${CMD}" init: ./autogen.sh init export: rm -f $(PACKAGE)-dirty.tar.* $(PACKAGE)-tip.tar.* $(PACKAGE)-HEAD.tar.* if [ ! -f $(TARFILE) ]; then \ rm -f $(PACKAGE).tar.*; \ if [ $(TAG) = dirty ]; then \ git commit -m "DO-NOT-PUSH" -a; \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" HEAD^{tree}; \ git reset --mixed HEAD^; \ else \ git archive --prefix=$(distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \ fi; \ echo `date`: Rebuilt $(TARFILE); \ else \ echo `date`: Using existing tarball: $(TARFILE); \ fi $(PACKAGE)-opensuse.spec: $(PACKAGE)-suse.spec cp $^ $@ @echo Rebuilt $@ $(PACKAGE)-suse.spec: $(PACKAGE).spec.in GNUmakefile rm -f $@ if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \ cp $(PACKAGE).spec.in $@; \ echo "Rebuilt $@ (local modifications)"; \ elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \ cp $(PACKAGE).spec.in $@; \ echo "Rebuilt $@"; \ else \ git show $(TAG):$(PACKAGE).spec.in >> $@; \ echo "Rebuilt $@ from $(TAG)"; \ fi sed -i \ -e 's:%{_docdir}/%{name}:%{_docdir}/%{name}-%{version}:g' \ -e 's:%{name}-libs:lib%{name}3:g' \ -e 's@Requires:\( *\)%{python_pkg}\(-devel\)\? 
@Requires:\1python-curses python-xml %{python_pkg}\2 @' \ -e 's: libtool-ltdl-devel\(%{?_isa}\)\?::g' \ -e 's:bzip2-devel:libbz2-devel:g' \ -e 's:docbook-style-xsl:docbook-xsl-stylesheets:g' \ -e 's: byacc::g' \ -e 's:gnutls-devel:libgnutls-devel:g' \ -e 's:corosynclib:libcorosync:g' \ -e 's:cluster-glue-libs:libglue:g' \ -e 's:shadow-utils:shadow:g' \ -e 's: publican::g' \ -e 's: 189: 90:g' \ -e 's:%{_libexecdir}/lcrso:%{_libdir}/lcrso:g' \ -e 's:procps-ng:procps:g' \ $@ @echo "Applied SUSE-specific modifications" # Works for all fedora based distros $(PACKAGE)-%.spec: $(PACKAGE).spec.in rm -f $@ if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \ cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \ echo "Rebuilt $@ (local modifications)"; \ elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \ cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \ echo "Rebuilt $@"; \ else \ git show $(TAG):$(PACKAGE).spec.in >> $(PACKAGE)-$*.spec; \ echo "Rebuilt $@ from $(TAG)"; \ fi srpm-%: export $(PACKAGE)-%.spec rm -f *.src.rpm cp $(PACKAGE)-$*.spec $(PACKAGE).spec echo "* $(shell date +"%a %b %d %Y") Andrew Beekhof $(shell git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)-1" >> $(PACKAGE).spec echo " - See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for full details" >> $(PACKAGE).spec if [ -e $(BUILD_COUNTER) ]; then \ echo $(COUNT) > $(BUILD_COUNTER); \ fi sed -e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \ -e 's/global\ commit\ .*/global\ commit\ $(TAG)/' \ -e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SHORTTAG_ABBREV)/' \ -i $(PACKAGE).spec $(call rpmbuild-with,$(WITH),-bs --define "dist .$*" $(RPM_OPTS),$(PACKAGE).spec) chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG) echo "Done" mock-next: make F=$(shell expr 1 + $(F)) mock mock-rawhide: make F=rawhide mock mock-install-%: echo "Installing packages" mock --root=$* $(MOCK_OPTIONS) --install $(RPM_ROOT)/mock/*.rpm vi sudo valgrind lcov gdb fence-agents psmisc mock-install: mock-install-$(MOCK_CFG) echo "Done" mock-sh: mock-sh-$(MOCK_CFG) echo "Done" mock-sh-%: echo "Connecting" mock --root=$* $(MOCK_OPTIONS) --shell echo "Done" # eg. 
WITH="--with cman" make rpm mock-%: make srpm-$(firstword $(shell echo $(@:mock-%=%) | tr '-' ' ')) -rm -rf $(RPM_ROOT)/mock @echo "mock --root=$* --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm" mock --root=$* --no-cleanup-after --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm srpm: srpm-$(DISTRO) echo "Done" mock: mock-$(MOCK_CFG) echo "Done" rpm-dep: $(PACKAGE)-$(DISTRO).spec if [ x != x`which yum-builddep 2>/dev/null` ]; then \ echo "Installing with yum-builddep"; \ sudo yum-builddep $(PACKAGE)-$(DISTRO).spec; \ elif [ x != x`which yum 2>/dev/null` ]; then \ echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \ sudo yum install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' '); \ elif [ x != x`which zypper` ]; then \ echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \ sudo zypper install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ');\ else \ echo "I don't know how to install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')";\ fi rpm: srpm @echo To create custom builds, edit the flags and options in $(PACKAGE).spec first $(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_ROOT)/*.src.rpm) release: make TAG=$(LAST_RELEASE) rpm rc: make TAG=$(LAST_RC) rpm dirty: make TAG=dirty mock COVERITY_DIR = $(shell pwd)/coverity-$(TAG) COVFILE = $(PACKAGE)-coverity-$(TAG).tgz COVHOST ?= scan5.coverity.com COVPASS ?= password # Public coverity coverity: test -e configure || ./autogen.sh test -e Makefile || ./configure make core-clean rm -rf $(COVERITY_DIR) cov-build --dir $(COVERITY_DIR) make core tar czf $(COVFILE) --transform=s@.*$(TAG)@cov-int@ $(COVERITY_DIR) @echo "Uploading to public Coverity instance..." curl --form file=@$(COVFILE) --form project=$(PACKAGE) --form password=$(COVPASS) --form email=andrew@beekhof.net http://$(COVHOST)/cgi-bin/upload.py rm -rf $(COVFILE) $(COVERITY_DIR) coverity-corp: test -e configure || ./autogen.sh test -e Makefile || ./configure make core-clean rm -rf $(COVERITY_DIR) cov-build --dir $(COVERITY_DIR) make core @echo "Waiting for a corporate Coverity license..." cov-analyze --dir $(COVERITY_DIR) --wait-for-license cov-format-errors --dir $(COVERITY_DIR) --emacs-style > $(TAG).coverity cov-format-errors --dir $(COVERITY_DIR) rsync $(RSYNC_OPTS) "$(COVERITY_DIR)/c/output/errors/" "$(RSYNC_DEST)/$(PACKAGE)/coverity/$(TAG)/" make core-clean # cov-commit-defects --host $(COVHOST) --dir $(COVERITY_DIR) --stream $(PACKAGE) --user auto --password $(COVPASS) rm -rf $(COVERITY_DIR) global: clean-generic gtags -q global-upload: global htags -sanhIT rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/$(PACKAGE)/global/$(TAG)/" %.8.html: %.8 echo groff -mandoc `man -w ./$<` -T html > $@ groff -mandoc `man -w ./$<` -T html > $@ rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/" %.7.html: %.7 echo groff -mandoc `man -w ./$<` -T html > $@ groff -mandoc `man -w ./$<` -T html > $@ rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/" manhtml-upload: all find . 
-name "[a-z]*.[78]" -exec make \{\}.html \; doxygen: Doxyfile doxygen Doxyfile doxygen-upload: doxygen rsync $(RSYNC_OPTS) doc/api/html/ "$(RSYNC_DEST)/$(PACKAGE)/doxygen/$(TAG)/" abi: ./abi-check pacemaker $(LAST_RELEASE) $(TAG) abi-www: export RSYNC_DEST=$(RSYNC_DEST); ./abi-check -u pacemaker $(LAST_RELEASE) $(TAG) www: manhtml-upload global-upload doxygen-upload make RSYNC_DEST=$(RSYNC_DEST) -C doc www summary: @printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)-1" @printf "\n- Changesets: `git log --pretty=oneline $(LAST_RELEASE)..HEAD | wc -l`" @printf "\n- Diff: " @git diff $(LAST_RELEASE)..HEAD --shortstat include lib daemons tools xml rc-changes: @make NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes changes: summary @printf "\n- Features added since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf @printf "\n- Changes since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e High: -e Fix: -e Bug | sed -e 's@Fix:@@' -e s@High:@@ -e s@Fencing:@fencing:@ -e 's@Bug@ Bug@' -e s@PE:@scheduler:@ -e s@pengine:@scheduler:@ | sort -uf changelog: @make changes > ChangeLog @printf "\n">> ChangeLog git show $(LAST_RELEASE):ChangeLog >> ChangeLog @echo -e "\033[1;35m -- Don't forget to run the bumplibs.sh script! --\033[0m" DO_NOT_INDENT = lib/gnu daemons/controld/controld_fsa.h indent: find . -name "*.[ch]" -exec ./p-indent \{\} \; git co HEAD $(DO_NOT_INDENT) rel-tags: tags find . -name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \; CLANG_analyzer = $(shell which scan-build) CLANG_checkers = # Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.: # --enable={warning,style,performance,portability,information,all} # --inconclusive --std=posix CPPCHECK_ARGS ?= cppcheck: - for d in replace lib daemons tools; \ - do cppcheck $(CPPCHECK_ARGS) -q $$d; \ - done + cppcheck $(CPPCHECK_ARGS) -I include --max-configs=25 -q replace lib daemons tools clang: test -e $(CLANG_analyzer) scan-build $(CLANG_checkers:%=-enable-checker %) make clean all # V3 = scandir unsetenv alphasort xalloc # V2 = setenv strerror strchrnul strndup # http://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import GNU_MODS = crypto/md5 gnulib-update: -test ! -e gnulib && git clone git://git.savannah.gnu.org/gnulib.git cd gnulib && git pull gnulib/gnulib-tool --source-base=lib/gnu --lgpl=2 --no-vc-files --import $(GNU_MODS) diff --git a/lib/common/operations.c b/lib/common/operations.c index 3b45bf02b3..40d4c18214 100644 --- a/lib/common/operations.c +++ b/lib/common/operations.c @@ -1,584 +1,585 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include /*! * \brief Generate an operation key * * \param[in] rsc_id ID of resource being operated on * \param[in] op_type Operation name * \param[in] interval_ms Operation interval * * \return Newly allocated memory containing operation key as string * * \note It is the caller's responsibility to free() the result. 
*/ char * generate_op_key(const char *rsc_id, const char *op_type, guint interval_ms) { CRM_ASSERT(rsc_id != NULL); CRM_ASSERT(op_type != NULL); return crm_strdup_printf(CRM_OP_FMT, rsc_id, op_type, interval_ms); } gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms) { char *notify = NULL; char *mutable_key = NULL; char *mutable_key_ptr = NULL; size_t len = 0, offset = 0; unsigned long long ch = 0; guint local_interval_ms = 0; // Initialize output variables in case of early return if (rsc_id) { *rsc_id = NULL; } if (op_type) { *op_type = NULL; } if (interval_ms) { *interval_ms = 0; } CRM_CHECK(key && *key, return FALSE); // Parse interval at end of string len = strlen(key); offset = len - 1; while ((offset > 0) && isdigit(key[offset])) { ch = key[offset] - '0'; for (int digits = len - offset; digits > 1; --digits) { ch = ch * 10; } local_interval_ms += ch; offset--; } crm_trace("Operation key '%s' has interval %ums", key, local_interval_ms); if (interval_ms) { *interval_ms = local_interval_ms; } CRM_CHECK((offset != (len - 1)) && (key[offset] == '_'), return FALSE); mutable_key = strndup(key, offset); offset--; while (offset > 0 && key[offset] != '_') { offset--; } CRM_CHECK(key[offset] == '_', free(mutable_key); return FALSE); mutable_key_ptr = mutable_key + offset + 1; crm_trace(" Action: %s", mutable_key_ptr); if (op_type) { *op_type = strdup(mutable_key_ptr); } mutable_key[offset] = 0; offset--; notify = strstr(mutable_key, "_post_notify"); if (notify && safe_str_eq(notify, "_post_notify")) { notify[0] = 0; } notify = strstr(mutable_key, "_pre_notify"); if (notify && safe_str_eq(notify, "_pre_notify")) { notify[0] = 0; } crm_trace(" Resource: %s", mutable_key); if (rsc_id) { *rsc_id = mutable_key; } else { free(mutable_key); } return TRUE; } char * generate_notify_key(const char *rsc_id, const char *notify_type, const char *op_type) { CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); CRM_CHECK(notify_type != NULL, return NULL); return crm_strdup_printf("%s_%s_notify_%s_0", rsc_id, notify_type, op_type); } char * generate_transition_magic(const char *transition_key, int op_status, int op_rc) { CRM_CHECK(transition_key != NULL, return NULL); return crm_strdup_printf("%d:%d;%s", op_status, op_rc, transition_key); } gboolean decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc) { int res = 0; char *key = NULL; gboolean result = TRUE; CRM_CHECK(magic != NULL, return FALSE); CRM_CHECK(op_rc != NULL, return FALSE); CRM_CHECK(op_status != NULL, return FALSE); key = calloc(1, strlen(magic) + 1); res = sscanf(magic, "%d:%d;%s", op_status, op_rc, key); if (res != 3) { crm_warn("Only found %d items in: '%s'", res, magic); free(key); return FALSE; } CRM_CHECK(decode_transition_key(key, uuid, transition_id, action_id, target_rc), result = FALSE); free(key); return result; } char * generate_transition_key(int transition_id, int action_id, int target_rc, const char *node) { CRM_CHECK(node != NULL, return NULL); return crm_strdup_printf("%d:%d:%d:%-*s", action_id, transition_id, target_rc, 36, node); } gboolean decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id, int *target_rc) { CRM_CHECK(uuid != NULL, return FALSE); CRM_CHECK(target_rc != NULL, return FALSE); CRM_CHECK(action_id != NULL, return FALSE); CRM_CHECK(transition_id != NULL, return FALSE); *uuid = calloc(37, sizeof(char)); if (sscanf(key, "%d:%d:%d:%36s", 
action_id, transition_id, target_rc, *uuid) != 4) { crm_err("Invalid transition key '%s'", key); free(*uuid); *uuid = NULL; *target_rc = -1; *action_id = -1; *transition_id = -1; return FALSE; } if (strlen(*uuid) != 36) { crm_warn("Invalid UUID '%s' in transition key '%s'", *uuid, key); } return TRUE; } void filter_action_parameters(xmlNode * param_set, const char *version) { char *key = NULL; char *timeout = NULL; char *interval_ms_s = NULL; const char *attr_filter[] = { XML_ATTR_ID, XML_ATTR_CRM_VERSION, XML_LRM_ATTR_OP_DIGEST, XML_LRM_ATTR_TARGET, XML_LRM_ATTR_TARGET_UUID, "pcmk_external_ip" }; gboolean do_delete = FALSE; int lpc = 0; static int meta_len = 0; if (meta_len == 0) { meta_len = strlen(CRM_META); } if (param_set == NULL) { return; } for (lpc = 0; lpc < DIMOF(attr_filter); lpc++) { xml_remove_prop(param_set, attr_filter[lpc]); } key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); interval_ms_s = crm_element_value_copy(param_set, key); free(key); key = crm_meta_name(XML_ATTR_TIMEOUT); timeout = crm_element_value_copy(param_set, key); if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; do_delete = FALSE; if (strncasecmp(prop_name, CRM_META, meta_len) == 0) { do_delete = TRUE; } if (do_delete) { xml_remove_prop(param_set, prop_name); } } } if (interval_ms_s && strcmp(interval_ms_s, "0")) { /* Re-instate the operation's timeout value */ if (timeout != NULL) { crm_xml_add(param_set, key, timeout); } } free(interval_ms_s); free(timeout); free(key); } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" static void append_digest(lrmd_event_data_t * op, xmlNode * update, const char *version, const char *magic, int level) { /* this will enable us to later determine that the * resource's parameters have changed and we should force * a restart */ char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); filter_action_parameters(args_xml, version); digest = calculate_operation_digest(args_xml, version); #if 0 if (level < get_crm_log_level() && op->interval_ms == 0 && crm_str_eq(op->op_type, CRMD_ACTION_START, TRUE)) { char *digest_source = dump_xml_unformatted(args_xml); do_crm_log(level, "Calculated digest %s for %s (%s). Source: %s\n", digest, ID(update), magic, digest_source); free(digest_source); } #endif crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } int rsc_op_expected_rc(lrmd_event_data_t * op) { int rc = 0; if (op && op->user_data) { int dummy = 0; char *uuid = NULL; decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &rc); free(uuid); } return rc; } gboolean did_rsc_op_fail(lrmd_event_data_t * op, int target_rc) { switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: case PCMK_LRM_OP_PENDING: return FALSE; break; case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_ERROR: return TRUE; break; default: if (target_rc != op->rc) { return TRUE; } } return FALSE; } /*! 
* \brief Create a CIB XML element for an operation * * \param[in] parent If not NULL, make new XML node a child of this one * \param[in] prefix Generate an ID using this prefix * \param[in] task Operation task to set * \param[in] interval_spec Operation interval to set * \param[in] timeout If not NULL, operation timeout to set * * \return New XML object on success, NULL otherwise */ xmlNode * crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task, const char *interval_spec, const char *timeout) { xmlNode *xml_op; CRM_CHECK(prefix && task && interval_spec, return NULL); xml_op = create_xml_node(parent, XML_ATTR_OP); crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval_spec); crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval_spec); crm_xml_add(xml_op, "name", task); if (timeout) { crm_xml_add(xml_op, XML_ATTR_TIMEOUT, timeout); } return xml_op; } xmlNode * create_operation_update(xmlNode * parent, lrmd_event_data_t * op, const char * caller_version, int target_rc, const char * node, const char * origin, int level) { char *key = NULL; char *magic = NULL; char *op_id = NULL; char *op_id_additional = NULL; char *local_user_data = NULL; const char *exit_reason = NULL; xmlNode *xml_op = NULL; const char *task = NULL; CRM_CHECK(op != NULL, return NULL); do_crm_log(level, "%s: Updating resource %s after %s op %s (interval=%u)", origin, op->rsc_id, op->op_type, services_lrm_status_str(op->op_status), op->interval_ms); crm_trace("DC version: %s", caller_version); task = op->op_type; /* Remap the task name under various scenarios, to make life easier for the * PE when determining the current state. */ if (crm_str_eq(task, "reload", TRUE)) { if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } else if (crm_str_eq(task, CRMD_ACTION_MIGRATE, TRUE)) { /* if the migrate_from fails it will have enough info to do the right thing */ if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_STOP; } else { task = CRMD_ACTION_STATUS; } } else if ((op->op_status == PCMK_LRM_OP_DONE) && crm_str_eq(task, CRMD_ACTION_MIGRATED, TRUE)) { task = CRMD_ACTION_START; } key = generate_op_key(op->rsc_id, task, op->interval_ms); if (crm_str_eq(task, CRMD_ACTION_NOTIFY, TRUE)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = generate_notify_key(op->rsc_id, n_type, n_task); if (op->op_status != PCMK_LRM_OP_PENDING) { /* Ignore notify errors. * * @TODO It might be better to keep the correct result here, and * ignore it in process_graph_event(). 
*/ op->op_status = PCMK_LRM_OP_DONE; op->rc = 0; } } else if (did_rsc_op_fail(op, target_rc)) { op_id = generate_op_key(op->rsc_id, "last_failure", 0); if (op->interval_ms == 0) { // Ensure 'last' gets updated, in case record-pending is true op_id_additional = generate_op_key(op->rsc_id, "last", 0); } exit_reason = op->exit_reason; } else if (op->interval_ms > 0) { op_id = strdup(key); } else { op_id = generate_op_key(op->rsc_id, "last", 0); } again: xml_op = find_entity(parent, XML_LRM_TAG_RSC_OP, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for: " CRM_OP_FMT " %d from %s", op->rsc_id, op->op_type, op->interval_ms, op->call_id, origin); local_user_data = generate_transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } if(magic == NULL) { magic = generate_transition_magic(op->user_data, op->op_status, op->rc); } crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? "" : exit_reason); crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */ crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (" CRM_OP_FMT "): last=%u change=%u exec=%u queue=%u", op->rsc_id, op->op_type, op->interval_ms, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if (op->interval_ms == 0) { /* The values are the same for non-recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_RUN, op->t_run); crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } else if(op->t_rcchange) { /* last-run is not accurate for recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_rcchange); } else { /* ...but is better than nothing otherwise */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (crm_str_eq(op->op_type, CRMD_ACTION_MIGRATE, TRUE) || crm_str_eq(op->op_type, CRMD_ACTION_MIGRATED, TRUE)) { /* * Record migrate_source and migrate_target always for migrate ops. */ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } append_digest(op, xml_op, caller_version, magic, LOG_DEBUG); if (op_id_additional) { free(op_id); op_id = op_id_additional; op_id_additional = NULL; goto again; } if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } /*! 
* \brief Check whether an operation requires resource agent meta-data * * \param[in] rsc_class Resource agent class (or NULL to skip class check) * \param[in] op Operation action (or NULL to skip op check) * * \return TRUE if operation needs meta-data, FALSE otherwise * \note At least one of rsc_class and op must be specified. */ bool crm_op_needs_metadata(const char *rsc_class, const char *op) { /* Agent meta-data is used to determine whether a reload is possible, and to * evaluate versioned parameters -- so if this op is not relevant to those * features, we don't need the meta-data. */ CRM_CHECK(rsc_class || op, return FALSE); - if (is_set(pcmk_get_ra_caps(rsc_class), pcmk_ra_cap_params)) { + if (rsc_class + && is_not_set(pcmk_get_ra_caps(rsc_class), pcmk_ra_cap_params)) { /* Meta-data is only needed for resource classes that use parameters */ return FALSE; } /* Meta-data is only needed for these actions */ if (op && strcmp(op, CRMD_ACTION_START) && strcmp(op, CRMD_ACTION_STATUS) && strcmp(op, CRMD_ACTION_PROMOTE) && strcmp(op, CRMD_ACTION_DEMOTE) && strcmp(op, CRMD_ACTION_RELOAD) && strcmp(op, CRMD_ACTION_MIGRATE) && strcmp(op, CRMD_ACTION_MIGRATED) && strcmp(op, CRMD_ACTION_NOTIFY)) { return FALSE; } return TRUE; } diff --git a/tools/crm_mon.c b/tools/crm_mon.c index 2147eced25..671bee392d 100644 --- a/tools/crm_mon.c +++ b/tools/crm_mon.c @@ -1,4446 +1,4451 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* crm_ends_with_ext */ #include #include #include #include #include #include #include #include <../lib/pengine/unpack.h> #include #include extern void cleanup_alloc_calculations(pe_working_set_t * data_set); static void clean_up_connections(void); -static void clean_up(crm_exit_t exit_code); +static crm_exit_t clean_up(crm_exit_t exit_code); static void crm_diff_update(const char *event, xmlNode * msg); static gboolean mon_refresh_display(gpointer user_data); static int cib_connect(gboolean full); static void mon_st_callback_event(stonith_t * st, stonith_event_t * e); static void mon_st_callback_display(stonith_t * st, stonith_event_t * e); static void kick_refresh(gboolean data_updated); static char *get_node_display_name(node_t *node); /* * Definitions indicating which items to print */ #define mon_show_times (0x0001U) #define mon_show_stack (0x0002U) #define mon_show_dc (0x0004U) #define mon_show_count (0x0008U) #define mon_show_nodes (0x0010U) #define mon_show_resources (0x0020U) #define mon_show_attributes (0x0040U) #define mon_show_failcounts (0x0080U) #define mon_show_operations (0x0100U) #define mon_show_tickets (0x0200U) #define mon_show_bans (0x0400U) #define mon_show_fence_history (0x0800U) #define mon_show_headers (mon_show_times | mon_show_stack | mon_show_dc \ | mon_show_count) #define mon_show_default (mon_show_headers | mon_show_nodes \ | mon_show_resources) #define mon_show_all (mon_show_default | mon_show_attributes \ | mon_show_failcounts | mon_show_operations \ | mon_show_tickets | mon_show_bans \ | mon_show_fence_history) static unsigned int show = mon_show_default; /* * Definitions indicating how to output */ enum mon_output_format_e { mon_output_none, mon_output_monitor, mon_output_plain, mon_output_console, mon_output_xml, mon_output_html, mon_output_cgi } output_format = mon_output_console; static 
char *output_filename = NULL; /* if sending output to a file, its name */ /* other globals */ static char *pid_file = NULL; static gboolean group_by_node = FALSE; static gboolean inactive_resources = FALSE; static int reconnect_msec = 5000; static gboolean daemonize = FALSE; static GMainLoop *mainloop = NULL; static guint timer_id = 0; static mainloop_timer_t *refresh_timer = NULL; static GList *attr_list = NULL; static const char *external_agent = NULL; static const char *external_recipient = NULL; static cib_t *cib = NULL; static stonith_t *st = NULL; static xmlNode *current_cib = NULL; static gboolean one_shot = FALSE; static gboolean has_warnings = FALSE; static gboolean print_timing = FALSE; static gboolean watch_fencing = FALSE; static gboolean fence_history = FALSE; static gboolean fence_full_history = FALSE; static gboolean fence_connect = FALSE; static int fence_history_level = 1; static gboolean print_brief = FALSE; static gboolean print_pending = TRUE; static gboolean print_clone_detail = FALSE; #if CURSES_ENABLED static gboolean curses_console_initialized = FALSE; #endif /* FIXME allow, detect, and correctly interpret glob pattern or regex? */ const char *print_neg_location_prefix = ""; /* Never display node attributes whose name starts with one of these prefixes */ #define FILTER_STR { CRM_FAIL_COUNT_PREFIX, CRM_LAST_FAILURE_PREFIX, \ "shutdown", "terminate", "standby", "probe_complete", \ "#", NULL } long last_refresh = 0; crm_trigger_t *refresh_trigger = NULL; /* Define exit codes for monitoring-compatible output * For nagios plugins, the possibilities are * OK=0, WARN=1, CRIT=2, and UNKNOWN=3 */ #define MON_STATUS_WARN CRM_EX_ERROR #define MON_STATUS_CRIT CRM_EX_INVALID_PARAM #define MON_STATUS_UNKNOWN CRM_EX_UNIMPLEMENT_FEATURE /* Convenience macro for prettifying output (e.g. "node" vs "nodes") */ #define s_if_plural(i) (((i) == 1)? "" : "s") #if CURSES_ENABLED # define print_dot() if (output_format == mon_output_console) { \ printw("."); \ clrtoeol(); \ refresh(); \ } else { \ fprintf(stdout, "."); \ } #else # define print_dot() fprintf(stdout, "."); #endif #if CURSES_ENABLED # define print_as(fmt, args...) if (output_format == mon_output_console) { \ printw(fmt, ##args); \ clrtoeol(); \ refresh(); \ } else { \ fprintf(stdout, fmt, ##args); \ } #else # define print_as(fmt, args...) 
fprintf(stdout, fmt, ##args); #endif static void blank_screen(void) { #if CURSES_ENABLED int lpc = 0; for (lpc = 0; lpc < LINES; lpc++) { move(lpc, 0); clrtoeol(); } move(0, 0); refresh(); #endif } static gboolean mon_timer_popped(gpointer data) { int rc = pcmk_ok; #if CURSES_ENABLED if (output_format == mon_output_console) { clear(); refresh(); } #endif if (timer_id > 0) { g_source_remove(timer_id); timer_id = 0; } print_as("Reconnecting...\n"); rc = cib_connect(TRUE); if (rc != pcmk_ok) { timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL); } return FALSE; } static void mon_cib_connection_destroy(gpointer user_data) { print_as("Connection to the cluster-daemons terminated\n"); if (refresh_timer != NULL) { /* we'll trigger a refresh after reconnect */ mainloop_timer_stop(refresh_timer); } if (timer_id) { /* we'll trigger a new reconnect-timeout at the end */ g_source_remove(timer_id); timer_id = 0; } if (st) { /* the client API won't properly reconnect notifications * if they are still in the table - so remove them */ st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); if (st->state != stonith_disconnected) { st->cmds->disconnect(st); } } if (cib) { cib->cmds->signoff(cib); timer_id = g_timeout_add(reconnect_msec, mon_timer_popped, NULL); } return; } /* * Mainloop signal handler. */ static void mon_shutdown(int nsig) { clean_up(CRM_EX_OK); } #if ON_DARWIN # define sighandler_t sig_t #endif #if CURSES_ENABLED # ifndef HAVE_SIGHANDLER_T typedef void (*sighandler_t) (int); # endif static sighandler_t ncurses_winch_handler; static void mon_winresize(int nsig) { static int not_done; int lines = 0, cols = 0; if (!not_done++) { if (ncurses_winch_handler) /* the original ncurses WINCH signal handler does the * magic of retrieving the new window size; * otherwise, we'd have to use ioctl or tgetent */ (*ncurses_winch_handler) (SIGWINCH); getmaxyx(stdscr, lines, cols); resizeterm(lines, cols); mainloop_set_trigger(refresh_trigger); } not_done--; } #endif static int cib_connect(gboolean full) { int rc = pcmk_ok; static gboolean need_pass = TRUE; CRM_CHECK(cib != NULL, return -EINVAL); if (getenv("CIB_passwd") != NULL) { need_pass = FALSE; } if ((fence_connect) && (st == NULL)) { st = stonith_api_new(); } if ((fence_connect) && (st->state == stonith_disconnected)) { crm_trace("Connecting to stonith"); rc = st->cmds->connect(st, crm_system_name, NULL); if (rc == pcmk_ok) { crm_trace("Setting up stonith callbacks"); if (watch_fencing) { st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, mon_st_callback_event); st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, mon_st_callback_event); } else { st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, mon_st_callback_display); st->cmds->register_notification(st, T_STONITH_NOTIFY_HISTORY, mon_st_callback_display); } } } if (cib->state != cib_connected_query && cib->state != cib_connected_command) { crm_trace("Connecting to the CIB"); if ((output_format == mon_output_console) && need_pass && (cib->variant == cib_remote)) { need_pass = FALSE; print_as("Password:"); } rc = cib->cmds->signon(cib, crm_system_name, cib_query); if (rc != pcmk_ok) { return rc; } rc = cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); if (rc == pcmk_ok) { mon_refresh_display(NULL); } if (rc == pcmk_ok && full) { if (rc == pcmk_ok) { rc = cib->cmds->set_connection_dnotify(cib, 
mon_cib_connection_destroy); if (rc == -EPROTONOSUPPORT) { print_as ("Notification setup not supported, won't be able to reconnect after failure"); if (output_format == mon_output_console) { sleep(2); } rc = pcmk_ok; } } if (rc == pcmk_ok) { cib->cmds->del_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); rc = cib->cmds->add_notify_callback(cib, T_CIB_DIFF_NOTIFY, crm_diff_update); } if (rc != pcmk_ok) { print_as("Notification setup failed, could not monitor CIB actions"); if (output_format == mon_output_console) { sleep(2); } clean_up_connections(); } } } return rc; } /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"quiet", 0, 0, 'Q', "\tDisplay only essential output" }, {"-spacer-", 1, 0, '-', "\nModes (mutually exclusive):"}, {"as-html", 1, 0, 'h', "\tWrite cluster status to the named html file"}, {"as-xml", 0, 0, 'X', "\t\tWrite cluster status as xml to stdout. This will enable one-shot mode."}, {"web-cgi", 0, 0, 'w', "\t\tWeb mode with output suitable for CGI (preselected when run as *.cgi)"}, {"simple-status", 0, 0, 's', "\tDisplay the cluster status once as a simple one line output (suitable for nagios)"}, {"-spacer-", 1, 0, '-', "\nDisplay Options:"}, {"group-by-node", 0, 0, 'n', "\tGroup resources by node" }, {"inactive", 0, 0, 'r', "\t\tDisplay inactive resources" }, {"failcounts", 0, 0, 'f', "\tDisplay resource fail counts"}, {"operations", 0, 0, 'o', "\tDisplay resource operation history" }, {"timing-details", 0, 0, 't', "\tDisplay resource operation history with timing details" }, {"tickets", 0, 0, 'c', "\t\tDisplay cluster tickets"}, {"watch-fencing", 0, 0, 'W', "\tListen for fencing events. 
For use with --external-agent"}, {"fence-history", 2, 0, 'm', "Show fence history\n" "\t\t\t\t\t0=off, 1=failures and pending (default without option),\n" "\t\t\t\t\t2=add successes (default without value for option),\n" "\t\t\t\t\t3=show full history without reduction to most recent of each flavor"}, {"neg-locations", 2, 0, 'L', "Display negative location constraints [optionally filtered by id prefix]"}, {"show-node-attributes", 0, 0, 'A', "Display node attributes" }, {"hide-headers", 0, 0, 'D', "\tHide all headers" }, {"show-detail", 0, 0, 'R', "\tShow more details (node IDs, individual clone instances)" }, {"brief", 0, 0, 'b', "\t\tBrief output" }, {"pending", 0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden}, {"-spacer-", 1, 0, '-', "\nAdditional Options:"}, {"interval", 1, 0, 'i', "\tUpdate frequency in seconds" }, {"one-shot", 0, 0, '1', "\t\tDisplay the cluster status once on the console and exit"}, {"disable-ncurses",0, 0, 'N', "\tDisable the use of ncurses", !CURSES_ENABLED}, {"daemonize", 0, 0, 'd', "\tRun in the background as a daemon"}, {"pid-file", 1, 0, 'p', "\t(Advanced) Daemon pid file location"}, {"external-agent", 1, 0, 'E', "A program to run when resource operations take place."}, {"external-recipient",1, 0, 'e', "A recipient for your program (assuming you want the program to send something to someone)."}, {"xml-file", 1, 0, 'x', NULL, pcmk_option_hidden}, {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', "Display the cluster status on the console with updates as they occur:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Display the cluster status on the console just once then exit:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon -1", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Display your cluster status, group resources by node, and include inactive resources in the list:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon --group-by-node --inactive", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Start crm_mon as a background daemon and have it write the cluster status to an HTML file:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon --daemonize --as-html /path/to/docroot/filename.html", pcmk_option_example}, {"-spacer-", 1, 0, '-', "Start crm_mon and export the current cluster status as xml to stdout, then exit.:", pcmk_option_paragraph}, {"-spacer-", 1, 0, '-', " crm_mon --as-xml", pcmk_option_example}, {NULL, 0, 0, 0} }; /* *INDENT-ON* */ #if CURSES_ENABLED static const char * get_option_desc(char c) { int lpc; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].name[0] == '-') continue; if (long_options[lpc].val == c) { static char *buf = NULL; const char *rv; char *nl; /* chop off tabs and cut at newline */ free(buf); /* free string from last usage */ buf = strdup(long_options[lpc].desc); rv = buf; /* make a copy to keep buf pointer unaltered for freeing when we come by next time. Like this the result stays valid until the next call. */ while(isspace(rv[0])) { rv++; } nl = strchr(rv, '\n'); if (nl) { *nl = '\0'; } return rv; } } return NULL; } #define print_option_help(option, condition) \ print_as("%c %c: \t%s\n", ((condition)? 
'*': ' '), option, get_option_desc(option)); static gboolean detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer unused) { int c; gboolean config_mode = FALSE; while (1) { /* Get user input */ c = getchar(); switch (c) { case 'm': if (!fence_history_level) { fence_history = TRUE; fence_connect = TRUE; if (st == NULL) { mon_cib_connection_destroy(NULL); } } show ^= mon_show_fence_history; break; case 'c': show ^= mon_show_tickets; break; case 'f': show ^= mon_show_failcounts; break; case 'n': group_by_node = ! group_by_node; break; case 'o': show ^= mon_show_operations; if ((show & mon_show_operations) == 0) { print_timing = 0; } break; case 'r': inactive_resources = ! inactive_resources; break; case 'R': print_clone_detail = ! print_clone_detail; break; case 't': print_timing = ! print_timing; if (print_timing) { show |= mon_show_operations; } break; case 'A': show ^= mon_show_attributes; break; case 'L': show ^= mon_show_bans; break; case 'D': /* If any header is shown, clear them all, otherwise set them all */ if (show & mon_show_headers) { show &= ~mon_show_headers; } else { show |= mon_show_headers; } break; case 'b': print_brief = ! print_brief; break; case 'j': print_pending = ! print_pending; break; case '?': config_mode = TRUE; break; default: goto refresh; } if (!config_mode) goto refresh; blank_screen(); print_as("Display option change mode\n"); print_as("\n"); print_option_help('c', show & mon_show_tickets); print_option_help('f', show & mon_show_failcounts); print_option_help('n', group_by_node); print_option_help('o', show & mon_show_operations); print_option_help('r', inactive_resources); print_option_help('t', print_timing); print_option_help('A', show & mon_show_attributes); print_option_help('L', show & mon_show_bans); print_option_help('D', (show & mon_show_headers) == 0); print_option_help('R', print_clone_detail); print_option_help('b', print_brief); print_option_help('j', print_pending); print_option_help('m', (show & mon_show_fence_history)); print_as("\n"); print_as("Toggle fields via field letter, type any other key to return"); } refresh: mon_refresh_display(NULL); return TRUE; } #endif int main(int argc, char **argv) { int flag; int argerr = 0; int option_index = 0; int rc = pcmk_ok; pid_file = strdup("/tmp/ClusterMon.pid"); crm_log_cli_init("crm_mon"); crm_set_options(NULL, "mode [options]", long_options, "Provides a summary of cluster's current state." 
"\n\nOutputs varying levels of detail in a number of different formats.\n"); #if !defined (ON_DARWIN) && !defined (ON_BSD) /* prevent zombies */ signal(SIGCLD, SIG_IGN); #endif if (crm_ends_with_ext(argv[0], ".cgi") == TRUE) { output_format = mon_output_cgi; one_shot = TRUE; } /* to enable stonith-connection when called via some application like pcs * set environment-variable FENCE_HISTORY to desired level * so you don't have to modify this application */ /* fence_history_level = crm_atoi(getenv("FENCE_HISTORY"), "0"); */ while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 'Q': show &= ~mon_show_times; break; case 'i': reconnect_msec = crm_get_msec(optarg); break; case 'n': group_by_node = TRUE; break; case 'r': inactive_resources = TRUE; break; case 'W': watch_fencing = TRUE; fence_connect = TRUE; break; case 'm': fence_history_level = crm_atoi(optarg, "2"); break; case 'd': daemonize = TRUE; break; case 't': print_timing = TRUE; show |= mon_show_operations; break; case 'o': show |= mon_show_operations; break; case 'f': show |= mon_show_failcounts; break; case 'A': show |= mon_show_attributes; break; case 'L': show |= mon_show_bans; print_neg_location_prefix = optarg? optarg : ""; break; case 'D': show &= ~mon_show_headers; break; case 'b': print_brief = TRUE; break; case 'j': print_pending = TRUE; break; case 'R': print_clone_detail = TRUE; break; case 'c': show |= mon_show_tickets; break; case 'p': free(pid_file); if(optarg == NULL) { - crm_help(flag, CRM_EX_USAGE); + return crm_help(flag, CRM_EX_USAGE); } pid_file = strdup(optarg); break; case 'x': if(optarg == NULL) { - crm_help(flag, CRM_EX_USAGE); + return crm_help(flag, CRM_EX_USAGE); } setenv("CIB_file", optarg, 1); one_shot = TRUE; break; case 'h': if(optarg == NULL) { - crm_help(flag, CRM_EX_USAGE); + return crm_help(flag, CRM_EX_USAGE); } argerr += (output_format != mon_output_console); output_format = mon_output_html; output_filename = strdup(optarg); umask(S_IWGRP | S_IWOTH); break; case 'X': argerr += (output_format != mon_output_console); output_format = mon_output_xml; one_shot = TRUE; break; case 'w': /* do not allow argv[0] and argv[1...] redundancy */ argerr += (output_format != mon_output_console); output_format = mon_output_cgi; one_shot = TRUE; break; case 's': argerr += (output_format != mon_output_console); output_format = mon_output_monitor; one_shot = TRUE; break; case 'E': external_agent = optarg; break; case 'e': external_recipient = optarg; break; case '1': one_shot = TRUE; break; case 'N': if (output_format == mon_output_console) { output_format = mon_output_plain; } break; case '$': case '?': - crm_help(flag, CRM_EX_OK); + return crm_help(flag, CRM_EX_OK); break; default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); ++argerr; break; } } if (watch_fencing) { /* don't moan as fence_history_level == 1 is default */ fence_history_level = 0; } /* create the cib-object early to be able to do further * decisions based on the cib-source */ cib = cib_new(); if (cib == NULL) { rc = -EINVAL; } else { switch (cib->variant) { case cib_native: /* cib & fencing - everything available */ break; case cib_file: /* Don't try to connect to fencing as we * either don't have a running cluster or * the fencing-information would possibly * not match the cib data from a file. * As we don't expect cib-updates coming * in enforce one-shot. 
*/ fence_history_level = 0; one_shot = TRUE; break; case cib_remote: /* updates coming in but no fencing */ fence_history_level = 0; break; case cib_undefined: case cib_database: default: /* something is odd */ rc = -EINVAL; crm_err("Invalid cib-source"); break; } } switch (fence_history_level) { case 3: fence_full_history = TRUE; /* fall through to next lower level */ case 2: show |= mon_show_fence_history; /* fall through to next lower level */ case 1: fence_history = TRUE; fence_connect = TRUE; break; default: break; } /* Extra sanity checks when in CGI mode */ if (output_format == mon_output_cgi) { argerr += (optind < argc); argerr += (output_filename != NULL); argerr += ((cib) && (cib->variant == cib_file)); argerr += (external_agent != NULL); argerr += (daemonize == TRUE); /* paranoia */ } else if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); } if (argerr) { - clean_up(CRM_EX_USAGE); + return clean_up(CRM_EX_USAGE); } /* XML output always prints everything */ if (output_format == mon_output_xml) { show = mon_show_all; print_timing = TRUE; } if (one_shot) { if (output_format == mon_output_console) { output_format = mon_output_plain; } } else if (daemonize) { if ((output_format == mon_output_console) || (output_format == mon_output_plain)) { output_format = mon_output_none; } crm_enable_stderr(FALSE); if ((output_format != mon_output_html) && !external_agent) { printf ("Looks like you forgot to specify one or more of: " "--as-html, --external-agent\n"); - clean_up(CRM_EX_USAGE); + return clean_up(CRM_EX_USAGE); } if (cib) { /* to be on the safe side don't have cib-object around * when we are forking */ cib_delete(cib); cib = NULL; crm_make_daemon(crm_system_name, TRUE, pid_file); cib = cib_new(); if (cib == NULL) { rc = -EINVAL; } /* otherwise assume we've got the same cib-object we've just destroyed * in our parent */ } } else if (output_format == mon_output_console) { #if CURSES_ENABLED initscr(); cbreak(); noecho(); crm_enable_stderr(FALSE); curses_console_initialized = TRUE; #else one_shot = TRUE; output_format = mon_output_plain; printf("Defaulting to one-shot mode\n"); printf("You need to have curses available at compile time to enable console mode\n"); #endif } crm_info("Starting %s", crm_system_name); if (cib) { do { if (!one_shot) { print_as("Waiting until cluster is available on this node ...\n"); } rc = cib_connect(!one_shot); if (one_shot) { break; } else if (rc != pcmk_ok) { sleep(reconnect_msec / 1000); #if CURSES_ENABLED if (output_format == mon_output_console) { clear(); refresh(); } #endif } else { if (output_format == mon_output_html) { print_as("Writing html to %s ...\n", output_filename); } } } while (rc == -ENOTCONN); } if (rc != pcmk_ok) { if (output_format == mon_output_monitor) { printf("CLUSTER CRIT: Connection to cluster failed: %s\n", pcmk_strerror(rc)); - clean_up(MON_STATUS_CRIT); + return clean_up(MON_STATUS_CRIT); } else { if (rc == -ENOTCONN) { print_as("\nError: cluster is not available on this node\n"); } else { print_as("\nConnection to cluster failed: %s\n", pcmk_strerror(rc)); } } if (output_format == mon_output_console) { sleep(2); } - clean_up(crm_errno2exit(rc)); + return clean_up(crm_errno2exit(rc)); } if (one_shot) { - clean_up(CRM_EX_OK); + return clean_up(CRM_EX_OK); } mainloop = g_main_loop_new(NULL, FALSE); mainloop_add_signal(SIGTERM, mon_shutdown); mainloop_add_signal(SIGINT, mon_shutdown); #if CURSES_ENABLED if (output_format == mon_output_console) { 
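        /* Remember any WINCH handler that ncurses installed: mon_winresize()
         * chains to it, because it is what actually picks up the new terminal
         * size. SIG_DFL, SIG_IGN, and SIG_ERR mean there is nothing useful to
         * chain to, so the saved handler is cleared again just below.
         */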
ncurses_winch_handler = signal(SIGWINCH, mon_winresize); if (ncurses_winch_handler == SIG_DFL || ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR) ncurses_winch_handler = NULL; g_io_add_watch(g_io_channel_unix_new(STDIN_FILENO), G_IO_IN, detect_user_input, NULL); } #endif refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL); g_main_loop_run(mainloop); g_main_destroy(mainloop); crm_info("Exiting %s", crm_system_name); - clean_up(CRM_EX_OK); - return CRM_EX_OK; // Should never be reached + return clean_up(CRM_EX_OK); } #define mon_warn(fmt...) do { \ if (!has_warnings) { \ print_as("CLUSTER WARN:"); \ } else { \ print_as(","); \ } \ print_as(fmt); \ has_warnings = TRUE; \ } while(0) static int count_resources(pe_working_set_t * data_set, resource_t * rsc) { int count = 0; GListPtr gIter = NULL; if (rsc == NULL) { gIter = data_set->resources; } else if (rsc->children) { gIter = rsc->children; } else { return is_not_set(rsc->flags, pe_rsc_orphan); } for (; gIter != NULL; gIter = gIter->next) { count += count_resources(data_set, gIter->data); } return count; } /*! * \internal * \brief Print one-line status suitable for use with monitoring software * * \param[in] data_set Working set of CIB state * \param[in] history List of stonith actions * * \note This function's output (and the return code when the program exits) * should conform to https://www.monitoring-plugins.org/doc/guidelines.html */ static void print_simple_status(pe_working_set_t * data_set, stonith_history_t *history) { GListPtr gIter = NULL; int nodes_online = 0; int nodes_standby = 0; int nodes_maintenance = 0; if (data_set->dc_node == NULL) { mon_warn(" No DC"); } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; if (node->details->standby && node->details->online) { nodes_standby++; } else if (node->details->maintenance && node->details->online) { nodes_maintenance++; } else if (node->details->online) { nodes_online++; } else { mon_warn(" offline node: %s", node->details->uname); } } if (!has_warnings) { int nresources = count_resources(data_set, NULL); print_as("CLUSTER OK: %d node%s online", nodes_online, s_if_plural(nodes_online)); if (nodes_standby > 0) { print_as(", %d standby node%s", nodes_standby, s_if_plural(nodes_standby)); } if (nodes_maintenance > 0) { print_as(", %d maintenance node%s", nodes_maintenance, s_if_plural(nodes_maintenance)); } print_as(", %d resource%s configured", nresources, s_if_plural(nresources)); } print_as("\n"); } /*! * \internal * \brief Print a [name]=[value][units] pair, optionally using time string * * \param[in] stream File stream to display output to * \param[in] name Name to display * \param[in] value Value to display (or NULL to convert time instead) * \param[in] units Units to display (or NULL for no units) * \param[in] epoch_time Epoch time to convert if value is NULL */ static void print_nvpair(FILE *stream, const char *name, const char *value, const char *units, time_t epoch_time) { /* print name= */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" %s=", name); break; case mon_output_html: case mon_output_cgi: case mon_output_xml: fprintf(stream, " %s=", name); break; default: break; } /* If we have a value (and optionally units), print it */ if (value) { switch (output_format) { case mon_output_plain: case mon_output_console: print_as("%s%s", value, (units? 
units : "")); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "%s%s", value, (units? units : "")); break; case mon_output_xml: fprintf(stream, "\"%s%s\"", value, (units? units : "")); break; default: break; } /* Otherwise print user-friendly time string */ } else { static char empty_str[] = ""; char *c, *date_str = asctime(localtime(&epoch_time)); for (c = (date_str != NULL) ? date_str : empty_str; *c != '\0'; ++c) { if (*c == '\n') { *c = '\0'; break; } } switch (output_format) { case mon_output_plain: case mon_output_console: print_as("'%s'", date_str); break; case mon_output_html: case mon_output_cgi: case mon_output_xml: fprintf(stream, "\"%s\"", date_str); break; default: break; } } } /*! * \internal * \brief Print whatever is needed to start a node section * * \param[in] stream File stream to display output to * \param[in] node Node to print */ static void print_node_start(FILE *stream, node_t *node) { char *node_name; switch (output_format) { case mon_output_plain: case mon_output_console: node_name = get_node_display_name(node); print_as("* Node %s:\n", node_name); free(node_name); break; case mon_output_html: case mon_output_cgi: node_name = get_node_display_name(node); fprintf(stream, "

Node: %s

\n
    \n", node_name); free(node_name); break; case mon_output_xml: fprintf(stream, " \n", node->details->uname); break; default: break; } } /*! * \internal * \brief Print whatever is needed to end a node section * * \param[in] stream File stream to display output to */ static void print_node_end(FILE *stream) { switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
\n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Print resources section heading appropriate to options * * \param[in] stream File stream to display output to */ static void print_resources_heading(FILE *stream) { const char *heading; if (group_by_node) { /* Active resources have already been printed by node */ heading = (inactive_resources? "Inactive resources" : NULL); } else if (inactive_resources) { heading = "Full list of resources"; } else { heading = "Active resources"; } /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\n%s:\n\n", heading); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>%s</h2>
\n", heading); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Print whatever resource section closing is appropriate * * \param[in] stream File stream to display output to */ static void print_resources_closing(FILE *stream, gboolean printed_heading) { const char *heading; /* What type of resources we did or did not display */ if (group_by_node) { heading = "inactive "; } else if (inactive_resources) { heading = ""; } else { heading = "active "; } switch (output_format) { case mon_output_plain: case mon_output_console: if (!printed_heading) { print_as("\nNo %sresources\n\n", heading); } break; case mon_output_html: case mon_output_cgi: if (!printed_heading) { fprintf(stream, "
 <hr />\n <h2>No %sresources</h2>
\n", heading); } break; case mon_output_xml: fprintf(stream, " %s\n", (printed_heading? "</resources>
" : "")); break; default: break; } } /*! * \internal * \brief Print whatever resource section(s) are appropriate * * \param[in] stream File stream to display output to * \param[in] data_set Cluster state to display * \param[in] print_opts Bitmask of pe_print_options */ static void print_resources(FILE *stream, pe_working_set_t *data_set, int print_opts) { GListPtr rsc_iter; const char *prefix = NULL; gboolean printed_heading = FALSE; gboolean brief_output = print_brief; /* If we already showed active resources by node, and * we're not showing inactive resources, we have nothing to do */ if (group_by_node && !inactive_resources) { return; } /* XML uses an indent, and ignores brief option for resources */ if (output_format == mon_output_xml) { prefix = " "; brief_output = FALSE; } /* If we haven't already printed resources grouped by node, * and brief output was requested, print resource summary */ if (brief_output && !group_by_node) { print_resources_heading(stream); printed_heading = TRUE; print_rscs_brief(data_set->resources, NULL, print_opts, stream, inactive_resources); } /* For each resource, display it if appropriate */ for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) { resource_t *rsc = (resource_t *) rsc_iter->data; /* Complex resources may have some sub-resources active and some inactive */ gboolean is_active = rsc->fns->active(rsc, TRUE); gboolean partially_active = rsc->fns->active(rsc, FALSE); /* Skip inactive orphans (deleted but still in CIB) */ if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) { continue; /* Skip active resources if we already displayed them by node */ } else if (group_by_node) { if (is_active) { continue; } /* Skip primitives already counted in a brief summary */ } else if (brief_output && (rsc->variant == pe_native)) { continue; /* Skip resources that aren't at least partially active, * unless we're displaying inactive resources */ } else if (!partially_active && !inactive_resources) { continue; } /* Print this resource */ if (printed_heading == FALSE) { print_resources_heading(stream); printed_heading = TRUE; } rsc->fns->print(rsc, prefix, print_opts, stream); } print_resources_closing(stream, printed_heading); } /*! * \internal * \brief Print heading for resource history * * \param[in] stream File stream to display output to * \param[in] data_set Current state of CIB * \param[in] node Node that ran this resource * \param[in] rsc Resource to print * \param[in] rsc_id ID of resource to print * \param[in] all Whether to print every resource or just failed ones */ static void print_rsc_history_start(FILE *stream, pe_working_set_t *data_set, node_t *node, resource_t *rsc, const char *rsc_id, gboolean all) { time_t last_failure = 0; int failcount = rsc? pe_get_failcount(node, rsc, &last_failure, pe_fc_default, NULL, data_set) : 0; if (!all && !failcount && (last_failure <= 0)) { return; } /* Print resource ID */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" %s:", rsc_id); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
  • %s:", rsc_id); break; case mon_output_xml: fprintf(stream, " 0)) { /* Print migration threshold */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" migration-threshold=%d", rsc->migration_threshold); break; case mon_output_html: case mon_output_cgi: fprintf(stream, " migration-threshold=%d", rsc->migration_threshold); break; case mon_output_xml: fprintf(stream, " orphan=\"false\" migration-threshold=\"%d\"", rsc->migration_threshold); break; default: break; } /* Print fail count if any */ if (failcount > 0) { switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" " CRM_FAIL_COUNT_PREFIX "=%d", failcount); break; case mon_output_html: case mon_output_cgi: fprintf(stream, " " CRM_FAIL_COUNT_PREFIX "=%d", failcount); break; case mon_output_xml: fprintf(stream, " " CRM_FAIL_COUNT_PREFIX "=\"%d\"", failcount); break; default: break; } } /* Print last failure time if any */ if (last_failure > 0) { print_nvpair(stream, CRM_LAST_FAILURE_PREFIX, NULL, NULL, last_failure); } } /* End the heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "\n
      \n"); break; case mon_output_xml: fprintf(stream, ">\n"); break; default: break; } } /*! * \internal * \brief Print closing for resource history * * \param[in] stream File stream to display output to */ static void print_rsc_history_end(FILE *stream) { switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
    \n
  • \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Print operation history * * \param[in] stream File stream to display output to * \param[in] data_set Current state of CIB * \param[in] node Node this operation is for * \param[in] xml_op Root of XML tree describing this operation * \param[in] task Task parsed from this operation's XML * \param[in] interval_ms_s Interval parsed from this operation's XML * \param[in] rc Return code parsed from this operation's XML */ static void print_op_history(FILE *stream, pe_working_set_t *data_set, node_t *node, xmlNode *xml_op, const char *task, const char *interval_ms_s, int rc) { const char *value = NULL; const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); /* Begin the operation description */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" + (%s) %s:", call, task); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
  • (%s) %s:", call, task); break; case mon_output_xml: fprintf(stream, " 0) { print_nvpair(stream, attr, NULL, NULL, int_value); } } attr = XML_RSC_OP_LAST_RUN; value = crm_element_value(xml_op, attr); if (value) { int_value = crm_parse_int(value, NULL); if (int_value > 0) { print_nvpair(stream, attr, NULL, NULL, int_value); } } attr = XML_RSC_OP_T_EXEC; value = crm_element_value(xml_op, attr); if (value) { print_nvpair(stream, attr, value, "ms", 0); } attr = XML_RSC_OP_T_QUEUE; value = crm_element_value(xml_op, attr); if (value) { print_nvpair(stream, attr, value, "ms", 0); } } /* End the operation description */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" rc=%d (%s)\n", rc, services_ocf_exitcode_str(rc)); break; case mon_output_html: case mon_output_cgi: fprintf(stream, " rc=%d (%s)
  • \n", rc, services_ocf_exitcode_str(rc)); break; case mon_output_xml: fprintf(stream, " rc=\"%d\" rc_text=\"%s\" />\n", rc, services_ocf_exitcode_str(rc)); break; default: break; } } /*! * \internal * \brief Print resource operation/failure history * * \param[in] stream File stream to display output to * \param[in] data_set Current state of CIB * \param[in] node Node that ran this resource * \param[in] rsc_entry Root of XML tree describing resource status * \param[in] operations Whether to print operations or just failcounts */ static void print_rsc_history(FILE *stream, pe_working_set_t *data_set, node_t *node, xmlNode *rsc_entry, gboolean operations) { GListPtr gIter = NULL; GListPtr op_list = NULL; gboolean printed = FALSE; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); xmlNode *rsc_op = NULL; /* If we're not showing operations, just print the resource failure summary */ if (operations == FALSE) { print_rsc_history_start(stream, data_set, node, rsc, rsc_id, FALSE); print_rsc_history_end(stream); return; } /* Create a list of this resource's operations */ for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) { op_list = g_list_append(op_list, rsc_op); } } op_list = g_list_sort(op_list, sort_op_by_callid); /* Print each operation */ for (gIter = op_list; gIter != NULL; gIter = gIter->next) { xmlNode *xml_op = (xmlNode *) gIter->data; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); int rc = crm_parse_int(op_rc, "0"); /* Display 0-interval monitors as "probe" */ if (safe_str_eq(task, CRMD_ACTION_STATUS) && ((interval_ms_s == NULL) || safe_str_eq(interval_ms_s, "0"))) { task = "probe"; } /* Ignore notifies and some probes */ if (safe_str_eq(task, CRMD_ACTION_NOTIFY) || (safe_str_eq(task, "probe") && (rc == 7))) { continue; } /* If this is the first printed operation, print heading for resource */ if (printed == FALSE) { printed = TRUE; print_rsc_history_start(stream, data_set, node, rsc, rsc_id, TRUE); } /* Print the operation */ print_op_history(stream, data_set, node, xml_op, task, interval_ms_s, rc); } /* Free the list we created (no need to free the individual items) */ g_list_free(op_list); /* If we printed anything, close the resource */ if (printed) { print_rsc_history_end(stream); } } /*! 
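 */

/* Editor's note: the compiled-out block below is an illustrative sketch of the
 * operation-display rules applied by print_rsc_history() above; the example_*
 * names are hypothetical and not part of crm_mon.
 */
#if 0
#include <stdio.h>
#include <string.h>

/* A minimal, self-contained restatement of the rules: 0-interval (or
 * missing-interval) monitors are labelled "probe", notify operations are
 * hidden, and probes that returned 7 (the OCF "not running" code) are hidden
 * as well.  Only standard C is assumed.
 */
static const char *
example_op_label(const char *task, const char *interval_ms_s, int rc)
{
    if (task == NULL) {
        return NULL;
    }
    if ((strcmp(task, "monitor") == 0)
        && ((interval_ms_s == NULL) || (strcmp(interval_ms_s, "0") == 0))) {
        task = "probe";             /* displayed as a probe */
    }
    if ((strcmp(task, "notify") == 0)
        || ((strcmp(task, "probe") == 0) && (rc == 7))) {
        return NULL;                /* caller skips this operation */
    }
    return task;
}

int
main(void)
{
    /* a "monitor" with a 0ms interval is shown as "probe" */
    printf("%s\n", example_op_label("monitor", "0", 0));
    /* a probe that found the resource stopped (rc 7) is suppressed */
    printf("%s\n", example_op_label("monitor", NULL, 7) ? "shown" : "hidden");
    return 0;
}
#endif

/*!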
* \internal * \brief Print node operation/failure history * * \param[in] stream File stream to display output to * \param[in] data_set Current state of CIB * \param[in] node_state Root of XML tree describing node status * \param[in] operations Whether to print operations or just failcounts */ static void print_node_history(FILE *stream, pe_working_set_t *data_set, xmlNode *node_state, gboolean operations) { node_t *node = pe_find_node_id(data_set->nodes, ID(node_state)); xmlNode *lrm_rsc = NULL; xmlNode *rsc_entry = NULL; if (node && node->details && node->details->online) { print_node_start(stream, node); lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); /* Print history of each of the node's resources */ for (rsc_entry = __xml_first_child(lrm_rsc); rsc_entry != NULL; rsc_entry = __xml_next(rsc_entry)) { if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) { print_rsc_history(stream, data_set, node, rsc_entry, operations); } } print_node_end(stream); } } /*! * \internal * \brief Print extended information about an attribute if appropriate * * \param[in] data_set Working set of CIB state * * \return TRUE if extended information was printed, FALSE otherwise * \note Currently, extended information is only supported for ping/pingd * resources, for which a message will be printed if connectivity is lost * or degraded. */ static gboolean print_attr_msg(FILE *stream, node_t * node, GListPtr rsc_list, const char *attrname, const char *attrvalue) { GListPtr gIter = NULL; for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; const char *type = g_hash_table_lookup(rsc->meta, "type"); if (rsc->children != NULL) { if (print_attr_msg(stream, node, rsc->children, attrname, attrvalue)) { return TRUE; } } if (safe_str_eq(type, "ping") || safe_str_eq(type, "pingd")) { const char *name = g_hash_table_lookup(rsc->parameters, "name"); if (name == NULL) { name = "pingd"; } /* To identify the resource with the attribute name. */ if (safe_str_eq(name, attrname)) { int host_list_num = 0; int expected_score = 0; int value = crm_parse_int(attrvalue, "0"); const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list"); const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier"); if(hosts) { char **host_list = g_strsplit(hosts, " ", 0); host_list_num = g_strv_length(host_list); g_strfreev(host_list); } /* pingd multiplier is the same as the default value. 
*/ expected_score = host_list_num * crm_parse_int(multiplier, "1"); switch (output_format) { case mon_output_plain: case mon_output_console: if (value <= 0) { print_as("\t: Connectivity is lost"); } else if (value < expected_score) { print_as("\t: Connectivity is degraded (Expected=%d)", expected_score); } break; case mon_output_html: case mon_output_cgi: if (value <= 0) { fprintf(stream, " (connectivity is lost)"); } else if (value < expected_score) { fprintf(stream, " (connectivity is degraded -- expected %d)", expected_score); } break; case mon_output_xml: fprintf(stream, " expected=\"%d\"", expected_score); break; default: break; } return TRUE; } } } return FALSE; } static int compare_attribute(gconstpointer a, gconstpointer b) { int rc; rc = strcmp((const char *)a, (const char *)b); return rc; } static void create_attr_list(gpointer name, gpointer value, gpointer data) { int i; const char *filt_str[] = FILTER_STR; CRM_CHECK(name != NULL, return); /* filtering automatic attributes */ for (i = 0; filt_str[i] != NULL; i++) { if (g_str_has_prefix(name, filt_str[i])) { return; } } attr_list = g_list_insert_sorted(attr_list, name, compare_attribute); } /* structure for passing multiple user data to g_list_foreach() */ struct mon_attr_data { FILE *stream; node_t *node; }; static void print_node_attribute(gpointer name, gpointer user_data) { const char *value = NULL; struct mon_attr_data *data = (struct mon_attr_data *) user_data; value = pe_node_attribute_raw(data->node, name); /* Print attribute name and value */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as(" + %-32s\t: %-10s", (char *)name, value); break; case mon_output_html: case mon_output_cgi: fprintf(data->stream, "
  • %s: %s", (char *)name, value); break; case mon_output_xml: fprintf(data->stream, " stream, data->node, data->node->details->running_rsc, name, value); /* Close out the attribute */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\n"); break; case mon_output_html: case mon_output_cgi: fprintf(data->stream, "
  • \n"); break; case mon_output_xml: fprintf(data->stream, " />\n"); break; default: break; } } static void print_node_summary(FILE *stream, pe_working_set_t * data_set, gboolean operations) { xmlNode *node_state = NULL; xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); /* Print heading */ switch (output_format) { case mon_output_plain: case mon_output_console: if (operations) { print_as("\nOperations:\n"); } else { print_as("\nMigration Summary:\n"); } break; case mon_output_html: case mon_output_cgi: if (operations) { fprintf(stream, "
 <hr />\n <h2>Operations</h2>\n"); } else { fprintf(stream, " <hr />\n <h2>Migration Summary</h2>
    \n"); } break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } /* Print each node in the CIB status */ for (node_state = __xml_first_child(cib_status); node_state != NULL; node_state = __xml_next(node_state)) { if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) { print_node_history(stream, data_set, node_state, operations); } } /* Close section */ switch (output_format) { case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } static void print_ticket(gpointer name, gpointer value, gpointer data) { ticket_t *ticket = (ticket_t *) value; FILE *stream = (FILE *) data; switch (output_format) { case mon_output_plain: case mon_output_console: print_as("* %s:\t%s%s", ticket->id, (ticket->granted? "granted" : "revoked"), (ticket->standby? " [standby]" : "")); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
  • %s: %s%s", ticket->id, (ticket->granted? "granted" : "revoked"), (ticket->standby? " [standby]" : "")); break; case mon_output_xml: fprintf(stream, " id, (ticket->granted? "granted" : "revoked"), (ticket->standby? "true" : "false")); break; default: break; } if (ticket->last_granted > -1) { print_nvpair(stdout, "last-granted", NULL, NULL, ticket->last_granted); } switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
  • \n"); break; case mon_output_xml: fprintf(stream, " />\n"); break; default: break; } } static void print_cluster_tickets(FILE *stream, pe_working_set_t * data_set) { /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\nTickets:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Tickets</h2>\n <ul>
      \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } /* Print each ticket */ g_hash_table_foreach(data_set->tickets, print_ticket, stream); /* Close section */ switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Return human-friendly string representing node name * * The returned string will be in the format * uname[@hostUname] [(nodeID)] * "@hostUname" will be printed if the node is a guest node. * "(nodeID)" will be printed if the node ID is different from the node uname, * and detailed output has been requested. * * \param[in] node Node to represent * \return Newly allocated string with representation of node name * \note It is the caller's responsibility to free the result with free(). */ static char * get_node_display_name(node_t *node) { char *node_name; const char *node_host = NULL; const char *node_id = NULL; int name_len; CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL)); /* Host is displayed only if this is a guest node */ if (is_container_remote_node(node)) { node_t *host_node = pe__current_node(node->details->remote_rsc); if (host_node && host_node->details) { node_host = host_node->details->uname; } if (node_host == NULL) { node_host = ""; /* so we at least get "uname@" to indicate guest */ } } /* Node ID is displayed if different from uname and detail is requested */ if (print_clone_detail && safe_str_neq(node->details->uname, node->details->id)) { node_id = node->details->id; } /* Determine name length */ name_len = strlen(node->details->uname) + 1; if (node_host) { name_len += strlen(node_host) + 1; /* "@node_host" */ } if (node_id) { name_len += strlen(node_id) + 3; /* + " (node_id)" */ } /* Allocate and populate display name */ node_name = malloc(name_len); CRM_ASSERT(node_name != NULL); strcpy(node_name, node->details->uname); if (node_host) { strcat(node_name, "@"); strcat(node_name, node_host); } if (node_id) { strcat(node_name, " ("); strcat(node_name, node_id); strcat(node_name, ")"); } return node_name; } /*! * \internal * \brief Print a negative location constraint * * \param[in] stream File stream to display output to * \param[in] node Node affected by constraint * \param[in] location Constraint to print */ static void print_ban(FILE *stream, node_t *node, rsc_to_node_t *location) { char *node_name = NULL; switch (output_format) { case mon_output_plain: case mon_output_console: node_name = get_node_display_name(node); print_as(" %s\tprevents %s from running %son %s\n", location->id, location->rsc_lh->id, ((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""), node_name); break; case mon_output_html: case mon_output_cgi: node_name = get_node_display_name(node); fprintf(stream, "
  • %s prevents %s from running %son %s
  • \n", location->id, location->rsc_lh->id, ((location->role_filter == RSC_ROLE_MASTER)? "as Master " : ""), node_name); break; case mon_output_xml: fprintf(stream, " \n", location->id, location->rsc_lh->id, node->details->uname, node->weight, ((location->role_filter == RSC_ROLE_MASTER)? "true" : "false")); break; default: break; } free(node_name); } /*! * \internal * \brief Print section for negative location constraints * * \param[in] stream File stream to display output to * \param[in] data_set Working set corresponding to CIB status to display */ static void print_neg_locations(FILE *stream, pe_working_set_t *data_set) { GListPtr gIter, gIter2; /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\nNegative Location Constraints:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Negative Location Constraints</h2>\n <ul>
      \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } /* Print each ban */ for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { rsc_to_node_t *location = (rsc_to_node_t *) gIter->data; if (!g_str_has_prefix(location->id, print_neg_location_prefix)) continue; for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) { node_t *node = (node_t *) gIter2->data; if (node->weight < 0) { print_ban(stream, node, location); } } } /* Close section */ switch (output_format) { case mon_output_cgi: case mon_output_html: fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } static void crm_mon_get_parameters(resource_t *rsc, pe_working_set_t * data_set) { get_rsc_attributes(rsc->parameters, rsc, NULL, data_set); crm_trace("Beekhof: unpacked params for %s (%d)", rsc->id, g_hash_table_size(rsc->parameters)); if(rsc->children) { GListPtr gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { crm_mon_get_parameters(gIter->data, data_set); } } } /*! * \internal * \brief Print node attributes section * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state */ static void print_node_attributes(FILE *stream, pe_working_set_t *data_set) { GListPtr gIter = NULL; /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\nNode Attributes:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Node Attributes</h2>
    \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } /* Unpack all resource parameters (it would be more efficient to do this * only when needed for the first time in print_attr_msg()) */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { crm_mon_get_parameters(gIter->data, data_set); } /* Display each node's attributes */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { struct mon_attr_data data; data.stream = stream; data.node = (node_t *) gIter->data; if (data.node && data.node->details && data.node->details->online) { print_node_start(stream, data.node); g_hash_table_foreach(data.node->details->attrs, create_attr_list, NULL); g_list_foreach(attr_list, print_node_attribute, &data); g_list_free(attr_list); attr_list = NULL; print_node_end(stream); } } /* Print section footer */ switch (output_format) { case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Return resource display options corresponding to command-line choices * * \return Bitmask of pe_print_options suitable for resource print functions */ static int get_resource_display_options(void) { int print_opts; /* Determine basic output format */ switch (output_format) { case mon_output_console: print_opts = pe_print_ncurses; break; case mon_output_html: case mon_output_cgi: print_opts = pe_print_html; break; case mon_output_xml: print_opts = pe_print_xml; break; default: print_opts = pe_print_printf; break; } /* Add optional display elements */ if (print_pending) { print_opts |= pe_print_pending; } if (print_clone_detail) { print_opts |= pe_print_clone_details|pe_print_implicit; } if (!inactive_resources) { print_opts |= pe_print_clone_active; } if (print_brief) { print_opts |= pe_print_brief; } return print_opts; } /*! * \internal * \brief Return human-friendly string representing current time * * \return Current time as string (as by ctime() but without newline) on success * or "Could not determine current time" on error * \note The return value points to a statically allocated string which might be * overwritten by subsequent calls to any of the C library date and time functions. */ static const char * crm_now_string(void) { time_t a_time = time(NULL); char *since_epoch = ctime(&a_time); if ((a_time == (time_t) -1) || (since_epoch == NULL)) { return "Could not determine current time"; } since_epoch[strlen(since_epoch) - 1] = EOS; /* trim newline */ return (since_epoch); } /*! * \internal * \brief Print header for cluster summary if needed * * \param[in] stream File stream to display output to */ static void print_cluster_summary_header(FILE *stream) { switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "

 <h2>Cluster Summary</h2>\n <p>\n"); break; case mon_output_xml: fprintf(stream, "    <summary>
    \n"); break; default: break; } } /*! * \internal * \brief Print footer for cluster summary if needed * * \param[in] stream File stream to display output to */ static void print_cluster_summary_footer(FILE *stream) { switch (output_format) { case mon_output_cgi: case mon_output_html: fprintf(stream, "

 </p>\n"); break; case mon_output_xml: fprintf(stream, "    </summary>
    \n"); break; default: break; } } /*! * \internal * \brief Print times the display was last updated and CIB last changed * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state */ static void print_cluster_times(FILE *stream, pe_working_set_t *data_set) { const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN); const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER); const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT); const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG); switch (output_format) { case mon_output_plain: case mon_output_console: print_as("Last updated: %s", crm_now_string()); print_as((user || client || origin)? "\n" : "\t\t"); print_as("Last change: %s", last_written ? last_written : ""); if (user) { print_as(" by %s", user); } if (client) { print_as(" via %s", client); } if (origin) { print_as(" on %s", origin); } print_as("\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, " Last updated: %s
    \n", crm_now_string()); fprintf(stream, " Last change: %s", last_written ? last_written : ""); if (user) { fprintf(stream, " by %s", user); } if (client) { fprintf(stream, " via %s", client); } if (origin) { fprintf(stream, " on %s", origin); } fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " \n", crm_now_string()); fprintf(stream, " \n", last_written ? last_written : "", user ? user : "", client ? client : "", origin ? origin : ""); break; default: break; } } /*! * \internal * \brief Print cluster stack * * \param[in] stream File stream to display output to * \param[in] stack_s Stack name */ static void print_cluster_stack(FILE *stream, const char *stack_s) { switch (output_format) { case mon_output_plain: case mon_output_console: print_as("Stack: %s\n", stack_s); break; case mon_output_html: case mon_output_cgi: fprintf(stream, " Stack: %s
    \n", stack_s); break; case mon_output_xml: fprintf(stream, " \n", stack_s); break; default: break; } } /*! * \internal * \brief Print current DC and its version * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state */ static void print_cluster_dc(FILE *stream, pe_working_set_t *data_set) { node_t *dc = data_set->dc_node; xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']", data_set->input, LOG_DEBUG); const char *dc_version_s = dc_version? crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE) : NULL; const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM); char *dc_name = dc? get_node_display_name(dc) : NULL; switch (output_format) { case mon_output_plain: case mon_output_console: print_as("Current DC: "); if (dc) { print_as("%s (version %s) - partition %s quorum\n", dc_name, (dc_version_s? dc_version_s : "unknown"), (crm_is_true(quorum) ? "with" : "WITHOUT")); } else { print_as("NONE\n"); } break; case mon_output_html: case mon_output_cgi: fprintf(stream, " Current DC: "); if (dc) { fprintf(stream, "%s (version %s) - partition %s quorum", dc_name, (dc_version_s? dc_version_s : "unknown"), (crm_is_true(quorum)? "with" : "WITHOUT")); } else { fprintf(stream, "NONE"); } fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " details->uname, dc->details->id, (crm_is_true(quorum) ? "true" : "false")); } else { fprintf(stream, "present=\"false\""); } fprintf(stream, " />\n"); break; default: break; } free(dc_name); } /*! * \internal * \brief Print counts of configured nodes and resources * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state * \param[in] stack_s Stack name */ static void print_cluster_counts(FILE *stream, pe_working_set_t *data_set, const char *stack_s) { int nnodes = g_list_length(data_set->nodes); int nresources = count_resources(data_set, NULL); switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\n%d node%s configured\n", nnodes, s_if_plural(nnodes)); print_as("%d resource%s configured", nresources, s_if_plural(nresources)); if(data_set->disabled_resources || data_set->blocked_resources) { print_as(" ("); if (data_set->disabled_resources) { print_as("%d DISABLED", data_set->disabled_resources); } if (data_set->disabled_resources && data_set->blocked_resources) { print_as(", "); } if (data_set->blocked_resources) { print_as("%d BLOCKED from starting due to failure", data_set->blocked_resources); } print_as(")"); } print_as("\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, " %d node%s configured
    \n", nnodes, s_if_plural(nnodes)); fprintf(stream, " %d resource%s configured", nresources, s_if_plural(nresources)); if (data_set->disabled_resources || data_set->blocked_resources) { fprintf(stream, " ("); if (data_set->disabled_resources) { fprintf(stream, "%d DISABLED", data_set->disabled_resources); } if (data_set->disabled_resources && data_set->blocked_resources) { fprintf(stream, ", "); } if (data_set->blocked_resources) { fprintf(stream, "%d BLOCKED from starting due to failure", data_set->blocked_resources); } fprintf(stream, ")"); } fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " \n", g_list_length(data_set->nodes)); fprintf(stream, " \n", count_resources(data_set, NULL), data_set->disabled_resources, data_set->blocked_resources); break; default: break; } } /*! * \internal * \brief Print cluster-wide options * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state * * \note Currently this is only implemented for HTML and XML output, and * prints only a few options. If there is demand, more could be added. */ static void print_cluster_options(FILE *stream, pe_working_set_t *data_set) { switch (output_format) { case mon_output_plain: case mon_output_console: if (is_set(data_set->flags, pe_flag_maintenance_mode)) { print_as("\n *** Resource management is DISABLED ***"); print_as("\n The cluster will not attempt to start, stop or recover services"); print_as("\n"); } break; case mon_output_html: fprintf(stream, "

 </p>\n <h3>Config Options</h3>
    \n"); fprintf(stream, " \n"); fprintf(stream, " \n", is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled"); fprintf(stream, " \n", is_set(data_set->flags, pe_flag_symmetric_cluster)? "" : "a"); fprintf(stream, " \n"); fprintf(stream, " \n"); fprintf(stream, "
    STONITH of failed nodes%s
    Cluster is%ssymmetric
    No Quorum Policy"); switch (data_set->no_quorum_policy) { case no_quorum_freeze: fprintf(stream, "Freeze resources"); break; case no_quorum_stop: fprintf(stream, "Stop ALL resources"); break; case no_quorum_ignore: fprintf(stream, "Ignore"); break; case no_quorum_suicide: fprintf(stream, "Suicide"); break; } fprintf(stream, "
    Resource management"); if (is_set(data_set->flags, pe_flag_maintenance_mode)) { fprintf(stream, "DISABLED (the cluster will " "not attempt to start, stop or recover services)"); } else { fprintf(stream, "enabled"); } fprintf(stream, "
    \n

    \n"); break; case mon_output_xml: fprintf(stream, " flags, pe_flag_stonith_enabled)? "true" : "false"); fprintf(stream, " symmetric-cluster=\"%s\"", is_set(data_set->flags, pe_flag_symmetric_cluster)? "true" : "false"); fprintf(stream, " no-quorum-policy=\""); switch (data_set->no_quorum_policy) { case no_quorum_freeze: fprintf(stream, "freeze"); break; case no_quorum_stop: fprintf(stream, "stop"); break; case no_quorum_ignore: fprintf(stream, "ignore"); break; case no_quorum_suicide: fprintf(stream, "suicide"); break; } fprintf(stream, "\""); fprintf(stream, " maintenance-mode=\"%s\"", is_set(data_set->flags, pe_flag_maintenance_mode)? "true" : "false"); fprintf(stream, " />\n"); break; default: break; } } /*! * \internal * \brief Get the name of the stack in use (or "unknown" if not available) * * \param[in] data_set Working set of CIB state * * \return String representing stack name */ static const char * get_cluster_stack(pe_working_set_t *data_set) { xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']", data_set->input, LOG_DEBUG); return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown"; } /*! * \internal * \brief Print a summary of cluster-wide information * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state */ static void print_cluster_summary(FILE *stream, pe_working_set_t *data_set) { const char *stack_s = get_cluster_stack(data_set); gboolean header_printed = FALSE; if (show & mon_show_stack) { if (header_printed == FALSE) { print_cluster_summary_header(stream); header_printed = TRUE; } print_cluster_stack(stream, stack_s); } /* Always print DC if none, even if not requested */ if ((data_set->dc_node == NULL) || (show & mon_show_dc)) { if (header_printed == FALSE) { print_cluster_summary_header(stream); header_printed = TRUE; } print_cluster_dc(stream, data_set); } if (show & mon_show_times) { if (header_printed == FALSE) { print_cluster_summary_header(stream); header_printed = TRUE; } print_cluster_times(stream, data_set); } if (is_set(data_set->flags, pe_flag_maintenance_mode) || data_set->disabled_resources || data_set->blocked_resources || is_set(show, mon_show_count)) { if (header_printed == FALSE) { print_cluster_summary_header(stream); header_printed = TRUE; } print_cluster_counts(stream, data_set, stack_s); } /* There is not a separate option for showing cluster options, so show with * stack for now; a separate option could be added if there is demand */ if (show & mon_show_stack) { print_cluster_options(stream, data_set); } if (header_printed) { print_cluster_summary_footer(stream); } } /*! 
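 */

/* Editor's note: the compiled-out block below is an illustrative sketch of how
 * a "show" bitmask, like the one tested in print_cluster_summary() above,
 * gates individual sections.  The example_* flag values are hypothetical
 * stand-ins; the real mon_show_* bits are defined elsewhere in crm_mon.
 */
#if 0
#include <stdio.h>

enum example_section {
    example_show_stack = (1 << 0),
    example_show_dc    = (1 << 1),
    example_show_times = (1 << 2),
};

static void
example_print_summary(unsigned int show, int have_dc)
{
    if (show & example_show_stack) {
        printf("stack\n");
    }
    /* mirror print_cluster_summary(): the DC line is always shown when the
     * cluster has no DC, even if it was not requested
     */
    if (!have_dc || (show & example_show_dc)) {
        printf("dc\n");
    }
    if (show & example_show_times) {
        printf("times\n");
    }
}

int
main(void)
{
    /* request only the stack and times sections, with a DC present */
    example_print_summary(example_show_stack | example_show_times, 1);
    return 0;
}
#endif

/*!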
* \internal * \brief Print a failed action * * \param[in] stream File stream to display output to * \param[in] xml_op Root of XML tree describing failed action */ static void print_failed_action(FILE *stream, xmlNode *xml_op) { const char *op_key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); const char *op_key_attr = "op_key"; const char *last = crm_element_value(xml_op, XML_RSC_OP_LAST_CHANGE); const char *node = crm_element_value(xml_op, XML_ATTR_UNAME); const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON); int rc = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), "0"); int status = crm_parse_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS), "0"); char *exit_reason_cleaned; /* If no op_key was given, use id instead */ if (op_key == NULL) { op_key = ID(xml_op); op_key_attr = "id"; } /* If no exit reason was given, use "none" */ if (exit_reason == NULL) { exit_reason = "none"; } /* Print common action information */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("* %s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'", op_key, node, services_ocf_exitcode_str(rc), rc, call, services_lrm_status_str(status), exit_reason); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "

  • %s on %s '%s' (%d): call=%s, status=%s, exitreason='%s'", op_key, node, services_ocf_exitcode_str(rc), rc, call, services_lrm_status_str(status), exit_reason); break; case mon_output_xml: exit_reason_cleaned = crm_xml_escape(exit_reason); fprintf(stream, " \n"); break; case mon_output_xml: fprintf(stream, " />\n"); break; default: break; } } /*! * \internal * \brief Print a section for failed actions * * \param[in] stream File stream to display output to * \param[in] data_set Working set of CIB state */ static void print_failed_actions(FILE *stream, pe_working_set_t *data_set) { xmlNode *xml_op = NULL; /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\nFailed Resource Actions:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Failed Resource Actions</h2>\n <ul>
      \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } /* Print each failed action */ for (xml_op = __xml_first_child(data_set->failed); xml_op != NULL; xml_op = __xml_next(xml_op)) { print_failed_action(stream, xml_op); } /* End section */ switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Reduce the stonith-history * for successful actions we keep the last of every action-type & target * for failed actions we record as well who had failed * for actions in progress we keep full track * * \param[in] history List of stonith actions * */ static stonith_history_t * reduce_stonith_history(stonith_history_t *history) { stonith_history_t *new = NULL, *hp, *np, *tmp; for (hp = history; hp; ) { for (np = new; np; np = np->next) { if ((hp->state == st_done) || (hp->state == st_failed)) { /* action not in progress */ if (safe_str_eq(hp->target, np->target) && safe_str_eq(hp->action, np->action) && (hp->state == np->state)) { if ((hp->state == st_done) || safe_str_eq(hp->delegate, np->delegate)) { /* replace or purge */ if (hp->completed < np->completed) { /* purge older hp */ tmp = hp->next; hp->next = NULL; stonith_history_free(hp); hp = tmp; break; } /* damn single linked list */ free(hp->target); free(hp->action); free(np->origin); np->origin = hp->origin; free(np->delegate); np->delegate = hp->delegate; free(np->client); np->client = hp->client; np->completed = hp->completed; tmp = hp; hp = hp->next; free(tmp); break; } } if (np->next) { continue; } } np = 0; /* let outer loop progress hp */ break; } /* simply move hp from history to new */ if (np == NULL) { tmp = hp->next; hp->next = new; new = hp; hp = tmp; } } return new; } /*! * \internal * \brief Sort the stonith-history * sort by competed most current on the top * pending actions lacking a completed-stamp are gathered at the top * * \param[in] history List of stonith actions * */ static stonith_history_t * sort_stonith_history(stonith_history_t *history) { stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp; for (hp = history; hp; ) { tmp = hp->next; if ((hp->state == st_done) || (hp->state == st_failed)) { /* sort into new */ if ((!new) || (hp->completed > new->completed)) { hp->next = new; new = hp; } else { np = new; do { if ((!np->next) || (hp->completed > np->next->completed)) { hp->next = np->next; np->next = hp; break; } np = np->next; } while (1); } } else { /* put into pending */ hp->next = pending; pending = hp; } hp = tmp; } /* pending actions don't have a completed-stamp so make them go front */ if (pending) { stonith_history_t *last_pending = pending; while (last_pending->next) { last_pending = last_pending->next; } last_pending->next = new; new = pending; } return new; } /*! * \internal * \brief Turn stonith action into a better readable string * * \param[in] action Stonith action */ static char * fence_action_str(const char *action) { char *str = NULL; if (action == NULL) { str = strdup("fencing"); } else if (!strcmp(action, "on")) { str = strdup("unfencing"); } else if (!strcmp(action, "off")) { str = strdup("turning off"); } else { str = strdup(action); } return str; } /*! 
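 */

/* Editor's note: the compiled-out block below is an illustrative usage sketch
 * for fence_action_str() above; example_fence_action_str_usage is a
 * hypothetical name and not part of crm_mon.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

/* Expected mapping, as implemented above: NULL -> "fencing",
 * "on" -> "unfencing", "off" -> "turning off", anything else is copied
 * verbatim.  The returned string is heap-allocated, so the caller frees it.
 */
static void
example_fence_action_str_usage(void)
{
    const char *inputs[] = { NULL, "on", "off", "reboot" };
    int i;

    for (i = 0; i < 4; i++) {
        char *label = fence_action_str(inputs[i]);

        printf("%s -> %s\n", inputs[i] ? inputs[i] : "(null)", label);
        free(label);
    }
}
#endif

/*!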
* \internal * \brief Print a stonith action * * \param[in] stream File stream to display output to * \param[in] event stonith event */ static void print_stonith_action(FILE *stream, stonith_history_t *event) { char *action_s = fence_action_str(event->action); char *run_at_s = ctime(&event->completed); if ((run_at_s) && (run_at_s[0] != 0)) { run_at_s[strlen(run_at_s)-1] = 0; /* Overwrite the newline */ } switch(output_format) { case mon_output_xml: fprintf(stream, " target, event->action); switch(event->state) { case st_done: fprintf(stream, " state=\"success\""); break; case st_failed: fprintf(stream, " state=\"failed\""); break; default: fprintf(stream, " state=\"pending\""); } fprintf(stream, " origin=\"%s\" client=\"%s\"", event->origin, event->client); if (event->delegate) { fprintf(stream, " delegate=\"%s\"", event->delegate); } switch(event->state) { case st_done: case st_failed: fprintf(stream, " completed=\"%s\"", run_at_s?run_at_s:""); break; default: fprintf(stream, " state=\"pending\""); } fprintf(stream, " />\n"); break; case mon_output_plain: case mon_output_console: switch(event->state) { case st_done: print_as("* %s of %s successful: delegate=%s, client=%s, origin=%s,\n" " %s='%s'\n", action_s, event->target, event->delegate ? event->delegate : "", event->client, event->origin, ((!fence_full_history) && (output_format != mon_output_xml))? "last-successful":"completed", run_at_s?run_at_s:""); break; case st_failed: print_as("* %s of %s failed: delegate=%s, client=%s, origin=%s,\n" " %s='%s'\n", action_s, event->target, event->delegate ? event->delegate : "", event->client, event->origin, ((!fence_full_history) && (output_format != mon_output_xml))? "last-failed":"completed", run_at_s?run_at_s:""); break; default: print_as("* %s of %s pending: client=%s, origin=%s\n", action_s, event->target, event->client, event->origin); } break; case mon_output_html: case mon_output_cgi: switch(event->state) { case st_done: fprintf(stream, "
  • %s of %s successful: delegate=%s, " "client=%s, origin=%s, %s='%s'
  • \n", action_s, event->target, event->delegate ? event->delegate : "", event->client, event->origin, ((!fence_full_history) && (output_format != mon_output_xml))? "last-successful":"completed", run_at_s?run_at_s:""); break; case st_failed: fprintf(stream, "
  • %s of %s failed: delegate=%s, " "client=%s, origin=%s, %s='%s'
  • \n", action_s, event->target, event->delegate ? event->delegate : "", event->client, event->origin, ((!fence_full_history) && (output_format != mon_output_xml))? "last-failed":"completed", run_at_s?run_at_s:""); break; default: fprintf(stream, "
  • %s of %s pending: client=%s, " "origin=%s
  • \n", action_s, event->target, event->client, event->origin); } break; default: /* no support for fence history for other formats so far */ break; } free(action_s); } /*! * \internal * \brief Print a section for failed stonith actions * * \param[in] stream File stream to display output to * \param[in] history List of stonith actions * */ static void print_failed_stonith_actions(FILE *stream, stonith_history_t *history) { stonith_history_t *hp; for (hp = history; hp; hp = hp->next) { if (hp->state == st_failed) { break; } } if (!hp) { return; } /* Print section heading */ switch (output_format) { /* no need to take care of xml in here as xml gets full * history anyway */ case mon_output_plain: case mon_output_console: print_as("\nFailed Fencing Actions:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Failed Fencing Actions</h2>\n <ul>
      \n"); break; default: break; } /* Print each failed stonith action */ for (hp = history; hp; hp = hp->next) { if (hp->state == st_failed) { print_stonith_action(stream, hp); } } /* End section */ switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
    \n"); break; default: break; } } /*! * \internal * \brief Print pending stonith actions * * \param[in] stream File stream to display output to * \param[in] history List of stonith actions * */ static void print_stonith_pending(FILE *stream, stonith_history_t *history) { /* xml-output always shows the full history * so we'll never have to show pending-actions * separately */ if (history && (history->state != st_failed) && (history->state != st_done)) { stonith_history_t *hp; /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\nPending Fencing Actions:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Pending Fencing Actions</h2>\n <ul>
      \n"); break; default: break; } for (hp = history; hp; hp = hp->next) { if ((hp->state == st_failed) || (hp->state == st_done)) { break; } print_stonith_action(stream, hp); } /* End section */ switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
    \n"); break; default: break; } } } /*! * \internal * \brief Print a section for stonith-history * * \param[in] stream File stream to display output to * \param[in] history List of stonith actions * */ static void print_stonith_history(FILE *stream, stonith_history_t *history) { stonith_history_t *hp; /* Print section heading */ switch (output_format) { case mon_output_plain: case mon_output_console: print_as("\nFencing History:\n"); break; case mon_output_html: case mon_output_cgi: fprintf(stream, "
 <hr />\n <h2>Fencing History</h2>\n <ul>
      \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } for (hp = history; hp; hp = hp->next) { if ((hp->state != st_failed) || (output_format == mon_output_xml)) { print_stonith_action(stream, hp); } } /* End section */ switch (output_format) { case mon_output_html: case mon_output_cgi: fprintf(stream, "
    \n"); break; case mon_output_xml: fprintf(stream, " \n"); break; default: break; } } /*! * \internal * \brief Print cluster status to screen * * This uses the global display preferences set by command-line options * to display cluster status in a human-friendly way. * * \param[in] data_set Working set of CIB state * \param[in] stonith_history List of stonith actions */ static void print_status(pe_working_set_t * data_set, stonith_history_t *stonith_history) { GListPtr gIter = NULL; int print_opts = get_resource_display_options(); /* space-separated lists of node names */ char *online_nodes = NULL; char *online_remote_nodes = NULL; char *online_guest_nodes = NULL; char *offline_nodes = NULL; char *offline_remote_nodes = NULL; if (output_format == mon_output_console) { blank_screen(); } print_cluster_summary(stdout, data_set); print_as("\n"); /* Gather node information (and print if in bad state or grouping by node) */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; const char *node_mode = NULL; char *node_name = get_node_display_name(node); /* Get node mode */ if (node->details->unclean) { if (node->details->online) { node_mode = "UNCLEAN (online)"; } else if (node->details->pending) { node_mode = "UNCLEAN (pending)"; } else { node_mode = "UNCLEAN (offline)"; } } else if (node->details->pending) { node_mode = "pending"; } else if (node->details->standby_onfail && node->details->online) { node_mode = "standby (on-fail)"; } else if (node->details->standby) { if (node->details->online) { if (node->details->running_rsc) { node_mode = "standby (with active resources)"; } else { node_mode = "standby"; } } else { node_mode = "OFFLINE (standby)"; } } else if (node->details->maintenance) { if (node->details->online) { node_mode = "maintenance"; } else { node_mode = "OFFLINE (maintenance)"; } } else if (node->details->online) { node_mode = "online"; if (group_by_node == FALSE) { if (is_container_remote_node(node)) { online_guest_nodes = add_list_element(online_guest_nodes, node_name); } else if (is_baremetal_remote_node(node)) { online_remote_nodes = add_list_element(online_remote_nodes, node_name); } else { online_nodes = add_list_element(online_nodes, node_name); } free(node_name); continue; } } else { node_mode = "OFFLINE"; if (group_by_node == FALSE) { if (is_baremetal_remote_node(node)) { offline_remote_nodes = add_list_element(offline_remote_nodes, node_name); } else if (is_container_remote_node(node)) { /* ignore offline guest nodes */ } else { offline_nodes = add_list_element(offline_nodes, node_name); } free(node_name); continue; } } /* If we get here, node is in bad state, or we're grouping by node */ /* Print the node name and status */ if (is_container_remote_node(node)) { print_as("Guest"); } else if (is_baremetal_remote_node(node)) { print_as("Remote"); } print_as("Node %s: %s\n", node_name, node_mode); /* If we're grouping by node, print its resources */ if (group_by_node) { if (print_brief) { print_rscs_brief(node->details->running_rsc, "\t", print_opts | pe_print_rsconly, stdout, FALSE); } else { GListPtr gIter2 = NULL; for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) { resource_t *rsc = (resource_t *) gIter2->data; rsc->fns->print(rsc, "\t", print_opts | pe_print_rsconly, stdout); } } } free(node_name); } /* If we're not grouping by node, summarize nodes by status */ if (online_nodes) { print_as("Online: [%s ]\n", online_nodes); free(online_nodes); } if (offline_nodes) { print_as("OFFLINE: 
[%s ]\n", offline_nodes); free(offline_nodes); } if (online_remote_nodes) { print_as("RemoteOnline: [%s ]\n", online_remote_nodes); free(online_remote_nodes); } if (offline_remote_nodes) { print_as("RemoteOFFLINE: [%s ]\n", offline_remote_nodes); free(offline_remote_nodes); } if (online_guest_nodes) { print_as("GuestOnline: [%s ]\n", online_guest_nodes); free(online_guest_nodes); } /* Print resources section, if needed */ print_resources(stdout, data_set, print_opts); /* print Node Attributes section if requested */ if (show & mon_show_attributes) { print_node_attributes(stdout, data_set); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (show & (mon_show_operations | mon_show_failcounts)) { print_node_summary(stdout, data_set, ((show & mon_show_operations)? TRUE : FALSE)); } /* If there were any failed actions, print them */ if (xml_has_children(data_set->failed)) { print_failed_actions(stdout, data_set); } /* Print failed stonith actions */ if (fence_history) { print_failed_stonith_actions(stdout, stonith_history); } /* Print tickets if requested */ if (show & mon_show_tickets) { print_cluster_tickets(stdout, data_set); } /* Print negative location constraints if requested */ if (show & mon_show_bans) { print_neg_locations(stdout, data_set); } /* Print stonith history */ if (fence_history) { if (show & mon_show_fence_history) { print_stonith_history(stdout, stonith_history); } else { print_stonith_pending(stdout, stonith_history); } } #if CURSES_ENABLED if (output_format == mon_output_console) { refresh(); } #endif } /*! * \internal * \brief Print cluster status in XML format * * \param[in] data_set Working set of CIB state */ static void print_xml_status(pe_working_set_t * data_set, stonith_history_t *stonith_history) { FILE *stream = stdout; GListPtr gIter = NULL; int print_opts = get_resource_display_options(); fprintf(stream, "\n"); fprintf(stream, "\n", VERSION); print_cluster_summary(stream, data_set); /*** NODES ***/ fprintf(stream, " \n"); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; const char *node_type = "unknown"; switch (node->details->type) { case node_member: node_type = "member"; break; case node_remote: node_type = "remote"; break; case node_ping: node_type = "ping"; break; } fprintf(stream, " details->uname); fprintf(stream, "id=\"%s\" ", node->details->id); fprintf(stream, "online=\"%s\" ", node->details->online ? "true" : "false"); fprintf(stream, "standby=\"%s\" ", node->details->standby ? "true" : "false"); fprintf(stream, "standby_onfail=\"%s\" ", node->details->standby_onfail ? "true" : "false"); fprintf(stream, "maintenance=\"%s\" ", node->details->maintenance ? "true" : "false"); fprintf(stream, "pending=\"%s\" ", node->details->pending ? "true" : "false"); fprintf(stream, "unclean=\"%s\" ", node->details->unclean ? "true" : "false"); fprintf(stream, "shutdown=\"%s\" ", node->details->shutdown ? "true" : "false"); fprintf(stream, "expected_up=\"%s\" ", node->details->expected_up ? "true" : "false"); fprintf(stream, "is_dc=\"%s\" ", node->details->is_dc ? 
"true" : "false"); fprintf(stream, "resources_running=\"%d\" ", g_list_length(node->details->running_rsc)); fprintf(stream, "type=\"%s\" ", node_type); if (is_container_remote_node(node)) { fprintf(stream, "id_as_resource=\"%s\" ", node->details->remote_rsc->container->id); } if (group_by_node) { GListPtr lpc2 = NULL; fprintf(stream, ">\n"); for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) { resource_t *rsc = (resource_t *) lpc2->data; rsc->fns->print(rsc, " ", print_opts | pe_print_rsconly, stream); } fprintf(stream, " \n"); } else { fprintf(stream, "/>\n"); } } fprintf(stream, " \n"); /* Print resources section, if needed */ print_resources(stream, data_set, print_opts); /* print Node Attributes section if requested */ if (show & mon_show_attributes) { print_node_attributes(stream, data_set); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (show & (mon_show_operations | mon_show_failcounts)) { print_node_summary(stream, data_set, ((show & mon_show_operations)? TRUE : FALSE)); } /* If there were any failed actions, print them */ if (xml_has_children(data_set->failed)) { print_failed_actions(stream, data_set); } /* Print stonith history */ if (fence_history) { print_stonith_history(stdout, stonith_history); } /* Print tickets if requested */ if (show & mon_show_tickets) { print_cluster_tickets(stream, data_set); } /* Print negative location constraints if requested */ if (show & mon_show_bans) { print_neg_locations(stream, data_set); } fprintf(stream, "\n"); fflush(stream); fclose(stream); } /*! * \internal * \brief Print cluster status in HTML format (with HTTP headers if CGI) * * \param[in] data_set Working set of CIB state * \param[in] filename Name of file to write HTML to (ignored if CGI) * * \return 0 on success, -1 on error */ static int print_html_status(pe_working_set_t * data_set, const char *filename, stonith_history_t *stonith_history) { FILE *stream; GListPtr gIter = NULL; char *filename_tmp = NULL; int print_opts = get_resource_display_options(); if (output_format == mon_output_cgi) { stream = stdout; fprintf(stream, "Content-Type: text/html\n\n"); } else { filename_tmp = crm_concat(filename, "tmp", '.'); stream = fopen(filename_tmp, "w"); if (stream == NULL) { crm_perror(LOG_ERR, "Cannot open %s for writing", filename_tmp); free(filename_tmp); return -1; } } fprintf(stream, "\n"); fprintf(stream, " \n"); fprintf(stream, " Cluster status\n"); fprintf(stream, " \n", reconnect_msec / 1000); fprintf(stream, " \n"); fprintf(stream, "\n"); print_cluster_summary(stream, data_set); /*** NODE LIST ***/ fprintf(stream, "
 <hr />\n <h2>Node List</h2>\n"); fprintf(stream, " <ul>
      \n"); for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; char *node_name = get_node_display_name(node); fprintf(stream, "
    • Node: %s: ", node_name); if (node->details->standby_onfail && node->details->online) { fprintf(stream, "standby (on-fail)\n"); } else if (node->details->standby && node->details->online) { fprintf(stream, "standby%s\n", node->details->running_rsc?" (with active resources)":""); } else if (node->details->standby) { fprintf(stream, "OFFLINE (standby)\n"); } else if (node->details->maintenance && node->details->online) { fprintf(stream, "maintenance\n"); } else if (node->details->maintenance) { fprintf(stream, "OFFLINE (maintenance)\n"); } else if (node->details->online) { fprintf(stream, "online\n"); } else { fprintf(stream, "OFFLINE\n"); } if (print_brief && group_by_node) { fprintf(stream, "
        \n"); print_rscs_brief(node->details->running_rsc, NULL, print_opts | pe_print_rsconly, stream, FALSE); fprintf(stream, "
      \n"); } else if (group_by_node) { GListPtr lpc2 = NULL; fprintf(stream, "
        \n"); for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) { resource_t *rsc = (resource_t *) lpc2->data; fprintf(stream, "
      • "); rsc->fns->print(rsc, NULL, print_opts | pe_print_rsconly, stream); fprintf(stream, "
      • \n"); } fprintf(stream, "
      \n"); } fprintf(stream, "
    • \n"); free(node_name); } fprintf(stream, "
    \n"); /* Print resources section, if needed */ print_resources(stream, data_set, print_opts); /* print Node Attributes section if requested */ if (show & mon_show_attributes) { print_node_attributes(stream, data_set); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (show & (mon_show_operations | mon_show_failcounts)) { print_node_summary(stream, data_set, ((show & mon_show_operations)? TRUE : FALSE)); } /* If there were any failed actions, print them */ if (xml_has_children(data_set->failed)) { print_failed_actions(stream, data_set); } /* Print failed stonith actions */ if (fence_history) { print_failed_stonith_actions(stream, stonith_history); } /* Print stonith history */ if (fence_history) { if (show & mon_show_fence_history) { print_stonith_history(stream, stonith_history); } else { print_stonith_pending(stdout, stonith_history); } } /* Print tickets if requested */ if (show & mon_show_tickets) { print_cluster_tickets(stream, data_set); } /* Print negative location constraints if requested */ if (show & mon_show_bans) { print_neg_locations(stream, data_set); } fprintf(stream, "\n"); fprintf(stream, "\n"); fflush(stream); fclose(stream); if (output_format != mon_output_cgi) { if (rename(filename_tmp, filename) != 0) { crm_perror(LOG_ERR, "Unable to rename %s->%s", filename_tmp, filename); } free(filename_tmp); } return 0; } static int send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc, int status, const char *desc) { pid_t pid; /*setenv needs chars, these are ints */ char *rc_s = crm_itoa(rc); char *status_s = crm_itoa(status); char *target_rc_s = crm_itoa(target_rc); crm_debug("Sending external notification to '%s' via '%s'", external_recipient, external_agent); if(rsc) { setenv("CRM_notify_rsc", rsc, 1); } if (external_recipient) { setenv("CRM_notify_recipient", external_recipient, 1); } setenv("CRM_notify_node", node, 1); setenv("CRM_notify_task", task, 1); setenv("CRM_notify_desc", desc, 1); setenv("CRM_notify_rc", rc_s, 1); setenv("CRM_notify_target_rc", target_rc_s, 1); setenv("CRM_notify_status", status_s, 1); pid = fork(); if (pid == -1) { crm_perror(LOG_ERR, "notification fork() failed."); } if (pid == 0) { /* crm_debug("notification: I am the child. 
Executing the nofitication program."); */ execl(external_agent, external_agent, NULL); exit(CRM_EX_ERROR); } crm_trace("Finished running custom notification program '%s'.", external_agent); free(target_rc_s); free(status_s); free(rc_s); return 0; } static void handle_rsc_op(xmlNode * xml, const char *node_id) { int rc = -1; int status = -1; int action = -1; int target_rc = -1; int transition_num = -1; gboolean notify = TRUE; char *rsc = NULL; char *task = NULL; const char *desc = NULL; const char *magic = NULL; const char *id = NULL; char *update_te_uuid = NULL; const char *node = NULL; xmlNode *n = xml; xmlNode * rsc_op = xml; if(strcmp((const char*)xml->name, XML_LRM_TAG_RSC_OP) != 0) { xmlNode *cIter; for(cIter = xml->children; cIter; cIter = cIter->next) { handle_rsc_op(cIter, node_id); } return; } id = crm_element_value(rsc_op, XML_LRM_ATTR_TASK_KEY); if (id == NULL) { /* Compatibility with <= 1.1.5 */ id = ID(rsc_op); } magic = crm_element_value(rsc_op, XML_ATTR_TRANSITION_MAGIC); if (magic == NULL) { /* non-change */ return; } if (FALSE == decode_transition_magic(magic, &update_te_uuid, &transition_num, &action, &status, &rc, &target_rc)) { crm_err("Invalid event %s detected for %s", magic, id); return; } if (parse_op_key(id, &rsc, &task, NULL) == FALSE) { crm_err("Invalid event detected for %s", id); goto bail; } node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET); while (n != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(n))) { n = n->parent; } if(node == NULL && n) { node = crm_element_value(n, XML_ATTR_UNAME); } if (node == NULL && n) { node = ID(n); } if (node == NULL) { node = node_id; } if (node == NULL) { crm_err("No node detected for event %s (%s)", magic, id); goto bail; } /* look up where we expected it to be? */ desc = pcmk_strerror(pcmk_ok); if (status == PCMK_LRM_OP_DONE && target_rc == rc) { crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc); if (rc == PCMK_OCF_NOT_RUNNING) { notify = FALSE; } } else if (status == PCMK_LRM_OP_DONE) { desc = services_ocf_exitcode_str(rc); crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc); } else { desc = services_lrm_status_str(status); crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc); } if (notify && external_agent) { send_custom_trap(node, rsc, task, target_rc, rc, status, desc); } bail: free(update_te_uuid); free(rsc); free(task); } static gboolean mon_trigger_refresh(gpointer user_data) { mainloop_set_trigger(refresh_trigger); return FALSE; } #define NODE_PATT "/lrm[@id=" static char * get_node_from_xpath(const char *xpath) { char *nodeid = NULL; char *tmp = strstr(xpath, NODE_PATT); if(tmp) { tmp += strlen(NODE_PATT); tmp += 1; nodeid = strdup(tmp); tmp = strstr(nodeid, "\'"); CRM_ASSERT(tmp); tmp[0] = 0; } return nodeid; } static void crm_diff_update_v2(const char *event, xmlNode * msg) { xmlNode *change = NULL; xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) { const char *name = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); xmlNode *match = NULL; const char *node = NULL; if(op == NULL) { continue; } else if(strcmp(op, "create") == 0) { match = change->children; } else if(strcmp(op, "move") == 0) { continue; } else if(strcmp(op, "delete") == 0) { continue; } else if(strcmp(op, "modify") == 0) { match = first_named_child(change, XML_DIFF_RESULT); if(match) { match = match->children; } } if(match) { name = (const char 
*)match->name; } crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name); if(xpath == NULL) { /* Version field, ignore */ } else if(name == NULL) { crm_debug("No result for %s operation to %s", op, xpath); CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0); } else if(strcmp(name, XML_TAG_CIB) == 0) { xmlNode *state = NULL; xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS); for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) { node = crm_element_value(state, XML_ATTR_UNAME); if (node == NULL) { node = ID(state); } handle_rsc_op(state, node); } } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) { xmlNode *state = NULL; for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) { node = crm_element_value(state, XML_ATTR_UNAME); if (node == NULL) { node = ID(state); } handle_rsc_op(state, node); } } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) { node = crm_element_value(match, XML_ATTR_UNAME); if (node == NULL) { node = ID(match); } handle_rsc_op(match, node); } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) { node = ID(match); handle_rsc_op(match, node); } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { char *local_node = get_node_from_xpath(xpath); handle_rsc_op(match, local_node); free(local_node); } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { char *local_node = get_node_from_xpath(xpath); handle_rsc_op(match, local_node); free(local_node); } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { char *local_node = get_node_from_xpath(xpath); handle_rsc_op(match, local_node); free(local_node); } else { crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name); } } } static void crm_diff_update_v1(const char *event, xmlNode * msg) { /* Process operation updates */ xmlXPathObject *xpathObj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RSC_OP); int lpc = 0, max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); handle_rsc_op(rsc_op, NULL); } freeXpathObject(xpathObj); } static void crm_diff_update(const char *event, xmlNode * msg) { int rc = -1; static bool stale = FALSE; gboolean cib_updated = FALSE; xmlNode *diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); print_dot(); if (current_cib != NULL) { rc = xml_apply_patchset(current_cib, diff, TRUE); switch (rc) { case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(current_cib); current_cib = NULL; break; case pcmk_ok: cib_updated = TRUE; break; default: crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc); free_xml(current_cib); current_cib = NULL; } } if (current_cib == NULL) { crm_trace("Re-requesting the full cib"); cib->cmds->query(cib, NULL, ¤t_cib, cib_scope_local | cib_sync_call); } if (external_agent) { int format = 0; crm_element_value_int(diff, "format", &format); switch(format) { case 1: crm_diff_update_v1(event, msg); break; case 2: crm_diff_update_v2(event, msg); break; default: crm_err("Unknown patch format: %d", format); } } if (current_cib == NULL) { if(!stale) { print_as("--- Stale data ---"); } stale = TRUE; return; } stale = FALSE; kick_refresh(cib_updated); } static gboolean mon_refresh_display(gpointer user_data) { xmlNode *cib_copy = copy_xml(current_cib); pe_working_set_t data_set; stonith_history_t *stonith_history = NULL; last_refresh = time(NULL); if (cli_config_update(&cib_copy, NULL, FALSE) == FALSE) { if (cib) 
{ cib->cmds->signoff(cib); } print_as("Upgrade failed: %s", pcmk_strerror(-pcmk_err_schema_validation)); if (output_format == mon_output_console) { sleep(2); } clean_up(CRM_EX_CONFIG); return FALSE; } /* get the stonith-history if there is evidence we need it */ while (fence_history) { if (st != NULL) { if (st->cmds->history(st, st_opt_sync_call, NULL, &stonith_history, 120)) { fprintf(stderr, "Critical: Unable to get stonith-history\n"); mon_cib_connection_destroy(NULL); } else { if ((!fence_full_history) && (output_format != mon_output_xml)) { stonith_history = reduce_stonith_history(stonith_history); } stonith_history = sort_stonith_history(stonith_history); break; /* all other cases are errors */ } } else { fprintf(stderr, "Critical: No stonith-API\n"); } free_xml(cib_copy); print_as("Reading stonith-history failed"); if (output_format == mon_output_console) { sleep(2); } return FALSE; } set_working_set_defaults(&data_set); data_set.input = cib_copy; cluster_status(&data_set); /* Unpack constraints if any section will need them * (tickets may be referenced in constraints but not granted yet, * and bans need negative location constraints) */ if (show & (mon_show_bans | mon_show_tickets)) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input); unpack_constraints(cib_constraints, &data_set); } switch (output_format) { case mon_output_html: case mon_output_cgi: if (print_html_status(&data_set, output_filename, stonith_history) != 0) { fprintf(stderr, "Critical: Unable to output html file\n"); clean_up(CRM_EX_CANTCREAT); + return FALSE; } break; case mon_output_xml: print_xml_status(&data_set, stonith_history); break; case mon_output_monitor: print_simple_status(&data_set, stonith_history); if (has_warnings) { clean_up(MON_STATUS_WARN); + return FALSE; } break; case mon_output_plain: case mon_output_console: print_status(&data_set, stonith_history); break; case mon_output_none: break; } stonith_history_free(stonith_history); stonith_history = NULL; cleanup_alloc_calculations(&data_set); return TRUE; } static void mon_st_callback_event(stonith_t * st, stonith_event_t * e) { if (st->state == stonith_disconnected) { /* disconnect cib as well and have everything reconnect */ mon_cib_connection_destroy(NULL); } else if (external_agent) { char *desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)", e->operation, e->origin, e->target, pcmk_strerror(e->result), e->id); send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc); free(desc); } } static void kick_refresh(gboolean data_updated) { static int updates = 0; long now = time(NULL); if (data_updated) { updates++; } if(refresh_timer == NULL) { refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL); } /* Refresh * - immediately if the last update was more than 5s ago * - every 10 cib-updates * - at most 2s after the last update */ if ((now - last_refresh) > (reconnect_msec / 1000)) { mainloop_set_trigger(refresh_trigger); mainloop_timer_stop(refresh_timer); updates = 0; } else if(updates >= 10) { mainloop_set_trigger(refresh_trigger); mainloop_timer_stop(refresh_timer); updates = 0; } else { mainloop_timer_start(refresh_timer); } } static void mon_st_callback_display(stonith_t * st, stonith_event_t * e) { if (st->state == stonith_disconnected) { /* disconnect cib as well and have everything reconnect */ mon_cib_connection_destroy(NULL); } else { print_dot(); kick_refresh(TRUE); } } static void clean_up_connections(void) { if (cib != NULL) 
{ cib->cmds->signoff(cib); cib_delete(cib); cib = NULL; } if (st != NULL) { if (st->state != stonith_disconnected) { st->cmds->remove_notification(st, T_STONITH_NOTIFY_DISCONNECT); st->cmds->remove_notification(st, T_STONITH_NOTIFY_FENCE); st->cmds->remove_notification(st, T_STONITH_NOTIFY_HISTORY); st->cmds->disconnect(st); } stonith_api_delete(st); st = NULL; } } /* * De-init ncurses, disconnect from the CIB manager, disconnect fencing, * deallocate memory and show usage-message if requested. + * + * We don't actually return, but nominally returning crm_exit_t allows a usage + * like "return clean_up(exit_code);" which helps static analysis understand the + * code flow. */ -static void +static crm_exit_t clean_up(crm_exit_t exit_code) { #if CURSES_ENABLED if (curses_console_initialized) { output_format = mon_output_plain; echo(); nocbreak(); endwin(); curses_console_initialized = FALSE; } #endif clean_up_connections(); free(output_filename); free(pid_file); if (exit_code == CRM_EX_USAGE) { if (output_format == mon_output_cgi) { fprintf(stdout, "Content-Type: text/plain\n" "Status: 500\n\n"); } else { - crm_help('?', CRM_EX_USAGE); + return crm_help('?', CRM_EX_USAGE); } } - crm_exit(exit_code); + return crm_exit(exit_code); } diff --git a/tools/crm_node.c b/tools/crm_node.c index c09fa0ddd2..23ce1da332 100644 --- a/tools/crm_node.c +++ b/tools/crm_node.c @@ -1,585 +1,588 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include static int command = 0; static char *pid_s = NULL; static GMainLoop *mainloop = NULL; static crm_exit_t exit_code = CRM_EX_OK; static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {"quiet", 0, 0, 'Q', "\tEssential output only"}, {"-spacer-", 1, 0, '-', "\nCommands:"}, {"name", 0, 0, 'n', "\tDisplay the name used by the cluster for this node"}, {"name-for-id", 1, 0, 'N', "\tDisplay the name used by the cluster for the node with the specified id"}, {"quorum", 0, 0, 'q', "\tDisplay a 1 if our partition has quorum, 0 if not"}, {"list", 0, 0, 'l', "\tDisplay all known members (past and present) of this cluster"}, {"partition", 0, 0, 'p', "Display the members of this partition"}, {"cluster-id", 0, 0, 'i', "Display this node's cluster id"}, {"remove", 1, 0, 'R', "(Advanced) Remove the (stopped) node with the specified name from Pacemaker's configuration and caches"}, {"-spacer-", 1, 0, '-', "(the node must already have been removed from the underlying cluster stack configuration)"}, {"-spacer-", 1, 0, '-', "\nAdditional Options:"}, {"force", 0, 0, 'f'}, #if SUPPORT_COROSYNC { "corosync", 0, 0, 'C', NULL, pcmk_option_hidden }, #endif // @TODO add timeout option for when IPC replies are needed {0, 0, 0, 0} }; /*! 
* \internal * \brief Exit crm_node * Clean up memory, and either quit mainloop (if running) or exit * * \param[in] value Exit status */ static void crm_node_exit(crm_exit_t value) { if (pid_s) { free(pid_s); pid_s = NULL; } exit_code = value; if (mainloop && g_main_loop_is_running(mainloop)) { g_main_loop_quit(mainloop); } else { crm_exit(exit_code); } } static void exit_disconnect(gpointer user_data) { fprintf(stderr, "error: Lost connection to cluster\n"); crm_node_exit(CRM_EX_DISCONNECT); } typedef int (*ipc_dispatch_fn) (const char *buffer, ssize_t length, gpointer userdata); static crm_ipc_t * new_mainloop_for_ipc(const char *system, ipc_dispatch_fn dispatch) { mainloop_io_t *source = NULL; crm_ipc_t *ipc = NULL; struct ipc_client_callbacks ipc_callbacks = { .dispatch = dispatch, .destroy = exit_disconnect }; mainloop = g_main_loop_new(NULL, FALSE); source = mainloop_add_ipc_client(system, G_PRIORITY_DEFAULT, 0, NULL, &ipc_callbacks); ipc = mainloop_get_ipc_client(source); if (ipc == NULL) { fprintf(stderr, "error: Could not connect to cluster (is it running?)\n"); crm_node_exit(CRM_EX_DISCONNECT); } return ipc; } static void run_mainloop_and_exit() { g_main_loop_run(mainloop); g_main_loop_unref(mainloop); mainloop = NULL; crm_node_exit(exit_code); } static int send_controller_hello(crm_ipc_t *controller) { xmlNode *hello = NULL; int rc; pid_s = crm_getpid_s(); hello = create_hello_message(pid_s, crm_system_name, "1", "0"); rc = crm_ipc_send(controller, hello, 0, 0, NULL); free_xml(hello); return (rc < 0)? rc : 0; } static int send_node_info_request(crm_ipc_t *controller, uint32_t nodeid) { xmlNode *ping = NULL; int rc; ping = create_request(CRM_OP_NODE_INFO, NULL, NULL, CRM_SYSTEM_CRMD, crm_system_name, pid_s); if (nodeid > 0) { crm_xml_add_int(ping, XML_ATTR_ID, nodeid); } rc = crm_ipc_send(controller, ping, 0, 0, NULL); free_xml(ping); return (rc < 0)? rc : 0; } static int dispatch_controller(const char *buffer, ssize_t length, gpointer userdata) { xmlNode *message = string2xml(buffer); xmlNode *data = NULL; const char *value = NULL; if (message == NULL) { fprintf(stderr, "error: Could not understand reply from controller\n"); crm_node_exit(CRM_EX_PROTOCOL); return 0; } crm_log_xml_trace(message, "controller reply"); exit_code = CRM_EX_PROTOCOL; // Validate reply value = crm_element_value(message, F_CRM_MSG_TYPE); if (safe_str_neq(value, XML_ATTR_RESPONSE)) { fprintf(stderr, "error: Message from controller was not a reply\n"); goto done; } value = crm_element_value(message, XML_ATTR_REFERENCE); if (value == NULL) { fprintf(stderr, "error: Controller reply did not specify original message\n"); goto done; } data = get_message_xml(message, F_CRM_DATA); if (data == NULL) { fprintf(stderr, "error: Controller reply did not contain any data\n"); goto done; } switch (command) { case 'i': value = crm_element_value(data, XML_ATTR_ID); if (value == NULL) { fprintf(stderr, "error: Controller reply did not contain node ID\n"); } else { printf("%s\n", value); exit_code = CRM_EX_OK; } break; case 'n': case 'N': value = crm_element_value(data, XML_ATTR_UNAME); if (value == NULL) { fprintf(stderr, "Node is not known to cluster\n"); exit_code = CRM_EX_NOHOST; } else { printf("%s\n", value); exit_code = CRM_EX_OK; } break; case 'q': value = crm_element_value(data, XML_ATTR_HAVE_QUORUM); if (value == NULL) { fprintf(stderr, "error: Controller reply did not contain quorum status\n"); } else { bool quorum = crm_is_true(value); printf("%d\n", quorum); exit_code = quorum? 
CRM_EX_OK : CRM_EX_QUORUM; } break; default: fprintf(stderr, "internal error: Controller reply not expected\n"); exit_code = CRM_EX_SOFTWARE; break; } done: free_xml(message); crm_node_exit(exit_code); return 0; } static void run_controller_mainloop(uint32_t nodeid) { crm_ipc_t *controller = NULL; int rc; controller = new_mainloop_for_ipc(CRM_SYSTEM_CRMD, dispatch_controller); rc = send_controller_hello(controller); if (rc < 0) { fprintf(stderr, "error: Could not register with controller: %s\n", pcmk_strerror(rc)); crm_node_exit(crm_errno2exit(rc)); } rc = send_node_info_request(controller, nodeid); if (rc < 0) { fprintf(stderr, "error: Could not ping controller: %s\n", pcmk_strerror(rc)); crm_node_exit(crm_errno2exit(rc)); } // Run main loop to get controller reply via dispatch_controller() run_mainloop_and_exit(); } static void print_node_name() { // Check environment first (i.e. when called by resource agent) const char *name = getenv("OCF_RESKEY_" CRM_META "_" XML_LRM_ATTR_TARGET); if (name != NULL) { printf("%s\n", name); crm_node_exit(CRM_EX_OK); } else { // Otherwise ask the controller run_controller_mainloop(0); } } static int cib_remove_node(long id, const char *name) { int rc; cib_t *cib = NULL; xmlNode *node = NULL; xmlNode *node_state = NULL; crm_trace("Removing %s from the CIB", name); if(name == NULL && id == 0) { return -ENOTUNIQ; } node = create_xml_node(NULL, XML_CIB_TAG_NODE); node_state = create_xml_node(NULL, XML_CIB_TAG_STATE); crm_xml_add(node, XML_ATTR_UNAME, name); crm_xml_add(node_state, XML_ATTR_UNAME, name); if (id > 0) { crm_xml_set_id(node, "%ld", id); crm_xml_add(node_state, XML_ATTR_ID, ID(node)); } cib = cib_new(); cib->cmds->signon(cib, crm_system_name, cib_command); rc = cib->cmds->remove(cib, XML_CIB_TAG_NODES, node, cib_sync_call); if (rc != pcmk_ok) { printf("Could not remove %s[%ld] from " XML_CIB_TAG_NODES ": %s", name, id, pcmk_strerror(rc)); } rc = cib->cmds->remove(cib, XML_CIB_TAG_STATUS, node_state, cib_sync_call); if (rc != pcmk_ok) { printf("Could not remove %s[%ld] from " XML_CIB_TAG_STATUS ": %s", name, id, pcmk_strerror(rc)); } cib->cmds->signoff(cib); cib_delete(cib); return rc; } static int tools_remove_node_cache(const char *node_name, long nodeid, const char *target) { int rc = -1; crm_ipc_t *conn = crm_ipc_new(target, 0); xmlNode *cmd = NULL; if (!conn) { return -ENOTCONN; } if (!crm_ipc_connect(conn)) { crm_perror(LOG_ERR, "Connection to %s failed", target); crm_ipc_destroy(conn); return -ENOTCONN; } if(safe_str_eq(target, CRM_SYSTEM_CRMD)) { // The controller requires a hello message before sending a request rc = send_controller_hello(conn); if (rc < 0) { fprintf(stderr, "error: Could not register with controller: %s\n", pcmk_strerror(rc)); return rc; } } crm_trace("Removing %s[%ld] from the %s membership cache", node_name, nodeid, target); if(safe_str_eq(target, T_ATTRD)) { cmd = create_xml_node(NULL, __FUNCTION__); crm_xml_add(cmd, F_TYPE, T_ATTRD); crm_xml_add(cmd, F_ORIG, crm_system_name); crm_xml_add(cmd, F_ATTRD_TASK, ATTRD_OP_PEER_REMOVE); crm_xml_add(cmd, F_ATTRD_HOST, node_name); if (nodeid > 0) { crm_xml_add_int(cmd, F_ATTRD_HOST_ID, (int) nodeid); } } else { cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, target, crm_system_name, pid_s); if (nodeid > 0) { crm_xml_set_id(cmd, "%ld", nodeid); } crm_xml_add(cmd, XML_ATTR_UNAME, node_name); } rc = crm_ipc_send(conn, cmd, 0, 0, NULL); crm_debug("%s peer cache cleanup for %s (%ld): %d", target, node_name, nodeid, rc); if (rc > 0) { rc = cib_remove_node(nodeid, node_name); 
} if (conn) { crm_ipc_close(conn); crm_ipc_destroy(conn); } free_xml(cmd); return rc > 0 ? 0 : rc; } static void remove_node(const char *target_uname) { int d = 0; long nodeid = 0; const char *node_name = NULL; char *endptr = NULL; const char *daemons[] = { CRM_SYSTEM_CRMD, "stonith-ng", T_ATTRD, CRM_SYSTEM_MCP, }; // Check whether node was specified by name or numeric ID errno = 0; nodeid = strtol(target_uname, &endptr, 10); if ((errno != 0) || (endptr == target_uname) || (*endptr != '\0') || (nodeid <= 0)) { // It's not a positive integer, so assume it's a node name nodeid = 0; node_name = target_uname; } for (d = 0; d < DIMOF(daemons); d++) { if (tools_remove_node_cache(node_name, nodeid, daemons[d])) { crm_err("Failed to connect to %s to remove node '%s'", daemons[d], target_uname); crm_node_exit(CRM_EX_ERROR); return; } } crm_node_exit(CRM_EX_OK); } static gint compare_node_xml(gconstpointer a, gconstpointer b) { const char *a_name = crm_element_value((xmlNode*) a, "uname"); const char *b_name = crm_element_value((xmlNode*) b, "uname"); return strcmp((a_name? a_name : ""), (b_name? b_name : "")); } static int node_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata) { GList *nodes = NULL; xmlNode *node = NULL; xmlNode *msg = string2xml(buffer); const char *uname; const char *state; if (msg == NULL) { fprintf(stderr, "error: Could not understand pacemakerd response\n"); crm_node_exit(CRM_EX_PROTOCOL); return 0; } crm_log_xml_trace(msg, "message"); for (node = __xml_first_child(msg); node != NULL; node = __xml_next(node)) { nodes = g_list_insert_sorted(nodes, node, compare_node_xml); } for (GList *iter = nodes; iter; iter = iter->next) { node = (xmlNode*) iter->data; uname = crm_element_value(node, "uname"); state = crm_element_value(node, "state"); if (command == 'l') { int id = 0; crm_element_value_int(node, "id", &id); printf("%d %s %s\n", id, (uname? uname : ""), (state? state : "")); // This is CRM_NODE_MEMBER but we don't want to include cluster header } else if ((command == 'p') && safe_str_eq(state, "member")) { printf("%s ", (uname? 
uname : "")); } } if (command == 'p') { fprintf(stdout, "\n"); } free_xml(msg); crm_node_exit(CRM_EX_OK); return 0; } static void run_pacemakerd_mainloop() { crm_ipc_t *ipc = NULL; xmlNode *poke = NULL; ipc = new_mainloop_for_ipc(CRM_SYSTEM_MCP, node_mcp_dispatch); // Sending anything will get us a list of nodes poke = create_xml_node(NULL, "poke"); crm_ipc_send(ipc, poke, 0, 0, NULL); free_xml(poke); // Handle reply via node_mcp_dispatch() run_mainloop_and_exit(); } int main(int argc, char **argv) { int flag = 0; int argerr = 0; uint32_t nodeid = 0; gboolean force_flag = FALSE; gboolean dangerous_cmd = FALSE; int option_index = 0; const char *target_uname = NULL; crm_log_cli_init("crm_node"); crm_set_options(NULL, "command [options]", long_options, "Tool for displaying low-level node information"); while (flag >= 0) { flag = crm_get_option(argc, argv, &option_index); switch (flag) { case -1: break; case 'V': crm_bump_log_level(argc, argv); break; case '$': case '?': - crm_help(flag, CRM_EX_OK); + return crm_help(flag, CRM_EX_OK); break; case 'Q': // currently unused break; case 'C': // unused and deprecated break; case 'f': force_flag = TRUE; break; case 'R': command = flag; dangerous_cmd = TRUE; target_uname = optarg; + if (optarg == NULL) { + ++argerr; + } break; case 'N': command = flag; nodeid = crm_parse_int(optarg, NULL); break; case 'p': case 'q': case 'i': case 'l': case 'n': command = flag; break; default: ++argerr; break; } } if (optind > argc) { ++argerr; } if (argerr) { - crm_help('?', CRM_EX_USAGE); + return crm_help('?', CRM_EX_USAGE); } if (dangerous_cmd && force_flag == FALSE) { fprintf(stderr, "The supplied command is considered dangerous." " To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n"); crm_node_exit(CRM_EX_USAGE); } switch (command) { case 'n': print_node_name(); break; case 'R': remove_node(target_uname); break; case 'i': case 'q': case 'N': run_controller_mainloop(nodeid); break; case 'l': case 'p': run_pacemakerd_mainloop(); break; default: break; } fprintf(stderr, "error: Must specify a command option\n"); crm_node_exit(CRM_EX_USAGE); return CRM_EX_USAGE; }