diff --git a/GNUmakefile b/GNUmakefile index 514457a8d9..483ff2da8a 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -1,324 +1,330 @@ # # Copyright (C) 2008 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # default: $(shell test ! -e configure && echo init) $(shell test -e configure && echo core) -include Makefile PACKAGE ?= pacemaker # Force 'make dist' to be consistent with 'make export' distdir = $(PACKAGE)-$(TAG) TARFILE = $(distdir).tar.gz DIST_ARCHIVES = $(TARFILE) RPM_ROOT = $(shell pwd) RPM_OPTS = --define "_sourcedir $(RPM_ROOT)" \ --define "_specdir $(RPM_ROOT)" \ --define "_srcrpmdir $(RPM_ROOT)" \ MOCK_OPTIONS ?= --resultdir=$(RPM_ROOT)/mock --no-cleanup-after # Default to fedora compliant spec files # SLES: /etc/SuSE-release # openSUSE: /etc/SuSE-release # RHEL: /etc/redhat-release # Fedora: /etc/fedora-release, /etc/redhat-release, /etc/system-release F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora}) ARCH ?= $(shell test -e /etc/fedora-release && rpm --eval %{_arch}) MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH)) DISTRO ?= $(shell test -e /etc/SuSE-release && echo suse; echo fedora) TAG ?= $(shell git log --pretty="format:%H" -n 1) WITH ?= --without doc #WITH ?= --without=doc --with=gcov LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1) LAST_RELEASE ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1) NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}') beekhof: echo $(LAST_RELEASE) $(NEXT_RELEASE) BUILD_COUNTER ?= build.counter LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER)) COUNT = $(shell expr 1 + $(LAST_COUNT)) init: ./autogen.sh export: rm -f $(PACKAGE)-dirty.tar.* $(PACKAGE)-tip.tar.* $(PACKAGE)-HEAD.tar.* if [ ! 
-f $(TARFILE) ]; then \ rm -f $(PACKAGE).tar.*; \ if [ $(TAG) = dirty ]; then \ git commit -m "DO-NOT-PUSH" -a; \ git archive --prefix=$(distdir)/ HEAD | gzip > $(TARFILE); \ git reset --mixed HEAD^; \ else \ git archive --prefix=$(distdir)/ $(TAG) | gzip > $(TARFILE); \ fi; \ echo `date`: Rebuilt $(TARFILE); \ else \ echo `date`: Using existing tarball: $(TARFILE); \ fi $(PACKAGE)-opensuse.spec: $(PACKAGE)-suse.spec cp $^ $@ @echo Rebuilt $@ $(PACKAGE)-suse.spec: $(PACKAGE).spec.in GNUmakefile rm -f $@ if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \ cp $(PACKAGE).spec.in $@; \ echo "Rebuilt $@ (local modifications)"; \ elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \ cp $(PACKAGE).spec.in $@; \ echo "Rebuilt $@"; \ else \ git show $(TAG):$(PACKAGE).spec.in >> $@; \ echo "Rebuilt $@ from $(TAG)"; \ fi sed -i s:%{_docdir}/%{name}:%{_docdir}/%{name}-%{version}:g $@ sed -i s:corosynclib:libcorosync:g $@ sed -i s:libexecdir}/lcrso:libdir}/lcrso:g $@ sed -i 's:%{name}-libs:lib%{name}3:g' $@ sed -i s:heartbeat-libs:heartbeat:g $@ sed -i s:cluster-glue-libs:libglue:g $@ sed -i s:libselinux-devel:automake:g $@ sed -i s:lm_sensors-devel:automake:g $@ sed -i s:bzip2-devel:libbz2-devel:g $@ sed -i s:bcond_without\ publican:bcond_with\ publican:g $@ sed -i s:docbook-style-xsl:docbook-xsl-stylesheets:g $@ sed -i s:libtool-ltdl-devel::g $@ sed -i s:dbus-devel:dbus-1-devel:g $@ sed -i s:publican::g $@ sed -i s:byacc::g $@ sed -i s:global\ cs_major.*:global\ cs_major\ 1:g $@ sed -i s:global\ cs_minor.*:global\ cs_minor\ 4:g $@ sed -i s:gnutls-devel:libgnutls-devel:g $@ sed -i s:189:90:g $@ sed -i 's@python-devel@python-devel python-curses python-xml@' $@ sed -i 's@Requires: python@Requires: python python-curses python-xml@' $@ sed -i 's@%systemd_post pacemaker.service@if [ ZZZ -eq 1 ]; then systemctl preset pacemaker.service || : ; fi@' $@ sed -i 's@%systemd_postun_with_restart pacemaker.service@systemctl daemon-reload || : ; if [ ZZZ -ge 1 ]; then systemctl try-restart pacemaker.service || : ; fi@' $@ sed -i 's@%systemd_preun pacemaker.service@if [ ZZZ -eq 0 ]; then systemctl --no-reload disable pacemaker.service || : ; systemctl stop pacemaker.service || : ; fi@' $@ sed -i 's@%systemd_post pacemaker_remote.service@if [ ZZZ -eq 1 ]; then systemctl preset pacemaker_remote.service || : ; fi@' $@ sed -i 's@%systemd_postun_with_restart pacemaker_remote.service@systemctl daemon-reload || : ; if [ ZZZ -ge 1 ]; then systemctl try-restart pacemaker_remote.service || : ; fi@' $@ sed -i 's@%systemd_preun pacemaker_remote.service@if [ ZZZ -eq 0 ]; then systemctl --no-reload disable pacemaker_remote.service || : ; systemctl stop pacemaker_remote.service || : ; fi@' $@ sed -i "s@ZZZ@\o0441@g" $@ @echo "Applied SUSE-specific modifications" # Works for all fedora based distros $(PACKAGE)-%.spec: $(PACKAGE).spec.in rm -f $@ if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \ cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \ echo "Rebuilt $@ (local modifications)"; \ elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \ cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \ echo "Rebuilt $@"; \ else \ git show $(TAG):$(PACKAGE).spec.in >> $(PACKAGE)-$*.spec; \ echo "Rebuilt $@ from $(TAG)"; \ fi srpm-%: export $(PACKAGE)-%.spec rm -f *.src.rpm cp $(PACKAGE)-$*.spec $(PACKAGE).spec if [ -e $(BUILD_COUNTER) ]; then \ echo $(COUNT) > $(BUILD_COUNTER); \ fi sed -i 's/global\ specversion.*/global\ specversion\ $(COUNT)/' $(PACKAGE).spec sed -i 's/global\ commit.*/global\ 
commit\ $(TAG)/' $(PACKAGE).spec case "$(WITH)" in \ *pre_release*) \ sed -i 's/Version:.*/Version:\ $(shell echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/' $(PACKAGE).spec;;\ *) \ sed -i 's/Version:.*/Version:\ $(shell git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)/' $(PACKAGE).spec;;\ esac rpmbuild -bs --define "dist .$*" $(RPM_OPTS) $(WITH) $(PACKAGE).spec chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG) echo "Done" mock-next: make F=$(shell expr 1 + $(F)) mock mock-rawhide: make F=rawhide mock mock-install-%: echo "Installing packages" mock --root=$* $(MOCK_OPTIONS) --install $(RPM_ROOT)/mock/*.rpm vi sudo valgrind lcov gdb fence-agents mock-sh: mock-sh-$(MOCK_CFG) echo "Done" mock-sh-%: echo "Connecting" mock --root=$* $(MOCK_OPTIONS) --shell echo "Done" # eg. WITH="--with cman" make rpm mock-%: make srpm-$(firstword $(shell echo $(@:mock-%=%) | tr '-' ' ')) -rm -rf $(RPM_ROOT)/mock @echo "mock --root=$* --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm" mock --root=$* --no-cleanup-after --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm srpm: srpm-$(DISTRO) echo "Done" mock: mock-$(MOCK_CFG) echo "Done" rpm-dep: $(PACKAGE)-$(DISTRO).spec if [ x != x`which yum-builddep 2>/dev/null` ]; then \ echo "Installing with yum-builddep"; \ sudo yum-builddep $(PACKAGE)-$(DISTRO).spec; \ elif [ x != x`which yum 2>/dev/null` ]; then \ echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \ sudo yum install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' '); \ elif [ x != x`which zypper` ]; then \ echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \ sudo zypper install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ');\ else \ echo "I don't know how to install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')";\ fi rpm: srpm @echo To create custom builds, edit the flags and options in $(PACKAGE).spec first rpmbuild $(RPM_OPTS) $(WITH) --rebuild $(RPM_ROOT)/*.src.rpm release: make TAG=$(LAST_RELEASE) rpm rc: make TAG=$(LAST_RC) rpm dirty: make TAG=dirty mock COVERITY_DIR = $(shell pwd)/coverity-$(TAG) COVFILE = pacemaker-coverity-$(TAG).tgz COVHOST ?= scan5.coverity.com COVPASS ?= password # Public coverity coverity: test -e configure || ./autogen.sh test -e Makefile || ./configure make core-clean rm -rf $(COVERITY_DIR) cov-build --dir $(COVERITY_DIR) make core tar czf $(COVFILE) --transform=s@.*$(TAG)@cov-int@ $(COVERITY_DIR) @echo "Uploading to public Coverity instance..." curl --form file=@$(COVFILE) --form project=$(PACKAGE) --form password=$(COVPASS) --form email=andrew@beekhof.net http://$(COVHOST)/cgi-bin/upload.py rm -rf $(COVFILE) $(COVERITY_DIR) coverity-corp: test -e configure || ./autogen.sh test -e Makefile || ./configure make core-clean rm -rf $(COVERITY_DIR) cov-build --dir $(COVERITY_DIR) make core @echo "Waiting for a corporate Coverity license..." 
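# A worked example (hypothetical target name) of how the mock-% pattern rule
# above picks a spec flavour: invoking "make mock-fedora-19-x86_64" expands
#   $(@:mock-%=%)                    -> fedora-19-x86_64
#   $(shell echo ... | tr '-' ' ')   -> fedora 19 x86_64
#   $(firstword ...)                 -> fedora
# so the rule first runs "make srpm-fedora", then rebuilds the resulting
# src.rpm inside the fedora-19-x86_64 mock chroot.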
cov-analyze --dir $(COVERITY_DIR) --wait-for-license cov-format-errors --dir $(COVERITY_DIR) --emacs-style > $(TAG).coverity cov-format-errors --dir $(COVERITY_DIR) rsync -avzxlSD --progress $(COVERITY_DIR)/c/output/errors/ root@www.clusterlabs.org:/var/www/html/coverity/$(PACKAGE)/$(TAG) make core-clean # cov-commit-defects --host $(COVHOST) --dir $(COVERITY_DIR) --stream $(PACKAGE) --user auto --password $(COVPASS) rm -rf $(COVERITY_DIR) global: clean-generic gtags -q %.8.html: %.8 echo groff -mandoc `man -w ./$<` -T html > $@ groff -mandoc `man -w ./$<` -T html > $@ rsync -azxlSD --progress $@ root@www.clusterlabs.org:/var/www/html/man/ %.7.html: %.7 echo groff -mandoc `man -w ./$<` -T html > $@ groff -mandoc `man -w ./$<` -T html > $@ rsync -azxlSD --progress $@ root@www.clusterlabs.org:/var/www/html/man/ doxygen: doxygen Doxyfile abi: abi-check pacemaker $(LAST_RELEASE) $(TAG) abi-www: abi-check -u pacemaker $(LAST_RELEASE) $(TAG) www: all global doxygen find . -name "[a-z]*.8" -exec make \{\}.html \; find . -name "[a-z]*.7" -exec make \{\}.html \; htags -sanhIT rsync -avzxlSD --progress HTML/ root@www.clusterlabs.org:/var/www/html/global/$(PACKAGE)/$(TAG) rsync -avzxlSD --progress doc/api/html/ root@www.clusterlabs.org:/var/www/html/doxygen/$(PACKAGE)/$(TAG) make -C doc www make coverity summary: @printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)-1" @printf "\n- Update source tarball to revision: `git id`" @printf "\n- Changesets: `git log --pretty=format:'%h' $(LAST_RELEASE)..HEAD | wc -l`" @printf "\n- Diff: " @git diff -r $(LAST_RELEASE)..HEAD --stat include lib mcp pengine/*.c pengine/*.h cib crmd fencing lrmd tools xml | tail -n 1 rc-changes: @make NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes changes: summary @printf "\n- Features added since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf @printf "\n- Changes since $(LAST_RELEASE)\n" @git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e High: -e Fix: -e Bug | sed -e 's@Fix:@@' -e s@High:@@ -e s@Fencing:@fencing:@ -e 's@Bug@ Bug@' -e s@PE:@pengine:@ | sort -uf changelog: @make changes > ChangeLog @printf "\n">> ChangeLog git show $(LAST_RELEASE):ChangeLog >> ChangeLog @echo -e "\033[1;35m -- Don't forget to run the bumplibs.sh script! --\033[0m" indent: find . -name "*.h" -exec ./p-indent \{\} \; find . -name "*.c" -exec ./p-indent \{\} \; git co HEAD crmd/fsa_proto.h lib/gnu rel-tags: tags find . -name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \; CLANG_analyzer = $(shell which scan-build) CLANG_checkers = +check: clang cppcheck + +# Extra cppcheck options: --enable=all --inconclusive --std=posix +cppcheck: + for d in $(LIBLTDL_DIR) replace lib mcp attrd pengine cib crmd fencing lrmd tools; do cppcheck -q $$d; done + clang: test -e $(CLANG_analyzer) scan-build $(CLANG_checkers:%=-enable-checker %) make # V3 = scandir unsetenv alphasort # V2 = setenv strerror strchrnul strndup # http://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import GNU_MODS = crypto/md5 gnulib-update: -test ! 
-e gnulib && git clone git://git.savannah.gnu.org/gnulib.git cd gnulib && git pull gnulib/gnulib-tool --source-base=lib/gnu --lgpl=2 --no-vc-files --import $(GNU_MODS) diff --git a/crmd/lrm.c b/crmd/lrm.c index 00f94ea6ac..a5234c672a 100644 --- a/crmd/lrm.c +++ b/crmd/lrm.c @@ -1,2135 +1,2135 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op); static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name); static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request); void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static void lrm_connection_destroy(void) { if (is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("LRM Connection failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("LRM Connection disconnected"); } } static char * make_stop_id(const char *rsc, int call_id) { char *op_id = NULL; op_id = calloc(1, strlen(rsc) + 34); if (op_id != NULL) { snprintf(op_id, strlen(rsc) + 34, "%s:%d", rsc, call_id); } return op_id; } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL); return; } if (safe_str_eq(op->op_type, RSC_NOTIFY)) { return; } 
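/* Elsewhere in this file, pending operations are tracked under the key that
 * make_stop_id() above builds: "<rsc_id>:<call_id>". A minimal sketch
 * (resource name and call id hypothetical):
 *
 *   char *key = make_stop_id("myRsc", 42);   yields "myRsc:42"
 *   ...used to look entries up in, and insert them into, pending_ops...
 *   free(key);
 */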
crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = calloc(1, sizeof(rsc_history_t)); entry->id = strdup(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = strdup(rsc->type); entry->rsc.class = strdup(rsc->class); if (rsc->provider) { entry->rsc.provider = strdup(rsc->provider); } else { entry->rsc.provider = NULL; } } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_LRM_OP_CANCELLED) { if (op->interval > 0) { GList *gIter, *gIterNext; crm_trace("Removing cancelled recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIterNext) { lrmd_event_data_t *existing = gIter->data; gIterNext = gIter->next; if (crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) && safe_str_eq(op->op_type, existing->op_type) && op->interval == existing->interval) { lrmd_free_event(existing); entry->recurring_op_list = g_list_delete_link(entry->recurring_op_list, gIter); } } return; } else { crm_trace("Skipping %s_%s_%d rc=%d, status=%d", op->rsc_id, op->op_type, op->interval, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* We must store failed monitors here * - otherwise the block below will cause them to be forgetten them when a stop happens */ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && (safe_str_eq(CRMD_ACTION_START, op->op_type) || safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval > 0) { GListPtr iter = NULL; for(iter = entry->recurring_op_list; iter; iter = iter->next) { lrmd_event_data_t *o = iter->data; /* op->rsc_id is implied */ if(op->interval == o->interval && strcmp(op->op_type, o->op_type) == 0) { crm_trace("Removing existing recurring op entry: %s_%s_%d", op->rsc_id, op->op_type, op->interval); entry->recurring_op_list = g_list_remove(entry->recurring_op_list, o); break; } } crm_trace("Adding recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { GList *gIter = entry->recurring_op_list; crm_trace("Dropping %d recurring ops because of: %s_%s_%d", g_list_length(gIter), op->rsc_id, op->op_type, op->interval); for (; gIter != NULL; gIter = gIter->next) { lrmd_free_event(gIter->data); } g_list_free(entry->recurring_op_list); entry->recurring_op_list = NULL; } } void lrm_op_callback(lrmd_event_data_t * op) { const char *nodename = NULL; lrm_state_t *lrm_state = NULL; CRM_CHECK(op != NULL, return); /* determine the node name for this connection. */ nodename = op->remote_nodename ? 
op->remote_nodename : fsa_our_uname; if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) { /* if this is the local lrmd ipc connection, set the right bits in the * crmd when the connection goes down */ lrm_connection_destroy(); return; } else if (op->type != lrmd_event_exec_complete) { /* we only need to process execution results */ return; } lrm_state = lrm_state_find(nodename); CRM_ASSERT(lrm_state != NULL); process_lrm_event(lrm_state, op); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local lrmd connections. Remote connections are handled as * resources within the pengine. Connecting to and disconnecting from remote lrmd * instances is handled differently than for the local one. */ lrm_state_t *lrm_state = NULL; if(fsa_our_uname == NULL) { return; /* Nothing to do */ } lrm_state = lrm_state_find_or_create(fsa_our_uname); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } clear_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Disconnecting from the LRM"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state); crm_notice("Disconnected from the LRM"); } if (action & A_LRM_CONNECT) { int ret = pcmk_ok; crm_debug("Connecting to the LRM"); ret = lrm_state_ipc_connect(lrm_state); if (ret != pcmk_ok) { if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to sign on to the LRM %d" " (%d max) times", lrm_state->num_lrm_register_fails, MAX_LRM_REG_FAILS); crm_timer_start(wait_timer); crmd_fsa_stall(FALSE); return; } } if (ret != pcmk_ok) { crm_err("Failed to sign on to the LRM %d" " (max) times", lrm_state->num_lrm_register_fails); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } set_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("LRM connection established"); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; struct recurring_op_s *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (is_set(fsa_input_register, R_SHUTDOWN)) { when = "shutdown...
waiting"; } if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) { guint removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_actions, lrm_state); crm_notice("Stopped %u recurring operations at %s (%u ops remaining)", removed, when, g_hash_table_size(lrm_state->pending_ops)); } if (lrm_state->pending_ops) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending LRM operations at %s", counter, when); if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (cur_state == S_TERMINATE || is_set(fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; crm_trace("Found %s active", entry->id); if (lrm_state->pending_ops) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (crm_str_eq(entry->id, pending->rsc_id, TRUE)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval == 0 ? 
"A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resources were active at %s.", counter, when); } return rc; } static char * get_rsc_metadata(const char *type, const char *class, const char *provider) { int rc = 0; char *metadata = NULL; /* Always use a local connection for this operation */ lrm_state_t *lrm_state = lrm_state_find(fsa_our_uname); CRM_CHECK(type != NULL, return NULL); CRM_CHECK(class != NULL, return NULL); CRM_CHECK(lrm_state != NULL, return NULL); if (provider == NULL) { provider = "heartbeat"; } crm_trace("Retreiving metadata for %s::%s:%s", type, class, provider); rc = lrm_state_get_metadata(lrm_state, class, provider, type, &metadata, 0); if (metadata) { /* copy the metadata because the LRM likes using * g_alloc instead of cl_malloc */ char *m_copy = strdup(metadata); g_free(metadata); metadata = m_copy; } else { crm_warn("No metadata found for %s::%s:%s: %s (%d)", type, class, provider, pcmk_strerror(rc), rc); } return metadata; } typedef struct reload_data_s { char *key; char *metadata; time_t last_query; gboolean can_reload; GListPtr restart_list; } reload_data_t; static void g_hash_destroy_reload(gpointer data) { reload_data_t *reload = data; free(reload->key); free(reload->metadata); g_list_free_full(reload->restart_list, free); free(reload); } GHashTable *reload_hash = NULL; static GListPtr get_rsc_restart_list(lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int len = 0; char *key = NULL; char *copy = NULL; const char *value = NULL; const char *provider = NULL; xmlNode *param = NULL; xmlNode *params = NULL; xmlNode *actions = NULL; xmlNode *metadata = NULL; time_t now = time(NULL); reload_data_t *reload = NULL; if (reload_hash == NULL) { reload_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_reload); } provider = rsc->provider; if (provider == NULL) { provider = "heartbeat"; } len = strlen(rsc->type) + strlen(rsc->class) + strlen(provider) + 4; key = malloc(len); if(key) { snprintf(key, len, "%s::%s:%s", rsc->type, rsc->class, provider); reload = g_hash_table_lookup(reload_hash, key); } if (reload && ((now - 9) > reload->last_query) && safe_str_eq(op->op_type, RSC_START)) { reload = NULL; /* re-query */ } if (reload == NULL) { xmlNode *action = NULL; reload = calloc(1, sizeof(reload_data_t)); g_hash_table_replace(reload_hash, key, reload); reload->last_query = now; reload->key = key; key = NULL; reload->metadata = get_rsc_metadata(rsc->type, rsc->class, provider); if(reload->metadata == NULL) { goto cleanup; } metadata = string2xml(reload->metadata); if (metadata == NULL) { crm_err("Metadata for %s::%s:%s is not valid XML", rsc->provider, rsc->class, rsc->type); goto cleanup; } actions = find_xml_node(metadata, "actions", TRUE); for (action = __xml_first_child(actions); action != NULL; action = __xml_next(action)) { if (crm_str_eq((const char *)action->name, "action", TRUE)) { value = crm_element_value(action, "name"); if (safe_str_eq("reload", value)) { reload->can_reload = TRUE; break; } } } if (reload->can_reload == FALSE) { goto cleanup; } params = find_xml_node(metadata, "parameters", TRUE); for (param = __xml_first_child(params); param != NULL; param = __xml_next(param)) { if (crm_str_eq((const char *)param->name, "parameter", TRUE)) { value = crm_element_value(param, "unique"); if (crm_is_true(value)) { value = crm_element_value(param, "name"); if (value == NULL) { crm_err("%s: NULL param", key); continue; } crm_debug("Attr %s is not reloadable", value); copy = strdup(value); 
CRM_LOG_ASSERT(copy != NULL); if(copy == NULL) { continue; }; reload->restart_list = g_list_append(reload->restart_list, copy); } } } } cleanup: free(key); free_xml(metadata); return reload->restart_list; } static void append_restart_list(lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, xmlNode * update, const char *version) { int len = 0; char *list = NULL; char *digest = NULL; const char *value = NULL; xmlNode *restart = NULL; GListPtr restart_list = NULL; GListPtr lpc = NULL; if (op->interval > 0) { /* monitors are not reloadable */ return; } else if (op->params == NULL) { crm_debug("%s has no parameters", ID(update)); return; } else if (rsc == NULL) { return; } else if (crm_str_eq(CRMD_ACTION_STOP, op->op_type, TRUE)) { /* Stopped resources don't need to be reloaded */ return; } else if (compare_version("1.0.8", version) > 0) { /* Caller version does not support reloads */ return; } restart_list = get_rsc_restart_list(rsc, op); if (restart_list == NULL) { /* Resource does not support reloads */ return; } restart = create_xml_node(NULL, XML_TAG_PARAMS); for (lpc = restart_list; lpc != NULL; lpc = lpc->next) { const char *param = (const char *)lpc->data; int start = len; CRM_LOG_ASSERT(param != NULL); if(param == NULL) { continue; }; value = g_hash_table_lookup(op->params, param); if (value != NULL) { crm_xml_add(restart, param, value); } len += strlen(param) + 2; list = realloc(list, len + 1); sprintf(list + start, " %s ", param); } digest = calculate_operation_digest(restart, version); crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); crm_trace("%s: %s, %s", rsc->id, digest, list); crm_log_xml_trace(restart, "restart digest source"); free_xml(restart); free(digest); free(list); } static gboolean build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *src) { int target_rc = 0; xmlNode *xml_op = NULL; const char *caller_version = CRM_FEATURE_SET; if (op == NULL) { return FALSE; } else if (AM_I_DC) { } else if (fsa_our_dc_version != NULL) { caller_version = fsa_our_dc_version; } else if (op->params == NULL) { caller_version = fsa_our_dc_version; } else { /* there is a small risk in formerly mixed clusters that * it will be sub-optimal. * however with our upgrade policy, the update we send * should still be completely supported anyway */ caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); crm_debug("Falling back to operation originator version: %s", caller_version); } target_rc = rsc_op_expected_rc(op); xml_op = create_operation_update(parent, op, caller_version, target_rc, src, LOG_DEBUG); crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, fsa_our_uname); /* For context during triage */ if (xml_op) { append_restart_list(rsc, op, xml_op, caller_version); } return TRUE; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval, entry->last->rc); if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) { /* a stricter check is too complex... 
* leave that to the PE */ return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if (entry->last->interval == 0 && entry->last->rc == PCMK_OCF_NOT_CONFIGURED) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.class); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container); } } - build_operation_update(xml_rsc, &(entry->rsc), entry->last, __FUNCTION__); build_operation_update(xml_rsc, &(entry->rsc), entry->failed, __FUNCTION__); + build_operation_update(xml_rsc, &(entry->rsc), entry->last, __FUNCTION__); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { build_operation_update(xml_rsc, &(entry->rsc), gIter->data, __FUNCTION__); } } return FALSE; } xmlNode * do_lrm_query_internal(lrm_state_t * lrm_state, gboolean is_replace) { xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; const char *uuid = NULL; if (safe_str_eq(lrm_state->node_name, fsa_our_uname)) { crm_node_t *peer = crm_get_peer(0, lrm_state->node_name); xml_state = do_update_node_cib(peer, node_update_cluster|node_update_peer, NULL, __FUNCTION__); /* The next two lines shouldn't be necessary for newer DCs */ crm_xml_add(xml_state, XML_NODE_JOIN_STATE, CRMD_JOINSTATE_MEMBER); crm_xml_add(xml_state, XML_NODE_EXPECTED, CRMD_JOINSTATE_MEMBER); uuid = fsa_our_uuid; } else { xml_state = create_xml_node(NULL, XML_CIB_TAG_STATE); crm_xml_add(xml_state, XML_NODE_IS_REMOTE, "true"); crm_xml_add(xml_state, XML_ATTR_ID, lrm_state->node_name); crm_xml_add(xml_state, XML_ATTR_UNAME, lrm_state->node_name); uuid = lrm_state->node_name; } xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, uuid); rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current state of the LRM"); return xml_state; } xmlNode * do_lrm_query(gboolean is_replace, const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (!lrm_state) { crm_err("Could not query lrm state for lrmd node %s", node_name); return NULL; } return do_lrm_query_internal(lrm_state, is_replace); } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, from_host, rsc_id, rc == pcmk_ok ? 
"" : " not"); op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); CRM_ASSERT(op != NULL); if (rc == pcmk_ok) { op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; } else { op->op_status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { /* this isn't expected - trigger a new transition */ time_t now = time(NULL); char *now_s = crm_itoa(now); crm_debug("Triggering a refresh after %s deleted %s from the LRM", from_sys, rsc_id); update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE, NULL, NULL); free(now_s); } } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (crm_str_eq(event->rsc, op->rsc, TRUE)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; struct recurring_op_s *pending = value; if (crm_str_eq(rsc, pending->rsc_id, TRUE)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } /* * Remove the rsc from the CIB * * Avoids refreshing the entire LRM section of this host */ #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name) { char *rsc_xpath = NULL; int max = 0; int rc = pcmk_ok; CRM_CHECK(rsc_id != NULL, return -ENXIO); max = strlen(rsc_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + 1; rsc_xpath = calloc(1, max); snprintf(rsc_xpath, max, rsc_template, lrm_state->node_name, rsc_id); rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath, NULL, NULL, call_options | cib_xpath, user_name); free(rsc_xpath); return rc; } static void delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, GHashTableIter * rsc_gIter, int rc, const char *user_name) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = strdup(rsc_id); if (rsc_gIter) g_hash_table_iter_remove(rsc_gIter); else g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); crm_debug("sync: Sending delete op for %s", rsc_id_copy); delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name); g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } /* * Remove the op from the CIB * * Avoids refreshing the entire LRM section of this host */ #define op_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s']" #define op_call_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s' and @"XML_LRM_ATTR_CALLID"='%d']" static void delete_op_entry(lrm_state_t * lrm_state, lrmd_event_data_t * op, const char *rsc_id, const char *key, int call_id) { xmlNode *xml_top = NULL; if (op != NULL) { xml_top = create_xml_node(NULL, 
XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); if (op->interval > 0) { char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ crm_xml_add(xml_top, XML_ATTR_ID, op_id); free(op_id); } crm_debug("async: Sending delete op for %s_%s_%d (call=%d)", op->rsc_id, op->op_type, op->interval, op->call_id); fsa_cib_conn->cmds->delete(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); } else if (rsc_id != NULL && key != NULL) { int max = 0; char *op_xpath = NULL; if (call_id > 0) { max = strlen(op_call_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + strlen(key) + 10; op_xpath = calloc(1, max); snprintf(op_xpath, max, op_call_template, lrm_state->node_name, rsc_id, key, call_id); } else { max = strlen(op_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + strlen(key) + 1; op_xpath = calloc(1, max); snprintf(op_xpath, max, op_template, lrm_state->node_name, rsc_id, key); } crm_debug("sync: Sending delete op for %s (call=%d)", rsc_id, call_id); fsa_cib_conn->cmds->delete(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath); free(op_xpath); } else { crm_err("Not enough information to delete op entry: rsc=%p key=%p", rsc_id, key); return; } crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } void lrm_clear_last_failure(const char *rsc_id, const char *node_name) { char *attr = NULL; GHashTableIter iter; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; rsc_history_t *entry = NULL; attr = generate_op_key(rsc_id, "last_failure", 0); /* This clears last failure for every lrm state that has this rsc.*/ for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (node_name != NULL) { if (strcmp(node_name, lrm_state->node_name) != 0) { /* filter by node_name if node_name is present */ continue; } } delete_op_entry(lrm_state, NULL, rsc_id, attr, 0); if (!lrm_state->resource_history) { continue; } g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { if (crm_str_eq(rsc_id, entry->id, TRUE)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } free(attr); g_list_free(lrm_state_list); } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; struct recurring_op_s *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->pending_ops, key); if (pending) { if (remove && pending->remove == FALSE) { pending->remove = TRUE; crm_debug("Scheduling %s for removal", key); } if (pending->cancelled) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } pending->cancelled = TRUE; } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, 
key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (crm_str_eq(op->op_key, data->key, TRUE)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->pending_ops)); return data.done; } static lrmd_rsc_info_t * get_lrm_resource(lrm_state_t * lrm_state, xmlNode * resource, xmlNode * op_msg, gboolean do_create) { lrmd_rsc_info_t *rsc = NULL; const char *id = ID(resource); const char *type = crm_element_value(resource, XML_ATTR_TYPE); const char *class = crm_element_value(resource, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(resource, XML_AGENT_ATTR_PROVIDER); const char *long_id = crm_element_value(resource, XML_ATTR_ID_LONG); crm_trace("Retrieving %s from the LRM.", id); CRM_CHECK(id != NULL, return NULL); rsc = lrm_state_get_rsc_info(lrm_state, id, 0); if (!rsc && long_id) { rsc = lrm_state_get_rsc_info(lrm_state, long_id, 0); } if (!rsc && do_create) { CRM_CHECK(class != NULL, return NULL); CRM_CHECK(type != NULL, return NULL); crm_trace("Adding rsc %s before operation", id); lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); rsc = lrm_state_get_rsc_info(lrm_state, id, 0); if (!rsc) { fsa_data_t *msg_data = NULL; crm_err("Could not add resource %s to LRM", id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } } return rsc; } static void delete_resource(lrm_state_t * lrm_state, const char *id, lrmd_rsc_info_t * rsc, GHashTableIter * gIter, const char *sys, const char *host, const char *user, ha_msg_input_t * request) { int rc = pcmk_ok; crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? user : "internal", host); if (rsc) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource '%s' deleted", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); op = calloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = strdup(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d", id, sys, user ? 
user : "internal", host, rc); } delete_rsc_entry(lrm_state, request, id, gIter, rc, user); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean create_rsc = TRUE; lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = NULL; gboolean is_remote_node = FALSE; if (input->xml != NULL) { /* Remote node operations are routed here to their remote connections */ target_node = crm_element_value(input->xml, XML_LRM_ATTR_TARGET); } if (target_node == NULL) { target_node = fsa_our_uname; } else if (safe_str_neq(target_node, fsa_our_uname)) { is_remote_node = TRUE; } lrm_state = lrm_state_find(target_node); if (lrm_state == NULL && is_remote_node) { crm_err("no lrmd connection for remote node %s found on cluster node %s. Can not process request.", target_node, fsa_our_uname); return; } CRM_ASSERT(lrm_state != NULL); #if ENABLE_ACL user_name = crm_acl_get_set_user(input->msg, F_CRM_USER, NULL); crm_trace("LRM command from user '%s'", user_name); #endif crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } crm_trace("LRM command from: %s", from_sys); if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { operation = CRMD_ACTION_DELETE; } else if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { operation = CRM_OP_LRM_REFRESH; } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { rsc_history_t *entry = NULL; lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* The lrmd can not fail a resource, it does not understand the * concept of success or failure in relation to a resource, it simply * executes operations and reports the results. We determine what a failure is. * Becaues of this, if we want to fail a resource we have to fake what we * understand a failure to look like. * * To do this we create a fake lrmd operation event for the resource * we want to fail. We then pass that event to the lrmd client callback * so it will be processed as if it actually came from the lrmd. 
*/ op = construct_op(lrm_state, input->xml, ID(xml_rsc), "asyncmon"); CRM_ASSERT(op != NULL); free((char *)op->user_data); op->user_data = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { op->call_id = entry->last_callid + 1; if (op->call_id < 0) { op->call_id = 1; } } op->interval = 0; op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_UNKNOWN_ERROR; op->t_run = time(NULL); op->t_rcchange = op->t_run; #if ENABLE_ACL if (user_name && is_privileged(user_name) == FALSE) { crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } #endif rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc); if (rsc) { crm_info("Failing resource %s...", rsc->id); process_lrm_event(lrm_state, op); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(input->msg, "bad input"); } send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { int rc = pcmk_ok; xmlNode *fragment = do_lrm_query_internal(lrm_state, TRUE); fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); crm_info("Forced a local LRM refresh: call=%d", rc); if(strcmp(CRM_SYSTEM_CRMD, from_sys) != 0) { xmlNode *reply = create_request( CRM_OP_INVOKE_LRM, fragment, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } free_xml(fragment); } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { xmlNode *data = do_lrm_query_internal(lrm_state, FALSE); xmlNode *reply = create_reply(input->msg, data); if (relay_message(reply, TRUE) == FALSE) { crm_err("Unable to route reply"); crm_log_xml_err(reply, "reply"); } free_xml(reply); free_xml(data); } else if (safe_str_eq(operation, CRM_OP_PROBED)) { update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node); } else if (safe_str_eq(operation, CRM_OP_REPROBE) || safe_str_eq(crm_op, CRM_OP_REPROBE)) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_notice("Forcing the status of all resources to be redetected"); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host, user_name, NULL); } /* Now delete the copy in the CIB */ erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); /* And finally, _delete_ the value in attrd * Setting it to FALSE results in the PE sending us back here again */ update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); if(strcmp(CRM_SYSTEM_TENGINE, from_sys) != 0 && strcmp(CRM_SYSTEM_CRMD, from_sys) != 0) { xmlNode *reply = create_request( CRM_OP_INVOKE_LRM, NULL, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if
(relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *params = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* only the first 16 chars are used by the LRM */ params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { create_rsc = FALSE; } rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc); if (rsc == NULL && create_rsc) { crm_err("Invalid resource definition"); crm_log_xml_warn(input->msg, "bad input"); } else if (rsc == NULL) { lrmd_event_data_t *op = NULL; crm_notice("Not creating resource for a %s event: %s", operation, ID(input->xml)); delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name); op = construct_op(lrm_state, input->xml, ID(xml_rsc), operation); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; CRM_ASSERT(op != NULL); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); } else if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; const char *op_interval = NULL; gboolean in_progress = FALSE; CRM_CHECK(params != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL); op_interval = crm_element_value(params, meta_key); free(meta_key); meta_key = crm_meta_name(XML_LRM_ATTR_TASK); op_task = crm_element_value(params, meta_key); free(meta_key); meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); call_id = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); CRM_CHECK(op_interval != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); op_key = generate_op_key(rsc->id, op_task, crm_parse_int(op_interval, "0")); crm_debug("PE requested op %s (call=%s) be cancelled", op_key, call_id ? call_id : "NA"); call = crm_parse_int(call_id, "0"); if (call == 0) { /* the normal case when the PE cancels a recurring op */ in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { /* the normal case when the PE cancels an orphan op */ in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } if (in_progress == FALSE) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc->id, op_task); crm_info("Nothing known about operation %d for %s", call, op_key); delete_op_entry(lrm_state, NULL, rsc->id, op_key, call); CRM_ASSERT(op != NULL); op->rc = PCMK_OCF_OK; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(from_host, from_sys, rsc, op, rsc->id); lrmd_free_event(op); /* needed?? surely not otherwise the cancel_op_(_key) wouldn't * have failed in the first place */ g_hash_table_remove(lrm_state->pending_ops, op_key); } free(op_key); } else if (rsc != NULL && safe_str_eq(operation, CRMD_ACTION_DELETE)) { #if ENABLE_ACL int cib_rc = delete_rsc_status(lrm_state, rsc->id, cib_dryrun | cib_sync_call, user_name); if (cib_rc != pcmk_ok) { lrmd_event_data_t *op = NULL; crm_err ("Attempted deletion of resource status '%s' from CIB for %s (user=%s) on %s failed: (rc=%d) %s", rsc->id, from_sys, user_name ? 
user_name : "unknown", from_host, cib_rc, pcmk_strerror(cib_rc)); op = construct_op(lrm_state, input->xml, rsc->id, operation); op->op_status = PCMK_LRM_OP_ERROR; if (cib_rc == -EACCES) { op->rc = PCMK_OCF_INSUFFICIENT_PRIV; } else { op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } #endif delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, user_name, input); } else if (rsc != NULL) { do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg); } lrmd_free_rsc_info(rsc); } else { crm_err("Operation was neither a lrm_query, nor a rsc op. %s", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; const char *op_interval = NULL; GHashTable *params = NULL; const char *transition = NULL; CRM_ASSERT(rsc_id != NULL); op = calloc(1, sizeof(lrmd_event_data_t)); op->type = lrmd_event_exec_complete; op->op_type = strdup(operation); op->op_status = PCMK_LRM_OP_PENDING; op->rc = -1; op->rsc_id = strdup(rsc_id); op->interval = 0; op->timeout = 0; op->start_delay = 0; if (rsc_op == NULL) { CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. */ op->params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_op_target_rc"); op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); op_interval = crm_meta_value(params, XML_LRM_ATTR_INTERVAL); op->interval = crm_parse_int(op_interval, "0"); op->timeout = crm_parse_int(op_timeout, "0"); op->start_delay = crm_parse_int(op_delay, "0"); if (safe_str_neq(operation, RSC_STOP)) { op->params = params; } else { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->interval < 0) { op->interval = 0; } if (op->timeout <= 0) { op->timeout = op->interval; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = strdup(transition); if (op->interval != 0) { if (safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and Stop actions cannot have an interval: %d", op->interval); op->interval = 0; } } crm_trace("Constructed %s op for %s: interval=%d", operation, rsc_id, op->interval); return op; } void send_direct_ack(const char *to_host, 
const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; crm_node_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { CRM_ASSERT(rsc_id != NULL); op->rsc_id = strdup(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = crm_get_peer(0, fsa_our_uname); update = do_update_node_cib(peer, node_update_none, NULL, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, __FUNCTION__); reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_trace(update, "ACK Update"); crm_debug("ACK'ing resource op %s_%s_%d from %s: %s", op->rsc_id, op->op_type, op->interval, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(update); free_xml(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } set_bit(fsa_input_register, R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval != 0 && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, key); remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval != 0) { crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request) { int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; lrmd_key_value_t *params = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; CRM_CHECK(rsc != NULL, return); CRM_CHECK(operation != NULL, return); if (msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); if (transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); /* stop any previous monitor operations before changing the resource state */ if (op->interval == 0 && strcmp(operation, CRMD_ACTION_STATUS) != 0 && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; 
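/* data feeds stop_recurring_action_by_rsc(): g_hash_table_foreach_remove() below calls it once per pending op, cancelling each recurring op for this resource and dropping the table entries whose cancellation did not remain in flight */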
data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_action_by_rsc, &data); crm_debug("Stopped %u recurring operations in preparation for %s_%s_%d", removed, rsc->id, operation, op->interval); } /* now do the op */ crm_info("Performing key=%s op=%s_%s_%d", transition, rsc->id, operation, op->interval); if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE && fsa_state != S_TRANSITION_ENGINE) { if (safe_str_neq(operation, "fail") && safe_str_neq(operation, CRMD_ACTION_STOP)) { crm_info("Discarding attempt to perform action %s on %s in state %s", operation, rsc->id, fsa_state2string(fsa_state)); op->rc = 99; op->op_status = PCMK_LRM_OP_ERROR; send_direct_ack(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } } op_id = generate_op_key(rsc->id, op->op_type, op->interval); if (op->interval > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } if (op->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { params = lrmd_key_value_add(params, key, value); } } call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data, op->interval, op->timeout, op->start_delay, params); if (call_id <= 0) { crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); struct recurring_op_s *pending = NULL; pending = calloc(1, sizeof(struct recurring_op_s)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval = op->interval; pending->op_type = strdup(operation); pending->op_key = strdup(op_id); pending->rsc_id = strdup(rsc->id); g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); if (op->interval > 0 && op->start_delay > START_DELAY_THRESHOLD) { char *uuid = NULL; int dummy = 0, target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); free(uuid); op->rc = target_rc; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(NULL, NULL, rsc, op, rsc->id); } } free(op_id); lrmd_free_event(op); return; } int last_resource_update = 0; static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); } if (call_id == last_resource_update) { last_resource_update = 0; trigger_fsa(fsa_source); } } static int do_update_resource(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { /* */ int rc = pcmk_ok; xmlNode *update, *iter = NULL; int call_opt = cib_quorum_override; const char *uuid = NULL; CRM_CHECK(op != NULL, return 0); if (fsa_state == S_ELECTION || fsa_state == S_PENDING) { crm_info("Sending update to local CIB in state: %s", fsa_state2string(fsa_state)); call_opt |= cib_scope_local; } iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); if (safe_str_eq(lrm_state->node_name, fsa_our_uname)) { 
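/* this lrm_state is for the local node, so use our own cached UUID */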
uuid = fsa_our_uuid; } else { /* a remote node's uuid and uname are equal */ uuid = lrm_state->node_name; crm_xml_add(iter, XML_NODE_IS_REMOTE, "true"); } CRM_LOG_ASSERT(uuid != NULL); if(uuid == NULL) { rc = -EINVAL; goto done; } crm_xml_add(iter, XML_ATTR_UUID, uuid); crm_xml_add(iter, XML_ATTR_UNAME, lrm_state->node_name); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, __FUNCTION__); if (rsc) { const char *container = NULL; crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->class); crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); if (op->params) { container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); } if (container) { crm_trace("Resource %s is part of container resource %s", op->rsc_id, container); crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container); } CRM_CHECK(rsc->type != NULL, crm_err("Resource %s has no value for type", op->rsc_id)); CRM_CHECK(rsc->class != NULL, crm_err("Resource %s has no value for class", op->rsc_id)); } else { crm_warn("Resource %s no longer exists in the lrmd", op->rsc_id); send_direct_ack(NULL, NULL, rsc, op, op->rsc_id); goto cleanup; } crm_log_xml_trace(update, __FUNCTION__); /* make it an asynchronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down anyway? * (probably because of an internal error) * * Worst case: * we get shot for having resources "running" when they really weren't * * the alternative, however, means blocking here for too long, which * isn't acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL); if (rc > 0) { last_resource_update = rc; } done: /* the return code is a call number, not an error code */ crm_trace("Sent resource state update message: %d for %s=%d on %s", rc, op->op_type, op->interval, op->rsc_id); fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; gboolean removed = FALSE; lrmd_rsc_info_t *rsc = NULL; struct recurring_op_s *pending = NULL; CRM_CHECK(op != NULL, return FALSE); CRM_CHECK(op->rsc_id != NULL, return FALSE); op_id = make_stop_id(op->rsc_id, op->call_id); pending = g_hash_table_lookup(lrm_state->pending_ops, op_id); op_key = generate_op_key(op->rsc_id, op->op_type, op->interval); rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); if (op->op_status == PCMK_LRM_OP_ERROR && (op->rc == PCMK_OCF_RUNNING_MASTER || op->rc == PCMK_OCF_NOT_RUNNING)) { /* Leave it up to the TE/PE to decide if this is an error */ op->op_status = PCMK_LRM_OP_DONE; } if (op->op_status != PCMK_LRM_OP_CANCELLED) { if (safe_str_eq(op->op_type, RSC_NOTIFY)) { /* Keep notify ops out of the CIB */ send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { update_id = do_update_resource(lrm_state, rsc, op); } } else if (op->interval == 0) { /*
This will occur when "crm resource cleanup" is called while actions are in-flight */ crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else if (pending == NULL) { /* We don't need to do anything for cancelled ops * that are not in our pending op list. There are no * transition actions waiting on these operations. */ } else if (op->user_data == NULL) { /* At this point we have a pending entry, but no transition * key present in the user_data field. report this */ crm_err("Op %s (call=%d): No user data", op_key, op->call_id); } else if (pending->remove) { /* The tengine canceled this op, we have been waiting for the cancel to finish. */ delete_op_entry(lrm_state, op, op->rsc_id, op_key, op->call_id); } else if (pending && op->rsc_deleted) { /* The tengine initiated this op, but it was cancelled outside of the * tengine's control during a resource cleanup/re-probe request. The tengine * must be alerted that this operation completed, otherwise the tengine * will continue waiting for this update to occur until it is timed out. * We don't want this update going to the cib though, so use a direct ack. */ crm_trace("Op %s (call=%d): cancelled due to rsc deletion", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { /* Before a stop is called, no need to direct ack */ crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id); } if ((op->interval == 0) && g_hash_table_remove(lrm_state->pending_ops, op_id)) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops)); } else if(op->interval != 0 && op->op_status == PCMK_LRM_OP_CANCELLED) { removed = TRUE; g_hash_table_remove(lrm_state->pending_ops, op_id); } switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: crm_info("Operation %s: %s (node=%s, call=%d, confirmed=%s)", op_key, services_lrm_status_str(op->op_status), lrm_state->node_name, op->call_id, removed ? "true" : "false"); break; case PCMK_LRM_OP_DONE: crm_notice("Operation %s: %s (node=%s, call=%d, rc=%d, cib-update=%d, confirmed=%s)", op_key, services_ocf_exitcode_str(op->rc), lrm_state->node_name, op->call_id, op->rc, update_id, removed ? "true" : "false"); break; case PCMK_LRM_OP_TIMEOUT: crm_err("Operation %s: %s (node=%s, call=%d, timeout=%dms)", op_key, services_lrm_status_str(op->op_status), lrm_state->node_name, op->call_id, op->timeout); break; default: crm_err("Operation %s (node=%s, call=%d, status=%d, cib-update=%d, confirmed=%s) %s", op_key, lrm_state->node_name, op->call_id, op->op_status, update_id, removed ? "true" : "false", services_lrm_status_str(op->op_status)); } if (op->output) { char *prefix = g_strdup_printf("%s-%s_%s_%d:%d", lrm_state->node_name, op->rsc_id, op->op_type, op->interval, op->call_id); if (op->rc) { crm_log_output(LOG_NOTICE, prefix, op->output); } else { crm_log_output(LOG_DEBUG, prefix, op->output); } g_free(prefix); } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... 
allow it to continue */ mainloop_set_trigger(fsa_source); update_history_cache(lrm_state, rsc, op); lrmd_free_rsc_info(rsc); free(op_key); free(op_id); return TRUE; } diff --git a/doc/Clusters_from_Scratch/zh-CN/Ap-Configuration.po b/doc/Clusters_from_Scratch/zh-CN/Ap-Configuration.po index 203240245c..400d22f889 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Ap-Configuration.po +++ b/doc/Clusters_from_Scratch/zh-CN/Ap-Configuration.po @@ -1,712 +1,712 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-15 23:34+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Configuration Recap" msgstr "配置扼要重述" #. Tag: title #, no-c-format msgid "Final Cluster Configuration" msgstr "最终的集群配置文件" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource\n" " Master/Slave Set: WebDataClone [WebData]\n" " Masters: [ pcmk-2 pcmk-1 ]\n" " Clone Set: dlm-clone [dlm]\n" " Started: [ pcmk-2 pcmk-1 ]\n" " Clone Set: ClusterIP-clone [ClusterIP] (unique)\n" " ClusterIP:0 (ocf::heartbeat:IPaddr2) Started\n" " ClusterIP:1 (ocf::heartbeat:IPaddr2) Started\n" " Clone Set: WebFS-clone [WebFS]\n" " Started: [ pcmk-1 pcmk-2 ]\n" " Clone Set: WebSite-clone [WebSite]\n" " Started: [ pcmk-1 pcmk-2 ]\n" "# pcs resource rsc defaults\n" "resource-stickiness: 100\n" "# pcs resource op defaults\n" "timeout: 240s\n" "# pcs stonith\n" " impi-fencing (stonith:fence_ipmilan) Started\n" "# pcs property\n" "dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "cluster-infrastructure: corosync\n" "no-quorum-policy: ignore\n" "stonith-enabled: true\n" "# pcs constraint\n" "Location Constraints:\n" "Ordering Constraints:\n" " ClusterIP-clone then WebSite-clone\n" " WebDataClone then WebSite-clone\n" " WebFS-clone then WebSite-clone\n" "Colocation Constraints:\n" " WebSite-clone with ClusterIP-clone\n" " WebFS-clone with WebDataClone (with-rsc-role:Master)\n" " WebSite-clone with WebFS-clone\n" "#\n" "# pcs status\n" "\n" "Last updated: Fri Sep 14 13:45:34 2012\n" "Last change: Fri Sep 14 13:43:13 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-1 (1) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "11 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " Master/Slave Set: WebDataClone [WebData]\n" " Masters: [ pcmk-2 pcmk-1 ]\n" " Clone Set: dlm-clone [dlm]\n" " Started: [ pcmk-1 pcmk-2 ]\n" " Clone Set: ClusterIP-clone [ClusterIP] (unique)\n" " ClusterIP:0 (ocf::heartbeat:IPaddr2): Started pcmk-1\n" " ClusterIP:1 (ocf::heartbeat:IPaddr2): Started pcmk-2\n" " Clone Set: WebFS-clone [WebFS]\n" " Started: [ pcmk-1 pcmk-2 ]\n" " Clone Set: WebSite-clone [WebSite]\n" " Started: [ pcmk-1 pcmk-2 ]\n" " impi-fencing (stonith:fence_ipmilan): Started" msgstr "" #. Tag: para #, no-c-format msgid "In xml it should look similar to this." -msgstr "" +msgstr "在xml中应该类似于这样。" #. 
Tag: programlisting #, no-c-format msgid "" "<cib admin_epoch=\"0\" cib-last-written=\"Fri Sep 14 13:43:13 2012\" crm_feature_set=\"3.0.6\" dc-uuid=\"1\" epoch=\"47\" have-quorum=\"1\" num_updates=\"50\" update-client=\"cibadmin\" update-origin=\"pcmk-1\" validate-with=\"pacemaker-1.2\">\n" " <configuration>\n" " <crm_config>\n" " <cluster_property_set id=\"cib-bootstrap-options\">\n" " <nvpair id=\"cib-bootstrap-options-dc-version\" name=\"dc-version\" value=\"1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\"/>\n" " <nvpair id=\"cib-bootstrap-options-cluster-infrastructure\" name=\"cluster-infrastructure\" value=\"corosync\"/>\n" " <nvpair id=\"cib-bootstrap-options-no-quorum-policy\" name=\"no-quorum-policy\" value=\"ignore\"/>\n" " <nvpair id=\"cib-bootstrap-options-stonith-enabled\" name=\"stonith-enabled\" value=\"true\"/>\n" " </cluster_property_set>\n" " </crm_config>\n" " <nodes>\n" " <node id=\"1\" type=\"normal\" uname=\"pcmk-1\"/>\n" " <node id=\"2\" type=\"normal\" uname=\"pcmk-2\"/>\n" " </nodes>\n" " <resources>\n" " <master id=\"WebDataClone\">\n" " <primitive class=\"ocf\" id=\"WebData\" provider=\"linbit\" type=\"drbd\">\n" " <instance_attributes id=\"WebData-instance_attributes\">\n" " <nvpair id=\"WebData-instance_attributes-drbd_resource\" name=\"drbd_resource\" value=\"wwwdata\"/>\n" " </instance_attributes>\n" " <operations>\n" " <op id=\"WebData-interval-60s\" interval=\"60s\" name=\"monitor\"/>\n" " </operations>\n" " </primitive>\n" " <meta_attributes id=\"WebDataClone-meta_attributes\">\n" " <nvpair id=\"WebDataClone-meta_attributes-master-node-max\" name=\"master-node-max\" value=\"1\"/>\n" " <nvpair id=\"WebDataClone-meta_attributes-clone-max\" name=\"clone-max\" value=\"2\"/>\n" " <nvpair id=\"WebDataClone-meta_attributes-clone-node-max\" name=\"clone-node-max\" value=\"1\"/>\n" " <nvpair id=\"WebDataClone-meta_attributes-notify\" name=\"notify\" value=\"true\"/>\n" " <nvpair id=\"WebDataClone-meta_attributes-master-max\" name=\"master-max\" value=\"2\"/>\n" " </meta_attributes>\n" " </master>\n" " <clone id=\"dlm-clone\">\n" " <primitive class=\"ocf\" id=\"dlm\" provider=\"pacemaker\" type=\"controld\">\n" " <instance_attributes id=\"dlm-instance_attributes\"/>\n" " <operations>\n" " <op id=\"dlm-interval-60s\" interval=\"60s\" name=\"monitor\"/>\n" " </operations>\n" " </primitive>\n" " <meta_attributes id=\"dlm-clone-meta\">\n" " <nvpair id=\"dlm-clone-max\" name=\"clone-max\" value=\"2\"/>\n" " <nvpair id=\"dlm-clone-node-max\" name=\"clone-node-max\" value=\"1\"/>\n" " </meta_attributes>\n" " </clone>\n" " <clone id=\"ClusterIP-clone\">\n" " <primitive class=\"ocf\" id=\"ClusterIP\" provider=\"heartbeat\" type=\"IPaddr2\">\n" " <instance_attributes id=\"ClusterIP-instance_attributes\">\n" " <nvpair id=\"ClusterIP-instance_attributes-ip\" name=\"ip\" value=\"192.168.0.120\"/>\n" " <nvpair id=\"ClusterIP-instance_attributes-cidr_netmask\" name=\"cidr_netmask\" value=\"32\"/>\n" " <nvpair id=\"ClusterIP-instance_attributes-clusterip_hash\" name=\"clusterip_hash\" value=\"sourceip\"/>\n" " </instance_attributes>\n" " <operations>\n" " <op id=\"ClusterIP-interval-30s\" interval=\"30s\" name=\"monitor\"/>\n" " </operations>\n" " </primitive>\n" " <meta_attributes id=\"ClusterIP-clone-meta\">\n" " <nvpair id=\"ClusterIP-globally-unique\" name=\"globally-unique\" value=\"true\"/>\n" " <nvpair id=\"ClusterIP-clone-max\" name=\"clone-max\" value=\"2\"/>\n" " <nvpair id=\"ClusterIP-clone-node-max\" name=\"clone-node-max\" value=\"2\"/>\n" " </meta_attributes>\n" 
" </clone>\n" " <clone id=\"WebFS-clone\">\n" " <primitive class=\"ocf\" id=\"WebFS\" provider=\"heartbeat\" type=\"Filesystem\">\n" " <instance_attributes id=\"WebFS-instance_attributes\">\n" " <nvpair id=\"WebFS-instance_attributes-device\" name=\"device\" value=\"/dev/drbd/by-res/wwwdata\"/>\n" " <nvpair id=\"WebFS-instance_attributes-directory\" name=\"directory\" value=\"/var/www/html\"/>\n" " <nvpair id=\"WebFS-instance_attributes-fstype\" name=\"fstype\" value=\"gfs2\"/>\n" " </instance_attributes>\n" " <meta_attributes id=\"WebFS-meta_attributes\"/>\n" " </primitive>\n" " <meta_attributes id=\"WebFS-clone-meta\"/>\n" " </clone>\n" " <clone id=\"WebSite-clone\">\n" " <primitive class=\"ocf\" id=\"WebSite\" provider=\"heartbeat\" type=\"apache\">\n" " <instance_attributes id=\"WebSite-instance_attributes\">\n" " <nvpair id=\"WebSite-instance_attributes-configfile\" name=\"configfile\" value=\"/etc/httpd/conf/httpd.conf\"/>\n" " <nvpair id=\"WebSite-instance_attributes-statusurl\" name=\"statusurl\" value=\"http://localhost/server-status\"/>\n" " </instance_attributes>\n" " <operations>\n" " <op id=\"WebSite-interval-1min\" interval=\"1min\" name=\"monitor\"/>\n" " </operations>\n" " </primitive>\n" " <meta_attributes id=\"WebSite-clone-meta\"/>\n" " </clone>\n" " <primitive class=\"stonith\" id=\"impi-fencing\" type=\"fence_ipmilan\">\n" " <instance_attributes id=\"impi-fencing-instance_attributes\">\n" " <nvpair id=\"impi-fencing-instance_attributes-pcmk_host_list\" name=\"pcmk_host_list\" value=\"pcmk-1 pcmk-2\"/>\n" " <nvpair id=\"impi-fencing-instance_attributes-ipaddr\" name=\"ipaddr\" value=\"10.0.0.1\"/>\n" " <nvpair id=\"impi-fencing-instance_attributes-login\" name=\"login\" value=\"testuser\"/>\n" " <nvpair id=\"impi-fencing-instance_attributes-passwd\" name=\"passwd\" value=\"acd123\"/>\n" " </instance_attributes>\n" " <operations>\n" " <op id=\"impi-fencing-interval-60s\" interval=\"60s\" name=\"monitor\"/>\n" " </operations>\n" " </primitive>\n" " </resources>\n" " <constraints>\n" " <rsc_colocation id=\"colocation-WebSite-ClusterIP-INFINITY\" rsc=\"WebSite-clone\" score=\"INFINITY\" with-rsc=\"ClusterIP-clone\"/>\n" " <rsc_order first=\"ClusterIP-clone\" first-action=\"start\" id=\"order-ClusterIP-WebSite-mandatory\" then=\"WebSite-clone\" then-action=\"start\"/>\n" " <rsc_colocation id=\"colocation-WebFS-WebDataClone-INFINITY\" rsc=\"WebFS-clone\" score=\"INFINITY\" with-rsc=\"WebDataClone\" with-rsc-role=\"Master\"/>\n" " <rsc_colocation id=\"colocation-WebSite-WebFS-INFINITY\" rsc=\"WebSite-clone\" score=\"INFINITY\" with-rsc=\"WebFS-clone\"/>\n" " <rsc_order first=\"WebFS-clone\" id=\"order-WebFS-WebSite-mandatory\" then=\"WebSite-clone\"/>\n" " <rsc_order first=\"WebDataClone\" first-action=\"promote\" id=\"order-WebDataClone-WebFS-mandatory\" then=\"WebFS-clone\" then-action=\"start\"/>\n" " </constraints>\n" " <rsc_defaults>\n" " <meta_attributes id=\"rsc_defaults-options\">\n" " <nvpair id=\"rsc_defaults-options-resource-stickiness\" name=\"resource-stickiness\" value=\"100\"/>\n" " </meta_attributes>\n" " </rsc_defaults>\n" " <op_defaults>\n" " <meta_attributes id=\"op_defaults-options\">\n" " <nvpair id=\"op_defaults-options-timeout\" name=\"timeout\" value=\"240s\"/>\n" " </meta_attributes>\n" " </op_defaults>\n" " </configuration>\n" "</cib>" msgstr "" #. Tag: title #, no-c-format msgid "Node List" msgstr "节点列表" #. Tag: para #, no-c-format msgid "The list of cluster nodes is automatically populated by the cluster." msgstr "这个列表中的集群节点是集群自动添加的。" #. 
Tag: literallayout #, no-c-format msgid "" "Pacemaker Nodes:\n" " Online: [ pcmk-1 pcmk-2 ]" msgstr "" #. Tag: title #, no-c-format msgid "Cluster Options" msgstr "集群选项" #. Tag: para #, no-c-format msgid "This is where the cluster automatically stores some information about the cluster" msgstr "这是集群自动存储集群信息的地方" #. Tag: para #, no-c-format msgid "dc-version - the version (including upstream source-code hash) of Pacemaker used on the DC" msgstr "dc-version - DC使用的Pacemaker的版本(包括上游源代码的hash)" #. Tag: para #, no-c-format msgid "cluster-infrastructure - the cluster infrastructure being used (heartbeat or openais)" msgstr "cluster-infrastructure - 集群使用的基础架构软件 (heartbeat 或 openais/corosync)" #. Tag: para #, no-c-format msgid "expected-quorum-votes - the maximum number of nodes expected to be part of the cluster" msgstr "expected-quorum-votes - 预期的集群最大成员数" #. Tag: para #, no-c-format msgid "and where the admin can set options that control the way the cluster operates" msgstr "以及管理员可以设置用来控制集群运作方式的选项" #. Tag: para #, no-c-format msgid "stonith-enabled=true - Make use of STONITH" msgstr "stonith-enabled=true - 使用STONITH" #. Tag: para #, no-c-format msgid "no-quorum-policy=ignore - Ignore loss of quorum and continue to host resources." msgstr "no-quorum-policy=ignore - 忽略达不到法定人数的情况,继续运行资源。" #. Tag: programlisting #, no-c-format msgid "" "# pcs property\n" "dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "cluster-infrastructure: corosync\n" "no-quorum-policy: ignore\n" "stonith-enabled: true" msgstr "" #. Tag: title #, no-c-format msgid "Resources" msgstr "资源" #. Tag: title #, no-c-format msgid "Default Options" msgstr "默认选项" #. Tag: para #, no-c-format msgid "Here we configure cluster options that apply to every resource." msgstr "这里我们设置应用于所有资源的集群选项。" #. Tag: para #, no-c-format msgid "resource-stickiness - Specify the aversion to moving resources to other machines" -msgstr "resource-stickiness - 资源粘稠值" +msgstr "resource-stickiness - 资源粘性值" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource rsc defaults\n" "resource-stickiness: 100" msgstr "" #. Tag: title #, no-c-format msgid "Fencing" msgstr "隔离" #. Tag: programlisting #, no-c-format msgid "" "# pcs stonith show\n" " impi-fencing (stonith:fence_ipmilan) Started\n" "# pcs stonith show impi-fencing\n" "Resource: impi-fencing\n" " pcmk_host_list: pcmk-1 pcmk-2\n" " ipaddr: 10.0.0.1\n" " login: testuser\n" " passwd: acd123" msgstr "" #. Tag: title #, no-c-format msgid "Service Address" msgstr "服务地址" #. Tag: para #, fuzzy, no-c-format msgid "Users of the services provided by the cluster require an unchanging address with which to access it. Additionally, we cloned the address so it will be active on both nodes. An iptables rule (created as part of the resource agent) is used to ensure that each request only gets processed by one of the two clone instances. The additional meta options tell the cluster that we want two instances of the clone (one \"request bucket\" for each node) and that if one node fails, then the remaining node should hold both." -msgstr "用户需要一个不变的地址来访问集群所提供的服务。此外,我们clone了地址,以便在两个节点上都使用这个IP。一个iptables规则(resource agent的一部分)是用来确保每个请求只能由两个节点中的某一个处理。这些额外的集群选项告诉我们想要两个clone(每个节点一个“请求桶”)实例,如果一个节点失效,那么剩下的节点处理这两个请求桶。" +msgstr "用户需要一个不变的地址来访问集群所提供的服务。此外,我们clone了地址,以便在两个节点上都使用这个IP。iptables规则(resource agent的一部分)是用来确保每个请求只能由两个clone实例中的某一个来处理。这些额外的meta选项告诉集群,我们想要两个clone实例(每个节点一个“request bucket”),如果一个节点失效,那么剩下的节点要承担这两个request bucket。" #.
Tag: programlisting #, no-c-format msgid "" "# pcs resource show ClusterIP-clone\n" "Resource: ClusterIP-clone\n" " ip: 192.168.0.120\n" " cidr_netmask: 32\n" " clusterip_hash: sourceip\n" " globally-unique: true\n" " clone-max: 2\n" " clone-node-max: 2\n" " op monitor interval=30s" msgstr "" #. Tag: para #, no-c-format msgid "TODO: The RA should check for globally-unique=true when cloned" msgstr "TODO: The RA should check for globally-unique=true when cloned" #. Tag: title #, no-c-format msgid "DRBD - Shared Storage" msgstr "DRBD - 共享存储" #. Tag: para #, no-c-format msgid "Here we define the DRBD service and specify which DRBD resource (from drbd.conf) it should manage. We make it a master/slave resource and, in order to have an active/active setup, allow both instances to be promoted by specifying master-max=2. We also set the notify option so that the cluster will tell DRBD agent when it’s peer changes state." -msgstr "在这里,我们定义了DRBD技术服务,并指定DRBD应该管理的资源(从drbd.conf)。我们让它作为主/从资源,并且为了active/active,用设置master-max=2来允许两者都晋升为master。我们还可以设置通知选项,这样,当时集群的节点的状态发生改变时,该集群将告诉DRBD的agent。 " +msgstr "在这里,我们定义了DRBD服务,并指定了应该管理的DRBD资源(从drbd.conf)。我们让它作为主/从资源,并且为了active/active,用设置master-max=2来允许两者都晋升为master。我们还可以设置通知选项,这样,当集群节点的状态发生改变时,该集群将告诉DRBD的agent。 " #. Tag: programlisting #, no-c-format msgid "" "# pcs resource show WebDataClone\n" "Resource: WebDataClone\n" " drbd_resource: wwwdata\n" " master-node-max: 1\n" " clone-max: 2\n" " clone-node-max: 1\n" " notify: true\n" " master-max: 2\n" " op monitor interval=60s\n" "# pcs constraint ref WebDataClone\n" "Resource: WebDataClone\n" " colocation-WebFS-WebDataClone-INFINITY\n" " order-WebDataClone-WebFS-mandatory" msgstr "" #. Tag: title #, no-c-format msgid "Cluster Filesystem" msgstr "集群文件系统" #. Tag: para #, no-c-format msgid "The cluster filesystem ensures that files are read and written correctly. We need to specify the block device (provided by DRBD), where we want it mounted and that we are using GFS2. Again it is a clone because it is intended to be active on both nodes. The additional constraints ensure that it can only be started on nodes with active gfs-control and drbd instances." msgstr "群集文件系统可确保文件读写正确。我们需要指定我们想挂载并使用GFS2的块设备(由DRBD提供)。这又是一个clone,因为它的目的是在两个节点上都可用。这些额外的限制确保它只在有gfs-control和drbd 实例的节点上运行。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource show WebFS-clone\n" "Resource: WebFS-clone\n" " device: /dev/drbd/by-res/wwwdata\n" " directory: /var/www/html\n" " fstype: gfs2\n" "# pcs constraint ref WebFS-clone\n" "Resource: WebFS-clone\n" " colocation-WebFS-WebDataClone-INFINITY\n" " colocation-WebSite-WebFS-INFINITY\n" " order-WebFS-WebSite-mandatory\n" " order-WebDataClone-WebFS-mandatory" msgstr "" #. Tag: title #, no-c-format msgid "Apache" msgstr "Apache" #. Tag: para #, no-c-format msgid "Lastly we have the actual service, Apache. We need only tell the cluster where to find it’s main configuration file and restrict it to running on nodes that have the required filesystem mounted and the IP address active." -msgstr "最后我们有了真正的服务,Apache,我们只需要告诉集群在哪里可以找到它的主配置文件,并限制其只在挂载了文件系统和有可用IP节点上运行" +msgstr "最后我们有了真正的服务,Apache,我们只需要告诉集群在哪里可以找到它的主配置文件,并限制其只能在挂载了文件系统和有可用IP的节点上运行。" #. 
Tag: programlisting #, no-c-format msgid "" "# pcs resource show WebSite-clone\n" "Resource: WebSite-clone\n" " configfile: /etc/httpd/conf/httpd.conf\n" " statusurl: http://localhost/server-status\n" " master-max: 2\n" " op monitor interval=1min\n" "# pcs constraint ref WebSite-clone\n" "Resource: WebSite-clone\n" " colocation-WebSite-ClusterIP-INFINITY\n" " colocation-WebSite-WebFS-INFINITY\n" " order-ClusterIP-WebSite-mandatory\n" " order-WebFS-WebSite-mandatory" msgstr "" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive rsa-fencing stonith::external/ibmrsa \\\n" #~ "        params hostname=”pcmk-1 pcmk-2\" ipaddr=192.168.122.31 userid=mgmt passwd=abc123 type=ibm \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"2\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone Fencing rsa-fencing \n" #~ "clone WebFSClone WebFS\n" #~ "clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "clone WebSiteClone WebSite\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone\n" #~ "colocation fs_on_drbd inf: WebFSClone WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSiteClone WebIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start\n" #~ "order WebSite-after-WebFS inf: WebFSClone WebSiteClone\n" #~ "order apache-after-ip inf: WebIP WebSiteClone\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=”true” \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" 
directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive rsa-fencing stonith::external/ibmrsa \\\n" #~ "        params hostname=”pcmk-1 pcmk-2\" ipaddr=192.168.122.31 userid=mgmt passwd=abc123 type=ibm \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"2\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone Fencing rsa-fencing \n" #~ "clone WebFSClone WebFS\n" #~ "clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "clone WebSiteClone WebSite\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone\n" #~ "colocation fs_on_drbd inf: WebFSClone WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSiteClone WebIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start\n" #~ "order WebSite-after-WebFS inf: WebFSClone WebSiteClone\n" #~ "order apache-after-ip inf: WebIP WebSiteClone\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=”true” \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgid "" #~ "\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ msgstr "" #~ "\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ msgid "" #~ "\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=”true” \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ msgstr "" #~ "\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=”true” \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ msgid "" #~ "\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgstr "" #~ "\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgid "TODO: Add text here" #~ msgstr "TODO: Add text here" #~ msgid "" #~ "\n" #~ "primitive rsa-fencing stonith::external/ibmrsa \\\n" #~ "        params hostname=”pcmk-1 pcmk-2\" 
ipaddr=192.168.122.31 userid=mgmt passwd=abc123 type=ibm \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "clone Fencing rsa-fencing\n" #~ msgstr "" #~ "\n" #~ "primitive rsa-fencing stonith::external/ibmrsa \\\n" #~ "        params hostname=”pcmk-1 pcmk-2\" ipaddr=192.168.122.31 userid=mgmt passwd=abc123 type=ibm \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "clone Fencing rsa-fencing\n" #~ msgid "" #~ "\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "clone WebIP ClusterIP  \n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ msgstr "" #~ "\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "clone WebIP ClusterIP  \n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ msgid "Distributed lock manager" #~ msgstr "分布式锁控制器" #~ msgid "Cluster filesystems like GFS2 require a lock manager. This service starts the daemon that provides user-space applications (such as the GFS2 daemon) with access to the in-kernel lock manager. Since we need it to be available on all nodes in the cluster, we have it cloned." #~ msgstr "像GFS2集群文件系统需要一个锁管理。该服务启动守护进程,提供了访问内核中的锁管理器的用户空间应用程序(如GFS2守护进程)。因为我们需要它在集群中的所有可用节点中运行,我们把它clone。" #~ msgid "" #~ "\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\n" #~ msgstr "" #~ "\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\n" #~ msgid "TODO: Confirm interleave is no longer needed" #~ msgstr "TODO: Confirm interleave is no longer needed" #~ msgid "GFS control daemon" #~ msgstr "GFS 控制守护进程" #~ msgid "GFS2 also needs a user-space/kernel bridge that runs on every node. So here we have another clone, however this time we must also specify that it can only run on machines that are also running the DLM (colocation constraint) and that it can only be started after the DLM is running (order constraint). Additionally, the gfs-control clone should only care about the DLM instances it is paired with, so we need to set the interleave option." 
#~ msgstr "GFS2还需要一个user-space到kernel的桥梁,每个节点上要运行。所以在这里我们还有一个clone,但是这一次我们还必须指定它只能运行在有DLM的机器上(colocation 约束),它只能在DLM后启动 (order约束)。此外,gfs-control clone应该只关系与其配对的DLM实例,所以我们还要设置interleave 选项" #~ msgid "" #~ "\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ msgstr "" #~ "\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ msgid "" #~ "\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"2\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ msgstr "" #~ "\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"2\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ msgid "" #~ "\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "clone WebFSClone WebFS\n" #~ "colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone\n" #~ "colocation fs_on_drbd inf: WebFSClone WebDataClone:Master\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone\n" #~ msgstr "" #~ "\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "clone WebFSClone WebFS\n" #~ "colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone\n" #~ "colocation fs_on_drbd inf: WebFSClone WebDataClone:Master\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone\n" #~ msgid "" #~ "\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "clone WebSiteClone WebSite\n" #~ "colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone\n" #~ "colocation website-with-ip inf: WebSiteClone WebIP\n" #~ "order apache-after-ip inf: WebIP WebSiteClone\n" #~ "order WebSite-after-WebFS inf: WebFSClone WebSiteClone\n" #~ msgstr "" #~ "\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "clone WebSiteClone WebSite\n" #~ "colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone\n" #~ "colocation website-with-ip inf: WebSiteClone WebIP\n" #~ "order apache-after-ip inf: WebIP WebSiteClone\n" #~ "order WebSite-after-WebFS inf: WebFSClone WebSiteClone\n" diff --git a/doc/Clusters_from_Scratch/zh-CN/Ap-Corosync-Conf.po b/doc/Clusters_from_Scratch/zh-CN/Ap-Corosync-Conf.po index 559d13e247..2ef3b07545 
100644 --- a/doc/Clusters_from_Scratch/zh-CN/Ap-Corosync-Conf.po +++ b/doc/Clusters_from_Scratch/zh-CN/Ap-Corosync-Conf.po @@ -1,212 +1,212 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-15 23:35+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, fuzzy, no-c-format msgid "Sample Corosync Configuration" -msgstr "Corosync.conf 样例" +msgstr "Corosync.conf 示例" #. Tag: title #, no-c-format msgid "Sample corosync.conf for two-node cluster using a node list." -msgstr "" +msgstr "配有node list选项的双节点集群corosync.conf示例。" #. Tag: literallayout #, no-c-format msgid "" "# Please read the corosync.conf.5 manual page\n" "totem {\n" "version: 2\n" "secauth: off\n" "cluster_name: mycluster\n" "transport: udpu\n" "}\n" "\n" "nodelist {\n" " node {\n" " ring0_addr: pcmk-1\n" " nodeid: 1\n" " }\n" " node {\n" " ring0_addr: pcmk-2\n" " nodeid: 2\n" " }\n" "}\n" "\n" "quorum {\n" " provider: corosync_votequorum\n" "}\n" "\n" "logging {\n" " to_syslog: yes\n" "}" msgstr "" #~ msgid "" #~ "\n" #~ "# Please read the Corosync.conf.5 manual page\n" #~ "compatibility: whitetank\n" #~ "\n" #~ "aisexec {\n" #~ "        # Run as root - this is necessary to be able to manage resources with Pacemaker\n" #~ "        user:        root\n" #~ "        group:        root\n" #~ "}\n" #~ "\n" #~ "service {\n" #~ "        # Load the Pacemaker Cluster Resource Manager\n" #~ "        ver:       0\n" #~ "        name:      pacemaker\n" #~ "        use_mgmtd: no\n" #~ "        use_logd:  no\n" #~ "}\n" #~ "\n" #~ "totem {\n" #~ "        version: 2\n" #~ "\n" #~ "        # How long before declaring a token lost (ms)\n" #~ "        token:          5000\n" #~ "\n" #~ "        # How many token retransmits before forming a new configuration\n" #~ "        token_retransmits_before_loss_const: 10\n" #~ "\n" #~ "        # How long to wait for join messages in the membership protocol (ms)\n" #~ "        join:           1000\n" #~ "\n" #~ "        # How long to wait for consensus to be achieved before starting a new\n" #~ "        # round of membership configuration (ms)\n" #~ "        consensus:      6000\n" #~ "\n" #~ "        # Turn off the virtual synchrony filter\n" #~ "        vsftype:        none\n" #~ "\n" #~ "        # Number of messages that may be sent by one processor on receipt of the token\n" #~ "        max_messages:   20\n" #~ "\n" #~ "        # Stagger sending the node join messages by 1..send_join ms\n" #~ "        send_join: 45\n" #~ "\n" #~ "        # Limit generated nodeids to 31-bits (positive signed integers)\n" #~ "        clear_node_high_bit: yes\n" #~ "\n" #~ "        # Disable encryption\n" #~ "        secauth:        off\n" #~ "\n" #~ "        # How many threads to use for encryption/decryption\n" #~ "        threads:           0\n" #~ "\n" #~ "        # Optionally assign a fixed node id (integer)\n" #~ "        # nodeid:         1234\n" #~ "\n" #~ "        interface {\n" #~ "                ringnumber: 0\n" #~ "\n" #~ "                # The following values need to be set based on your environment\n" #~ "                bindnetaddr: 192.168.122.0\n" #~ "                mcastaddr: 226.94.1.1\n" #~ "                mcastport: 4000\n" #~ "        }\n" #~ "}\n" #~ "\n" #~ "logging {\n" #~ "        debug: off\n" #~ "        fileline: off\n" #~ "        
to_syslog: yes\n" #~ "        to_stderr: off\n" #~ "        syslog_facility: daemon\n" #~ "        timestamp: on\n" #~ "}\n" #~ "\n" #~ "amf {\n" #~ "        mode: disabled\n" #~ "}\n" #~ msgstr "" #~ "\n" #~ "# Please read the Corosync.conf.5 manual page\n" #~ "compatibility: whitetank\n" #~ "\n" #~ "aisexec {\n" #~ "        # Run as root - this is necessary to be able to manage resources with Pacemaker\n" #~ "        user:        root\n" #~ "        group:        root\n" #~ "}\n" #~ "\n" #~ "service {\n" #~ "        # Load the Pacemaker Cluster Resource Manager\n" #~ "        ver:       0\n" #~ "        name:      pacemaker\n" #~ "        use_mgmtd: no\n" #~ "        use_logd:  no\n" #~ "}\n" #~ "\n" #~ "totem {\n" #~ "        version: 2\n" #~ "\n" #~ "        # How long before declaring a token lost (ms)\n" #~ "        token:          5000\n" #~ "\n" #~ "        # How many token retransmits before forming a new configuration\n" #~ "        token_retransmits_before_loss_const: 10\n" #~ "\n" #~ "        # How long to wait for join messages in the membership protocol (ms)\n" #~ "        join:           1000\n" #~ "\n" #~ "        # How long to wait for consensus to be achieved before starting a new\n" #~ "        # round of membership configuration (ms)\n" #~ "        consensus:      6000\n" #~ "\n" #~ "        # Turn off the virtual synchrony filter\n" #~ "        vsftype:        none\n" #~ "\n" #~ "        # Number of messages that may be sent by one processor on receipt of the token\n" #~ "        max_messages:   20\n" #~ "\n" #~ "        # Stagger sending the node join messages by 1..send_join ms\n" #~ "        send_join: 45\n" #~ "\n" #~ "        # Limit generated nodeids to 31-bits (positive signed integers)\n" #~ "        clear_node_high_bit: yes\n" #~ "\n" #~ "        # Disable encryption\n" #~ "        secauth:        off\n" #~ "\n" #~ "        # How many threads to use for encryption/decryption\n" #~ "        threads:           0\n" #~ "\n" #~ "        # Optionally assign a fixed node id (integer)\n" #~ "        # nodeid:         1234\n" #~ "\n" #~ "        interface {\n" #~ "                ringnumber: 0\n" #~ "\n" #~ "                # The following values need to be set based on your environment\n" #~ "                bindnetaddr: 192.168.122.0\n" #~ "                mcastaddr: 226.94.1.1\n" #~ "                mcastport: 4000\n" #~ "        }\n" #~ "}\n" #~ "\n" #~ "logging {\n" #~ "        debug: off\n" #~ "        fileline: off\n" #~ "        to_syslog: yes\n" #~ "        to_stderr: off\n" #~ "        syslog_facility: daemon\n" #~ "        timestamp: on\n" #~ "}\n" #~ "\n" #~ "amf {\n" #~ "        mode: disabled\n" #~ "}\n" diff --git a/doc/Clusters_from_Scratch/zh-CN/Author_Group.po b/doc/Clusters_from_Scratch/zh-CN/Author_Group.po index 2b3aca598d..76964ae5fd 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Author_Group.po +++ b/doc/Clusters_from_Scratch/zh-CN/Author_Group.po @@ -1,64 +1,64 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-15 23:35+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: firstname #, no-c-format msgid "Andrew" msgstr "Andrew" #. Tag: surname #, no-c-format msgid "Beekhof" msgstr "Beekhof" #. Tag: orgname #, no-c-format msgid "Red Hat" msgstr "Red Hat" #. 
Tag: contrib #, no-c-format msgid "Primary author" msgstr "作者" #. Tag: firstname #, no-c-format msgid "Raoul" msgstr "Raoul" #. Tag: surname #, no-c-format msgid "Scarazzini" msgstr "Scarazzini" #. Tag: contrib #, no-c-format msgid "Italian translation" msgstr "意大利语翻译" #. Tag: firstname #, no-c-format msgid "Dan" msgstr "" #. Tag: surname #, no-c-format msgid "Frîncu" msgstr "" #. Tag: contrib -#, fuzzy, no-c-format +#, no-c-format msgid "Romanian translation" -msgstr "意大利语翻译" +msgstr "罗马尼亚语翻译" diff --git a/doc/Clusters_from_Scratch/zh-CN/Book_Info.po b/doc/Clusters_from_Scratch/zh-CN/Book_Info.po index 973f6b7876..f74af87811 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Book_Info.po +++ b/doc/Clusters_from_Scratch/zh-CN/Book_Info.po @@ -1,72 +1,72 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-15 23:35+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Clusters from Scratch" msgstr "从头开始搭建集群" #. Tag: subtitle #, no-c-format msgid "Creating Active/Passive and Active/Active Clusters on Fedora" -msgstr "在Fedora上面创建主/主和主备集群" +msgstr "在Fedora上面创建主/主和主/备集群" #. Tag: productname #, no-c-format msgid "Pacemaker" msgstr "Pacemaker" #. Tag: para #, no-c-format msgid "The purpose of this document is to provide a start-to-finish guide to building an example active/passive cluster with Pacemaker and show how it can be converted to an active/active one." -msgstr "本文档的主要目的是提供一站式指南,教您如何使用Pacemaker创建一个主/备模式的集群并把它转换到主/主模式。" +msgstr "本文档的主要目是提供一站式指南,教您如何使用Pacemaker创建一个主/备模式的集群并把它转换到主/主模式。" #. Tag: para #, no-c-format msgid "The example cluster will use:" msgstr "示例集群会使用以下软件:" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "&DISTRO; &DISTRO_VERSION; as the host operating system" msgstr "&DISTRO; &DISTRO_VERSION; 作为基本操作系统" #. Tag: para #, no-c-format msgid "Corosync to provide messaging and membership services," -msgstr "Corosync作为通信层和提供关系管理服务" +msgstr "Corosync作为通信层并提供关系管理服务," #. Tag: para #, no-c-format msgid "Pacemaker to perform resource management," -msgstr "Pacemaker来实现资源管理" +msgstr "Pacemaker来实现资源管理," #. Tag: para #, no-c-format msgid "DRBD as a cost-effective alternative to shared storage," -msgstr "DRBD 作为一个经济的共享存储方案" +msgstr "DRBD 作为一个经济的共享存储方案," #. Tag: para #, no-c-format msgid "GFS2 as the cluster filesystem (in active/active mode)" msgstr "GFS2 作为集群文件系统(主/主模式中)" #. Tag: para #, no-c-format msgid "Given the graphical nature of the Fedora install process, a number of screenshots are included. However the guide is primarily composed of commands, the reasons for executing them and their expected outputs." -msgstr "虽然给出了图形化安装Fedora的过程,并且有很多截图,但是本文的主要是靠命令来操作,包括为什么要运行这个命令和这些操作产生的结果。(译者注:本文中基本是crm shell来操作的,这里应该是老版本文档的遗留)" +msgstr "虽然给出了图形化安装Fedora的过程,并且有很多截图,但本文主要是靠命令来操作,包括为什么要运行这个命令和这些操作产生的结果。(译者注:本文中基本是crm shell来操作的,这里应该是老版本文档的遗留)" #~ msgid "The crm shell for displaying the configuration and making changes" #~ msgstr "crm shell 来显示并修改配置文件" diff --git a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Active.po b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Active.po index b089fb966e..1fedbd45ac 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Active.po +++ b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Active.po @@ -1,1551 +1,1551 @@ # # AUTHOR , YEAR. 
# msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-16 00:37+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Poedit-Language: Chinese\n" "X-Poedit-Country: CHINA\n" "X-Poedit-SourceCharset: utf-8\n" #. Tag: title #, no-c-format msgid "Conversion to Active/Active" msgstr "转变为Active/Active" #. Tag: title #, no-c-format msgid "Requirements" msgstr "需求" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "The primary requirement for an Active/Active cluster is that the data required for your services is available, simultaneously, on both machines. Pacemaker makes no requirement on how this is achieved, you could use a SAN if you had one available, however since DRBD supports multiple Primaries, we can also use that." -msgstr "Active/Active集群一个主要的需求就是数据在两台机器上面都是可用并且是同步的。Pacemaker没有要求你怎么实现,你可以用SAN,但是自从DRBD支持多主模式,我们也可以用这个来实现。" +msgstr "Active/Active集群一个主要的需求就是数据在两台机器上面都是可用并且同步的。Pacemaker没有要求你怎么实现,你可以使用现有可用的SAN,但是自从DRBD支持多主模式后,我们也可以用这个来实现。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "The only hitch is that we need to use a cluster-aware filesystem. The one we used earlier with DRBD, ext4, is not one of those. Both OCFS2 and GFS2 are supported, however here we will use GFS2 which comes with Fedora 17." -msgstr "唯一的限制是我们要用一个针对集群的文件系统(我们之前用的ext4,它并不是这样一个文件系统)。 OCFS2或者GFS2都是可以的,但是在&DISTRO; &DISTRO_VERSION;上面,我们用GFS2。" +msgstr "唯一的限制是我们要用一个针对集群的文件系统(我们之前用的ext4,但它并不是这样一个文件系统)。 OCFS2或者GFS2都是支持的,但是在Fedora 17上面,我们使用GFS2。" #. Tag: title #, no-c-format msgid "Installing the required Software" -msgstr "" +msgstr "安装必要的软件" #. Tag: programlisting #, no-c-format msgid "# yum install -y gfs2-utils dlm kernel-modules-extra" msgstr "" #. 
Tag: literallayout #, no-c-format msgid "" "Loaded plugins: langpacks, presto, refresh-packagekit\n" "Resolving Dependencies\n" "--> Running transaction check\n" "---> Package dlm.x86_64 0:3.99.4-1.fc17 will be installed\n" "---> Package gfs2-utils.x86_64 0:3.1.4-3.fc17 will be installed\n" "---> Package kernel-modules-extra.x86_64 0:3.4.4-3.fc17 will be installed\n" "--> Finished Dependency Resolution\n" "\n" "Dependencies Resolved\n" "\n" "================================================================================\n" " Package Arch Version Repository Size\n" "================================================================================\n" "Installing:\n" " dlm x86_64 3.99.4-1.fc17 updates 83 k\n" " gfs2-utils x86_64 3.1.4-3.fc17 fedora 214 k\n" " kernel-modules-extra x86_64 3.4.4-3.fc17 updates 1.7 M\n" "\n" "Transaction Summary\n" "================================================================================\n" "Install 3 Packages\n" "\n" "Total download size: 1.9 M\n" "Installed size: 7.7 M\n" "Downloading Packages:\n" "(1/3): dlm-3.99.4-1.fc17.x86_64.rpm | 83 kB 00:00\n" "(2/3): gfs2-utils-3.1.4-3.fc17.x86_64.rpm | 214 kB 00:00\n" "(3/3): kernel-modules-extra-3.4.4-3.fc17.x86_64.rpm | 1.7 MB 00:01\n" "--------------------------------------------------------------------------------\n" "Total 615 kB/s | 1.9 MB 00:03\n" "Running Transaction Check\n" "Running Transaction Test\n" "Transaction Test Succeeded\n" "Running Transaction\n" " Installing : kernel-modules-extra-3.4.4-3.fc17.x86_64 1/3\n" " Installing : gfs2-utils-3.1.4-3.fc17.x86_64 2/3\n" " Installing : dlm-3.99.4-1.fc17.x86_64 3/3\n" " Verifying : dlm-3.99.4-1.fc17.x86_64 1/3\n" " Verifying : gfs2-utils-3.1.4-3.fc17.x86_64 2/3\n" " Verifying : kernel-modules-extra-3.4.4-3.fc17.x86_64 3/3\n" "\n" "Installed:\n" " dlm.x86_64 0:3.99.4-1.fc17\n" " gfs2-utils.x86_64 0:3.1.4-3.fc17\n" " kernel-modules-extra.x86_64 0:3.4.4-3.fc17\n" "\n" "Complete!" msgstr "" #. Tag: title #, no-c-format msgid "Create a GFS2 Filesystem" msgstr "创建一个 GFS2 文件系统" #. Tag: title #, no-c-format msgid "Preparation" msgstr "准备工作" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Before we do anything to the existing partition, we need to make sure it is unmounted. We do this by telling the cluster to stop the WebFS resource. This will ensure that other resources (in our case, Apache) using WebFS are not only stopped, but stopped in the correct order." -msgstr "在我们对一个已存在的分区做任何操作之前,我们要确保它没有被挂载。我们告诉集群停止WebFS这个资源来确保这一点。这可以确保其他使用WebFS的资源会正确的依次关闭。" +msgstr "在我们对一个已存在的分区做任何操作之前,我们要确保它没有被挂载。我们通过告诉集群停止WebFS这个资源来实现这点。这将会确保其他使用WebFS的资源会正确的依次关闭。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource stop WebFS\n" "# pcs resource\n" " ClusterIP (ocf::heartbeat:IPaddr2) Started\n" " WebSite (ocf::heartbeat:apache) Stopped\n" " Master/Slave Set: WebDataClone [WebData]\n" " Masters: [ pcmk-2 ]\n" " Slaves: [ pcmk-1 ]\n" " WebFS (ocf::heartbeat:Filesystem) Stopped" msgstr "" #. Tag: para #, no-c-format msgid "Note that both Apache and WebFS have been stopped." -msgstr "注意 Apache and WebFS 两者都已经停止了。" +msgstr "注意 Apache 和 WebFS 两者都已经停止了。" #. Tag: title #, no-c-format msgid "Create and Populate an GFS2 Partition" msgstr "创建并迁移数据到 GFS2 分区" #. Tag: para #, no-c-format msgid "Now that the cluster stack and integration pieces are running smoothly, we can create an GFS2 partition." -msgstr "现在集群的基层和集成部分都正常运行,我们现在创建一个GFS2分区" +msgstr "现在集群的基层和集成部分都正常运行,我们创建一个GFS2分区。" #. 
#. Tag: para #, no-c-format msgid "This will erase all previous content stored on the DRBD device. Ensure you have a copy of any important data." msgstr "这个操作会清除DRBD分区上面的所有数据,请备份重要的数据。" #. Tag: para #, no-c-format msgid "We need to specify a number of additional parameters when creating a GFS2 partition." msgstr "我们要为GFS2分区指定一系列附加的参数。" #. Tag: para #, no-c-format msgid "First we must use the -p option to specify that we want to use the the Kernel’s DLM. Next we use -j to indicate that it should reserve enough space for two journals (one per node accessing the filesystem)." -msgstr "首先我们要用 -p选项来指定我们用的是内核的DLM,然后我们用-j来表示我们为两个日志保留足够的空间(每个操作文件系统的节点各一个)。" +msgstr "首先我们要用 -p 选项来指定我们用的是内核的DLM,然后我们用 -j 来表示要为两个日志保留足够的空间(每个访问该文件系统的节点各一个)。" #. Tag: para #, no-c-format msgid "Lastly, we use -t to specify the lock table name. The format for this field is clustername:fsname. For the fsname, we need to use the same value as specified in corosync.conf for cluster_name. If you setup corosync with the same cluster name we used in this tutorial, cluster name will be mycluster. If you are unsure what your cluster name is, open up /etc/corosync/corosync.conf, or execute the command pcs cluster corosync pcmk-1 to view the corosync config. The cluster name will be in the totem block." -msgstr "" +msgstr "最后,我们使用 -t 来指定 lock table 名。这个字段的格式为“集群名:文件系统名”。其中的集群名需要与corosync.conf中cluster_name的值保持一致。如果你设置corosync时使用了与本教程相同的集群名,那么集群名就是“mycluster”。如果你不确定集群名是什么,可以打开 /etc/corosync/corosync.conf,或者执行命令 pcs cluster corosync pcmk-1 来查看corosync配置。集群名位于totem块中。" #. Tag: para #, no-c-format msgid "We must run the next command on whichever node last had /dev/drbd mounted. Otherwise you will receive the message:" -msgstr "" +msgstr "我们必须在最后挂载过 /dev/drbd 的那个节点上执行下一条命令。否则你会收到这样的消息:" #. Tag: screen #, no-c-format msgid "/dev/drbd1: Read-only file system" msgstr "" #. Tag: programlisting #, no-c-format msgid "" "# ssh pcmk-2 -- mkfs.gfs2 -p lock_dlm -j 2 -t mycluster:web /dev/drbd1\n" "This will destroy any data on /dev/drbd1.\n" "It appears to contain: Linux rev 1.0 ext4 filesystem data, UUID=dc45fff3-c47a-4db2-96f7-a8049a323fe4 (extents) (large files) (huge files)\n" "Are you sure you want to proceed? [y/n]y\n" "Device: /dev/drbd1\n" "Blocksize: 4096\n" "Device Size 0.97 GB (253935 blocks)\n" "Filesystem Size: 0.97 GB (253932 blocks)\n" "Journals: 2\n" "Resource Groups: 4\n" "Locking Protocol: \"lock_dlm\"\n" "Lock Table: \"mycluster\"\n" "UUID: ed293a02-9eee-3fa3-ed1c-435ef1fd0116" msgstr ""
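# Editor's note (a hedged sketch of the cluster-name check described above):
# "pcs cluster corosync pcmk-1" is the command the original text names; the
# grep variant is only an assumed convenience for spotting the totem block:
#
#   pcs cluster corosync pcmk-1
#   grep -A3 totem /etc/corosync/corosync.conf   # cluster_name lives here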
#. Tag: programlisting #, no-c-format msgid "" "# pcs cluster cib dlm_cfg\n" "# pcs -f dlm_cfg resource create dlm ocf:pacemaker:controld op monitor interval=60s\n" "# pcs -f dlm_cfg resource clone dlm clone-max=2 clone-node-max=1\n" "# pcs -f dlm_cfg resource show\n" " ClusterIP (ocf::heartbeat:IPaddr2) Started\n" " WebSite (ocf::heartbeat:apache) Stopped\n" " Master/Slave Set: WebDataClone [WebData]\n" " Masters: [ pcmk-2 ]\n" " Slaves: [ pcmk-1 ]\n" " WebFS (ocf::heartbeat:Filesystem) Stopped\n" " Clone Set: dlm-clone [dlm]\n" " Stopped: [ dlm:0 dlm:1 ]\n" "# pcs cluster push cib dlm_cfg\n" "CIB updated\n" "# pcs status\n" "\n" "Last updated: Fri Sep 14 12:54:50 2012\n" "Last change: Fri Sep 14 12:54:43 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-1 (1) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "7 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2\n" " WebSite (ocf::heartbeat:apache): Stopped\n" " Master/Slave Set: WebDataClone [WebData]\n" " Masters: [ pcmk-2 ]\n" " Slaves: [ pcmk-1 ]\n" " WebFS (ocf::heartbeat:Filesystem): Stopped\n" " Clone Set: dlm-clone [dlm]\n" " Started: [ pcmk-1 pcmk-2 ]" msgstr "" #. Tag: para #, no-c-format msgid "Then (re)populate the new filesystem with data (web pages). For now we’ll create another variation on our home page." msgstr "然后重新向这个新的文件系统填充数据(网页)。现在我们创建一个跟上次不一样的主页。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# mount /dev/drbd1 /mnt/\n" "# cat <<-END >/mnt/index.html\n" "<html>\n" "<body>My Test Site - GFS2</body>\n" "</html>\n" "END\n" "# umount /dev/drbd1\n" "# drbdadm verify wwwdata#" msgstr "" "\n" "[root@pcmk-1 ~]# mount /dev/drbd1 /mnt/\n" "[root@pcmk-1 ~]# cat <<-END >/mnt/index.html\n" "<html>\n" "<body>My Test Site - GFS2</body>\n" "</html>\n" "END\n" "[root@pcmk-1 ~]# umount /dev/drbd1\n" "[root@pcmk-1 ~]# drbdadm verify wwwdata\n" "[root@pcmk-1 ~]#\n" #. Tag: title #, no-c-format msgid "Reconfigure the Cluster for GFS2" msgstr "重新为集群配置GFS2" #. Tag: para #, no-c-format msgid "With the WebFS resource stopped, lets update the configuration." -msgstr "" +msgstr "当 WebFS 资源停止后,让我们来更新配置。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource show WebFS\n" "Resource: WebFS\n" " device: /dev/drbd/by-res/wwwdata\n" " directory: /var/www/html\n" " fstype: ext4\n" " target-role: Stopped" msgstr "" #. Tag: para #, no-c-format msgid "The fstype option needs to be updated to gfs2 instead of ext4." -msgstr "" +msgstr "fstype 参数需要从 ext4 更新为 gfs2。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource update WebFS fstype=gfs2\n" "# pcs resource show WebFS\n" "Resource: WebFS\n" " device: /dev/drbd/by-res/wwwdata\n" " directory: /var/www/html\n" " fstype: gfs2\n" " target-role: Stopped\n" "CIB updated" msgstr "" #. Tag: title #, no-c-format msgid "Reconfigure Pacemaker for Active/Active" msgstr "重新配置 Pacemaker 为 Active/Active" #. Tag: para #, no-c-format msgid "Almost everything is in place. Recent versions of DRBD are capable of operating in Primary/Primary mode and the filesystem we’re using is cluster aware. All we need to do now is reconfigure the cluster to take advantage of this." msgstr "基本上所有的事情都已经准备就绪了。较新版本的DRBD支持 Primary/Primary(主/主)模式,并且我们使用的文件系统是集群感知的。我们现在要做的就是重新配置集群来利用这些特性。"
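# Editor's note (assembled from pcs commands that appear verbatim in this
# chapter): the next entries batch several changes against a local CIB file
# instead of the live cluster. End to end, the workflow looks like this:
#
#   pcs cluster cib active_cfg                                    # snapshot live CIB to a file
#   pcs -f active_cfg resource update WebDataClone master-max=2   # edit the file only
#   pcs cluster push cib active_cfg                               # apply all changes at once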
#. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "This will involve a number of changes, so we’ll want work with a local cib file." -msgstr "这次操作会改很多东西,所以我们再次使用交互模式" +msgstr "这次操作会涉及很多改动,所以我们将使用一个本地的cib文件来进行操作。" #. Tag: programlisting #, no-c-format msgid "# pcs cluster cib active_cfg" msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "There’s no point making the services active on both locations if we can’t reach them, so lets first clone the IP address. Cloned IPaddr2 resources use an iptables rule to ensure that each request only gets processed by one of the two clone instances. The additional meta options tell the cluster how many instances of the clone we want (one \"request bucket\" for each node) and that if all other nodes fail, then the remaining node should hold all of them. Otherwise the requests would be simply discarded." -msgstr "如果我们不能访问这些服务,那做成 Active/Active是没有必要的,所以我们要先clone这个IP地址,克隆的IPaddr2资源用的是iptables规则来保证每个请求都只由一个节点来处理。附件的meta选项告诉集群我们要克隆多少个实例(每个节点一个\"请求桶\")。并且如果其他节点挂了,剩下的节点可以处理所有的请求。否则这些请求都会被丢弃。" +msgstr "如果我们不能访问这些服务,那把服务在两个节点上都激活就没有意义了,所以我们先来clone这个IP地址。克隆的IPaddr2资源使用一条iptables规则来确保每个请求只由两个clone实例中的一个来处理。附加的meta选项告诉集群我们需要多少个clone实例(每个节点一个\"request bucket\"),并且当所有其他节点都失效时,剩下的节点应当接管全部实例,否则这些请求会被直接丢弃。" #. Tag: screen #, no-c-format msgid "" "# pcs -f active_cfg resource clone ClusterIP \\\n" " globally-unique=true clone-max=2 clone-node-max=2" msgstr "" #. Tag: para #, no-c-format msgid "Notice when the ClusterIP becomes a clone, the constraints referencing ClusterIP now reference the clone. This is done automatically by pcs." -msgstr "" +msgstr "注意当 ClusterIP 变为clone后,原先引用 ClusterIP 的约束现在引用的是这个clone。这是由pcs自动完成的。" #. Tag: programlisting #, no-c-format msgid "" "# pcs -f active_cfg constraint\n" "Location Constraints:\n" "Ordering Constraints:\n" " start ClusterIP-clone then start WebSite\n" " WebFS then WebSite\n" " promote WebDataClone then start WebFS\n" "Colocation Constraints:\n" " WebSite with ClusterIP-clone\n" " WebFS with WebDataClone (with-rsc-role:Master)\n" " WebSite with WebFS" msgstr "" #. Tag: para #, no-c-format msgid "Now we must tell the ClusterIP how to decide which requests are processed by which hosts. To do this we must specify the clusterip_hash parameter." -msgstr "现在我们要告诉集群如何决定请求怎样分配给节点。我们要设置 clusterip_hash这个参数来实现它。" +msgstr "现在我们必须告诉 ClusterIP 如何决定哪些请求由哪个主机来处理。我们要设置 clusterip_hash 这个参数来实现它。" #. Tag: programlisting #, no-c-format msgid "# pcs -f active_cfg resource update ClusterIP clusterip_hash=sourceip" msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Next we need to convert the filesystem and Apache resources into clones." -msgstr "然后我们要把文件系统和apache资源变成clones。同样的 crm shell会自动更新相关约束。" +msgstr "然后我们需要把文件系统和Apache资源也转换为clone。" #. Tag: para #, no-c-format msgid "Notice how pcs automatically updates the relevant constraints again." msgstr "请注意pcs是如何再次自动更新相关约束的。" #. Tag: programlisting #, no-c-format msgid "" "# pcs -f active_cfg resource clone WebFS\n" "# pcs -f active_cfg resource clone WebSite\n" "# pcs -f active_cfg constraint\n" "Location Constraints:\n" "Ordering Constraints:\n" " start ClusterIP-clone then start WebSite-clone\n" " WebFS-clone then WebSite-clone\n" " promote WebDataClone then start WebFS-clone\n" "Colocation Constraints:\n" " WebSite-clone with ClusterIP-clone\n" " WebFS-clone with WebDataClone (with-rsc-role:Master)\n" " WebSite-clone with WebFS-clone" msgstr "" #. Tag: para #, no-c-format msgid "The last step is to tell the cluster that it is now allowed to promote both instances to be Primary (aka. Master)."
-msgstr "最后要告诉集群现在允许把两个节点都提升为 Primary(换句话说 Master)." +msgstr "最后要告诉集群现在允许把两个节点都提升为 Primary(换句话说 Master)。" #. Tag: programlisting #, no-c-format msgid "# pcs -f active_cfg resource update WebDataClone master-max=2" msgstr "" #. Tag: para #, no-c-format msgid "Review the configuration before uploading it to the cluster, quitting the shell and watching the cluster’s response" -msgstr "看看配置文件有没有错误,然后退出shell看看集群的反应。" +msgstr "将配置上传到集群前检查是否有错误,然后退出shell看看集群的响应。" #. Tag: programlisting #, no-c-format msgid "" "# pcs cluster push cib active_cfg\n" "# pcs resource start WebFS" msgstr "" #. Tag: para #, no-c-format msgid "After all the processes are started the status should look similar to this." -msgstr "" +msgstr "在所有处理都启动后,状态看起来应该类似于这样。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource\n" " Master/Slave Set: WebDataClone [WebData]\n" " Masters: [ pcmk-2 pcmk-1 ]\n" " Clone Set: dlm-clone [dlm]\n" " Started: [ pcmk-2 pcmk-1 ]\n" " Clone Set: ClusterIP-clone [ClusterIP] (unique)\n" " ClusterIP:0 (ocf::heartbeat:IPaddr2) Started\n" " ClusterIP:1 (ocf::heartbeat:IPaddr2) Started\n" " Clone Set: WebFS-clone [WebFS]\n" " Started: [ pcmk-1 pcmk-2 ]\n" " Clone Set: WebSite-clone [WebSite]\n" " Started: [ pcmk-1 pcmk-2 ]" msgstr "" #. Tag: title #, no-c-format msgid "Testing Recovery" msgstr "恢复测试" #. Tag: para #, no-c-format msgid "TODO: Put one node into standby to demonstrate failover" -msgstr "TODO: Put one node into standby to demonstrate failover" +msgstr "TODO: 将一个节点置为standby状态来演示故障迁移" #~ msgid "Install a Cluster Filesystem - GFS2" #~ msgstr "安装一个集群文件系统 - GFS2" #~ msgid "The first thing to do is install gfs2-utils on each machine." #~ msgstr "首先我们在各个节点上面安装GFS2。" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# yum install -y gfs2-utils gfs-pcmk\n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package gfs-pcmk.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "--> Processing Dependency: libSaCkpt.so.3(OPENAIS_CKPT_B.01.01)(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: dlm-pcmk for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: libccs.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: libdlmcontrol.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: liblogthread.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: libSaCkpt.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "---> Package gfs2-utils.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "--> Running transaction check\n" #~ "---> Package clusterlib.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "---> Package dlm-pcmk.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "---> Package openaislib.x86_64 0:1.1.0-1.fc12 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ "===========================================================================================\n" #~ " Package                Arch               Version                   Repository        Size\n" #~ "===========================================================================================\n" #~ "Installing:\n" #~ " gfs-pcmk               x86_64             3.0.5-2.fc12              custom           101 k\n" #~ " gfs2-utils             x86_64             3.0.5-2.fc12              custom           208 k\n" #~ "Installing for dependencies:\n" #~ " clusterlib             x86_64          
   3.0.5-2.fc12              custom            65 k\n" #~ " dlm-pcmk               x86_64             3.0.5-2.fc12              custom            93 k\n" #~ " openaislib             x86_64             1.1.0-1.fc12              fedora            76 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "===========================================================================================\n" #~ "Install       5 Package(s)\n" #~ "Upgrade       0 Package(s)\n" #~ "\n" #~ "Total download size: 541 k\n" #~ "Downloading Packages:\n" #~ "(1/5): clusterlib-3.0.5-2.fc12.x86_64.rpm                                |  65 kB     00:00\n" #~ "(2/5): dlm-pcmk-3.0.5-2.fc12.x86_64.rpm                                  |  93 kB     00:00\n" #~ "(3/5): gfs-pcmk-3.0.5-2.fc12.x86_64.rpm                                  | 101 kB     00:00\n" #~ "(4/5): gfs2-utils-3.0.5-2.fc12.x86_64.rpm                                | 208 kB     00:00\n" #~ "(5/5): openaislib-1.1.0-1.fc12.x86_64.rpm                                |  76 kB     00:00\n" #~ "-------------------------------------------------------------------------------------------\n" #~ "Total                                                           992 kB/s | 541 kB     00:00\n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Finished Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ "  Installing     : clusterlib-3.0.5-2.fc12.x86_64                                       1/5 \n" #~ "  Installing     : openaislib-1.1.0-1.fc12.x86_64                                       2/5 \n" #~ "  Installing     : dlm-pcmk-3.0.5-2.fc12.x86_64                                         3/5 \n" #~ "  Installing     : gfs-pcmk-3.0.5-2.fc12.x86_64                                         4/5 \n" #~ "  Installing     : gfs2-utils-3.0.5-2.fc12.x86_64                                       5/5 \n" #~ "\n" #~ "Installed:\n" #~ "  gfs-pcmk.x86_64 0:3.0.5-2.fc12                    gfs2-utils.x86_64 0:3.0.5-2.fc12\n" #~ "\n" #~ "Dependency Installed:\n" #~ "  clusterlib.x86_64 0:3.0.5-2.fc12   dlm-pcmk.x86_64 0:3.0.5-2.fc12 \n" #~ "  openaislib.x86_64 0:1.1.0-1.fc12  \n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 x86_64]#\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# yum install -y gfs2-utils gfs-pcmk\n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package gfs-pcmk.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "--> Processing Dependency: libSaCkpt.so.3(OPENAIS_CKPT_B.01.01)(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: dlm-pcmk for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: libccs.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: libdlmcontrol.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: liblogthread.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "--> Processing Dependency: libSaCkpt.so.3()(64bit) for package: gfs-pcmk-3.0.5-2.fc12.x86_64\n" #~ "---> Package gfs2-utils.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "--> Running transaction check\n" #~ "---> Package clusterlib.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "---> Package dlm-pcmk.x86_64 0:3.0.5-2.fc12 set to be updated\n" #~ "---> Package openaislib.x86_64 0:1.1.0-1.fc12 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ 
"===========================================================================================\n" #~ " Package                Arch               Version                   Repository        Size\n" #~ "===========================================================================================\n" #~ "Installing:\n" #~ " gfs-pcmk               x86_64             3.0.5-2.fc12              custom           101 k\n" #~ " gfs2-utils             x86_64             3.0.5-2.fc12              custom           208 k\n" #~ "Installing for dependencies:\n" #~ " clusterlib             x86_64             3.0.5-2.fc12              custom            65 k\n" #~ " dlm-pcmk               x86_64             3.0.5-2.fc12              custom            93 k\n" #~ " openaislib             x86_64             1.1.0-1.fc12              fedora            76 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "===========================================================================================\n" #~ "Install       5 Package(s)\n" #~ "Upgrade       0 Package(s)\n" #~ "\n" #~ "Total download size: 541 k\n" #~ "Downloading Packages:\n" #~ "(1/5): clusterlib-3.0.5-2.fc12.x86_64.rpm                                |  65 kB     00:00\n" #~ "(2/5): dlm-pcmk-3.0.5-2.fc12.x86_64.rpm                                  |  93 kB     00:00\n" #~ "(3/5): gfs-pcmk-3.0.5-2.fc12.x86_64.rpm                                  | 101 kB     00:00\n" #~ "(4/5): gfs2-utils-3.0.5-2.fc12.x86_64.rpm                                | 208 kB     00:00\n" #~ "(5/5): openaislib-1.1.0-1.fc12.x86_64.rpm                                |  76 kB     00:00\n" #~ "-------------------------------------------------------------------------------------------\n" #~ "Total                                                           992 kB/s | 541 kB     00:00\n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Finished Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ "  Installing     : clusterlib-3.0.5-2.fc12.x86_64                                       1/5 \n" #~ "  Installing     : openaislib-1.1.0-1.fc12.x86_64                                       2/5 \n" #~ "  Installing     : dlm-pcmk-3.0.5-2.fc12.x86_64                                         3/5 \n" #~ "  Installing     : gfs-pcmk-3.0.5-2.fc12.x86_64                                         4/5 \n" #~ "  Installing     : gfs2-utils-3.0.5-2.fc12.x86_64                                       5/5 \n" #~ "\n" #~ "Installed:\n" #~ "  gfs-pcmk.x86_64 0:3.0.5-2.fc12                    gfs2-utils.x86_64 0:3.0.5-2.fc12\n" #~ "\n" #~ "Dependency Installed:\n" #~ "  clusterlib.x86_64 0:3.0.5-2.fc12   dlm-pcmk.x86_64 0:3.0.5-2.fc12 \n" #~ "  openaislib.x86_64 0:1.1.0-1.fc12  \n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 x86_64]#\n" #~ msgid "Setup Pacemaker-GFS2 Integration" #~ msgstr "整合 Pacemaker-GFS2" #~ msgid "GFS2 needs two services to be running, the first is the user-space interface to the kernel’s distributed lock manager (DLM). The DLM is used to co-ordinate which node(s) can access a given file (and when) and integrates with Pacemaker to obtain node membership The list of nodes the cluster considers to be available information and fencing capabilities." #~ msgstr "GFS2要求运行两个服务,首先是用户空间访问内核的分布式锁管理(DLM)的接口。 DLM是用来统筹哪个节点可以处理某个特定的文件,并且与Pacemaker集成来得到节点之间的关系 The list of nodes the cluster considers to be available 和隔离能力。" #~ msgid "The second service is GFS2’s own control daemon which also integrates with Pacemaker to obtain node membership data." 
#~ msgstr "另外一个服务是GFS2自身的控制进程,也是与Pacemaker集成来得到节点之间的关系。" #~ msgid "Add the DLM service" #~ msgstr "添加 DLM 服务" #~ msgid "The DLM control daemon needs to run on all active cluster nodes, so we will use the shells interactive mode to create a cloned resource." #~ msgstr "DLM控制进程需要在所有可用的集群节点上面运行,所以我们用shell交互模式来添加一个cloned类型的资源。" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "crm(live)# cib new stack-glue\n" #~ "INFO: stack-glue shadow CIB created\n" #~ "crm(stack-glue)# configure primitive dlm ocf:pacemaker:controld op monitor interval=120s\n" #~ "crm(stack-glue)# configure clone dlm-clone dlm meta interleave=true\n" #~ "crm(stack-glue)# configure show xml\n" #~ "crm(stack-glue)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=\"ext4\"\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ " op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone dlm-clone dlm \\\n" #~ " meta interleave=\"true\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "crm(live)# cib new stack-glue\n" #~ "INFO: stack-glue shadow CIB created\n" #~ "crm(stack-glue)# configure primitive dlm ocf:pacemaker:controld op monitor interval=120s\n" #~ "crm(stack-glue)# configure clone dlm-clone dlm meta interleave=true\n" #~ "crm(stack-glue)# configure show xml\n" #~ "crm(stack-glue)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=\"ext4\"\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ " op monitor 
interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone dlm-clone dlm \\\n" #~ " meta interleave=\"true\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgid "TODO: Explain the meaning of the interleave option" #~ msgstr "TODO: Explain the meaning of the interleave option" #~ msgid "" #~ "\n" #~ "crm(stack-glue)# cib commit stack-glue\n" #~ "INFO: commited 'stack-glue' shadow CIB to the cluster\n" #~ "crm(stack-glue)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 20:49:54 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "5 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "WebSite (ocf::heartbeat:apache):        Started pcmk-2\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "Clone Set: dlm-clone\n" #~ " Started: [ pcmk-2 pcmk-1 ]\n" #~ "WebFS   (ocf::heartbeat:Filesystem):    Started pcmk-2\n" #~ msgstr "" #~ "\n" #~ "crm(stack-glue)# cib commit stack-glue\n" #~ "INFO: commited 'stack-glue' shadow CIB to the cluster\n" #~ "crm(stack-glue)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 20:49:54 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "5 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "WebSite (ocf::heartbeat:apache):        Started pcmk-2\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "Clone Set: dlm-clone\n" #~ " Started: [ pcmk-2 pcmk-1 ]\n" #~ "WebFS   (ocf::heartbeat:Filesystem):    Started pcmk-2\n" #~ msgid "Add the GFS2 service" #~ msgstr "添加 GFS2 服务" #~ msgid "Once the DLM is active, we can add the GFS2 control daemon." 
#~ msgstr "一旦DLM启动了,我们可以加上GFS2的控制进程了。" #~ msgid "Use the crm shell to create the gfs-control cluster resource:" #~ msgstr "用crm shell来创建gfs-control这个集群资源:" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "crm(live)# cib new gfs-glue --force\n" #~ "INFO: gfs-glue shadow CIB created\n" #~ "crm(gfs-glue)# configure primitive gfs-control ocf:pacemaker:controld params daemon=gfs_controld.pcmk args=\"-g 0\" op monitor interval=120s\n" #~ "crm(gfs-glue)# configure clone gfs-clone gfs-control meta interleave=true\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "crm(live)# cib new gfs-glue --force\n" #~ "INFO: gfs-glue shadow CIB created\n" #~ "crm(gfs-glue)# configure primitive gfs-control ocf:pacemaker:controld params daemon=gfs_controld.pcmk args=\"-g 0\" op monitor interval=120s\n" #~ "crm(gfs-glue)# configure clone gfs-clone gfs-control meta interleave=true\n" #~ msgid "Now ensure Pacemaker only starts the gfs-control service on nodes that also have a copy of the dlm service (created above) already running" #~ msgstr "现在确保Pacemaker只在有dlm服务运行的节点上面启动 gfs-control 服务" #~ msgid "" #~ "\n" #~ "crm(gfs-glue)# configure colocation gfs-with-dlm INFINITY: gfs-clone dlm-clone\n" #~ "crm(gfs-glue)# configure order start-gfs-after-dlm mandatory: dlm-clone gfs-clone\n" #~ msgstr "" #~ "\n" #~ "crm(gfs-glue)# configure colocation gfs-with-dlm INFINITY: gfs-clone dlm-clone\n" #~ "crm(gfs-glue)# configure order start-gfs-after-dlm mandatory: dlm-clone gfs-clone\n" #~ msgid "" #~ "\n" #~ "crm(gfs-glue)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=\"ext4\"\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ " params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ " op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ " meta interleave=\"true\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults 
$id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ "crm(gfs-glue)# cib commit gfs-glue\n" #~ "INFO: commited 'gfs-glue' shadow CIB to the cluster\n" #~ "crm(gfs-glue)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 20:49:54 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "WebSite (ocf::heartbeat:apache):        Started pcmk-2\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ " Started: [ pcmk-2 pcmk-1 ]\n" #~ "WebFS   (ocf::heartbeat:Filesystem):    Started pcmk-1\n" #~ msgstr "" #~ "\n" #~ "crm(gfs-glue)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=\"ext4\"\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ " params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ " op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ " meta interleave=\"true\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ "crm(gfs-glue)# cib commit gfs-glue\n" #~ "INFO: commited 'gfs-glue' shadow CIB to the cluster\n" #~ "crm(gfs-glue)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 20:49:54 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes 
configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "WebSite (ocf::heartbeat:apache):        Started pcmk-2\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ " Started: [ pcmk-2 pcmk-1 ]\n" #~ "WebFS   (ocf::heartbeat:Filesystem):    Started pcmk-1\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm_resource --resource WebFS --set-parameter target-role --meta --parameter-value Stopped\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 15:18:06 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-1 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm_resource --resource WebFS --set-parameter target-role --meta --parameter-value Stopped\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 15:18:06 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-1 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ msgid "Lastly, we use -t to specify the lock table name. The format for this field is clustername:fsname. For the fsname, we just need to pick something unique and descriptive and since we haven’t specified a clustername yet, we will use the default (pcmk)." #~ msgstr "最后,我们用-t来指定lock table的名称。这个字段的格式是 clustername:fsname(集群名称:文件系统名称)。fsname的话,我们只要用一个唯一的并且能描述我们这个集群的名称就好了,我们用默认的pcmk。" #~ msgid "To specify an alternate name for the cluster, locate the service section containing “name: pacemaker” in corosync.conf and insert the following line anywhere inside the block:" #~ msgstr "如果要更改集群的名称,找到包含name:pacemaker的配置文件区域,然后添加如下所示的选项即可。" #~ msgid "clustername: myname" #~ msgstr "clustername: myname" #~ msgid "Do this on each node in the cluster and be sure to restart them before continuing." #~ msgstr "在每个节点都执行以下命令。" #~ msgid "" #~ "\n" #~ "mkfs.gfs2 -p lock_dlm -j 2 -t pcmk:web /dev/drbd1\n" #~ "[root@pcmk-1 ~]# mkfs.gfs2 -t pcmk:web -p lock_dlm -j 2 /dev/vdb \n" #~ "This will destroy any data on /dev/vdb.\n" #~ "It appears to contain: data\n" #~ "\n" #~ "Are you sure you want to proceed? 
[y/n] y\n" #~ "\n" #~ "Device:                    /dev/vdb\n" #~ "Blocksize:                 4096\n" #~ "Device Size                1.00 GB (131072 blocks)\n" #~ "Filesystem Size:           1.00 GB (131070 blocks)\n" #~ "Journals:                  2\n" #~ "Resource Groups:           2\n" #~ "Locking Protocol:          \"lock_dlm\"\n" #~ "Lock Table:                \"pcmk:web\"\n" #~ "UUID:                      6B776F46-177B-BAF8-2C2B-292C0E078613\n" #~ "\n" #~ "[root@pcmk-1 ~]#\n" #~ msgstr "" #~ "\n" #~ "mkfs.gfs2 -p lock_dlm -j 2 -t pcmk:web /dev/drbd1\n" #~ "[root@pcmk-1 ~]# mkfs.gfs2 -t pcmk:web -p lock_dlm -j 2 /dev/vdb \n" #~ "This will destroy any data on /dev/vdb.\n" #~ "It appears to contain: data\n" #~ "\n" #~ "Are you sure you want to proceed? [y/n] y\n" #~ "\n" #~ "Device:                    /dev/vdb\n" #~ "Blocksize:                 4096\n" #~ "Device Size                1.00 GB (131072 blocks)\n" #~ "Filesystem Size:           1.00 GB (131070 blocks)\n" #~ "Journals:                  2\n" #~ "Resource Groups:           2\n" #~ "Locking Protocol:          \"lock_dlm\"\n" #~ "Lock Table:                \"pcmk:web\"\n" #~ "UUID:                      6B776F46-177B-BAF8-2C2B-292C0E078613\n" #~ "\n" #~ "[root@pcmk-1 ~]#\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "crm(live)# cib new GFS2\n" #~ "INFO: GFS2 shadow CIB created\n" #~ "crm(GFS2)# configure delete WebFS\n" #~ "crm(GFS2)# configure primitive WebFS ocf:heartbeat:Filesystem params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "crm(live)# cib new GFS2\n" #~ "INFO: GFS2 shadow CIB created\n" #~ "crm(GFS2)# configure delete WebFS\n" #~ "crm(GFS2)# configure primitive WebFS ocf:heartbeat:Filesystem params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ msgid "Now that we’ve recreated the resource, we also need to recreate all the constraints that used it. This is because the shell will automatically remove any constraints that referenced WebFS." 
#~ msgstr "现在我们重新创建这个资源, 我们也要重建跟这个资源相关的约束条件,因为shell会自动删除跟WebFS相关的约束条件。" #~ msgid "" #~ "\n" #~ "crm(GFS2)# configure colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "crm(GFS2)# configure colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "crm(GFS2)# configure order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "crm(GFS2)# configure order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "crm(GFS2)# configure colocation WebFS-with-gfs-control INFINITY: WebFS gfs-clone\n" #~ "crm(GFS2)# configure order start-WebFS-after-gfs-control mandatory: gfs-clone WebFS\n" #~ "crm(GFS2)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ " params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFS gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFS\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgstr "" #~ "\n" #~ "crm(GFS2)# configure colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "crm(GFS2)# configure colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "crm(GFS2)# configure order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "crm(GFS2)# configure order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "crm(GFS2)# configure colocation WebFS-with-gfs-control INFINITY: WebFS gfs-clone\n" #~ "crm(GFS2)# configure order start-WebFS-after-gfs-control mandatory: gfs-clone WebFS\n" #~ "crm(GFS2)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS 
ocf:heartbeat:Filesystem \\\n" #~ " params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFS gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFS\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ msgid "" #~ "\n" #~ "crm(GFS2)# cib commit GFS2\n" #~ "INFO: commited 'GFS2' shadow CIB to the cluster\n" #~ "crm(GFS2)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 20:49:54 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "WebSite (ocf::heartbeat:apache):        Started pcmk-2\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "WebFS (ocf::heartbeat:Filesystem): Started pcmk-1\n" #~ msgstr "" #~ "\n" #~ "crm(GFS2)# cib commit GFS2\n" #~ "INFO: commited 'GFS2' shadow CIB to the cluster\n" #~ "crm(GFS2)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 20:49:54 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "WebSite (ocf::heartbeat:apache):        Started pcmk-2\n" #~ "Master/Slave Set: WebDataClone\n" 
#~ "        Masters: [ pcmk-1 ]\n" #~ "        Slaves: [ pcmk-2 ]\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "WebFS (ocf::heartbeat:Filesystem): Started pcmk-1\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "[root@pcmk-1 ~]# cib new active\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm\n" #~ "[root@pcmk-1 ~]# cib new active\n" #~ " " #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# configure clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# configure clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ " " #~ msgid "Open the ClusterIP resource" #~ msgstr "打开ClusterIP的配置" #~ msgid "[root@pcmk-1 ~]# configure edit  ClusterIP" #~ msgstr "[root@pcmk-1 ~]# configure edit  ClusterIP" #~ msgid "And add the following to the params line" #~ msgstr "在参数行添加以下内容:" #~ msgid "clusterip_hash=\"sourceip\"" #~ msgstr "clusterip_hash=\"sourceip\"" #~ msgid "So that the complete definition looks like:" #~ msgstr "完整的定义就像下面一样:" #~ msgid "" #~ "\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\ \n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" clusterip_hash=\"sourceip\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ " " #~ msgstr "" #~ "\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\ \n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" clusterip_hash=\"sourceip\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ " " #~ msgid "Here is the full transcript" #~ msgstr "以下是完整的配置" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm \n" #~ "crm(live)# cib new active\n" #~ "INFO: active shadow CIB created\n" #~ "crm(active)# configure clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "crm(active)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone WebIP ClusterIP \\\n" #~ " meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFS gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ 
"colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSite WebIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: WebIP WebSite\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFS\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm \n" #~ "crm(live)# cib new active\n" #~ "INFO: active shadow CIB created\n" #~ "crm(active)# configure clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "crm(active)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"1\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone WebIP ClusterIP \\\n" #~ " meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFS gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSite WebFS\n" #~ "colocation fs_on_drbd inf: WebFS WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSite WebIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFS:start\n" #~ "order WebSite-after-WebFS inf: WebFS WebSite\n" #~ "order apache-after-ip inf: WebIP WebSite\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFS\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ " " #~ msgid "Notice how any constraints that referenced ClusterIP have been updated to use WebIP instead. This is an additional benefit of using the crm shell." 
#~ msgstr "请注意所有跟ClusterIP相关的限制都已经被更新到与WebIP相关,这是使用crm shell的另一个好处。" #~ msgid "" #~ "\n" #~ "crm(active)# configure clone WebFSClone WebFS\n" #~ "crm(active)# configure clone WebSiteClone WebSite\n" #~ " " #~ msgstr "" #~ "\n" #~ "crm(active)# configure clone WebFSClone WebFS\n" #~ "crm(active)# configure clone WebSiteClone WebSite\n" #~ " " #~ msgid "" #~ "\n" #~ "crm(active)# configure edit WebDataClone\n" #~ " " #~ msgstr "" #~ "\n" #~ "crm(active)# configure edit WebDataClone\n" #~ " " #~ msgid "Change master-max to 2" #~ msgstr "把 master-max 改为 2" #~ msgid "" #~ "\n" #~ "crm(active)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"2\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone WebFSClone WebFS\n" #~ "clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "clone WebSiteClone WebSite\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone\n" #~ "colocation fs_on_drbd inf: WebFSClone WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSiteClone WebIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start\n" #~ "order WebSite-after-WebFS inf: WebFSClone WebSiteClone\n" #~ "order apache-after-ip inf: WebIP WebSiteClone\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ " " #~ msgstr "" #~ "\n" #~ "crm(active)# configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebData ocf:linbit:drbd \\\n" #~ "        params drbd_resource=\"wwwdata\" \\\n" #~ "        op monitor interval=\"60s\"\n" #~ "primitive WebFS ocf:heartbeat:Filesystem \\\n" #~ "        params device=\"/dev/drbd/by-res/wwwdata\" directory=\"/var/www/html\" fstype=”gfs2”\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op 
monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=”192.168.122.101” cidr_netmask=”32” clusterip_hash=”sourceip” \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "primitive dlm ocf:pacemaker:controld \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "primitive gfs-control ocf:pacemaker:controld \\\n" #~ "   params daemon=”gfs_controld.pcmk” args=”-g 0” \\\n" #~ "        op monitor interval=\"120s\"\n" #~ "ms WebDataClone WebData \\\n" #~ "        meta master-max=\"2\" master-node-max=\"1\" clone-max=\"2\" clone-node-max=\"1\" notify=\"true\"\n" #~ "clone WebFSClone WebFS\n" #~ "clone WebIP ClusterIP  \\\n" #~ "        meta globally-unique=”true” clone-max=”2” clone-node-max=”2”\n" #~ "clone WebSiteClone WebSite\n" #~ "clone dlm-clone dlm \\\n" #~ "        meta interleave=\"true\"\n" #~ "clone gfs-clone gfs-control \\\n" #~ "        meta interleave=\"true\"\n" #~ "colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone\n" #~ "colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone\n" #~ "colocation fs_on_drbd inf: WebFSClone WebDataClone:Master\n" #~ "colocation gfs-with-dlm inf: gfs-clone dlm-clone\n" #~ "colocation website-with-ip inf: WebSiteClone WebIP\n" #~ "order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start\n" #~ "order WebSite-after-WebFS inf: WebFSClone WebSiteClone\n" #~ "order apache-after-ip inf: WebIP WebSiteClone\n" #~ "order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone\n" #~ "order start-gfs-after-dlm inf: dlm-clone gfs-clone\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=”2” \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=”100”\n" #~ " " #~ msgid "" #~ "\n" #~ "crm(active)# cib commit active\n" #~ "INFO: commited 'active' shadow CIB to the cluster\n" #~ "crm(active)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 21:37:27 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 pcmk-2 ]\n" #~ "Clone Set: dlm-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: WebIP\n" #~ " Started: [ pcmk-1 pcmk-2 ]\n" #~ "Clone Set: WebFSClone\n" #~ " Started: [ pcmk-1 pcmk-2 ]\n" #~ "Clone Set: WebSiteClone\n" #~ " Started: [ pcmk-1 pcmk-2 ]\n" #~ " " #~ msgstr "" #~ "\n" #~ "crm(active)# cib commit active\n" #~ "INFO: commited 'active' shadow CIB to the cluster\n" #~ "crm(active)# quit\n" #~ "bye\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Thu Sep  3 21:37:27 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "6 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "Master/Slave Set: WebDataClone\n" #~ "        Masters: [ pcmk-1 pcmk-2 ]\n" #~ "Clone Set: dlm-clone\n" #~ "      
  Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: gfs-clone\n" #~ "        Started: [ pcmk-2 pcmk-1 ]\n" #~ "Clone Set: WebIP\n" #~ " Started: [ pcmk-1 pcmk-2 ]\n" #~ "Clone Set: WebFSClone\n" #~ " Started: [ pcmk-1 pcmk-2 ]\n" #~ "Clone Set: WebSiteClone\n" #~ " Started: [ pcmk-1 pcmk-2 ]\n" #~ " " #~ msgid "The list of nodes the cluster considers to be available" #~ msgstr "集群认为列表中的节点都是可用的" #~ msgid "information and fencing capabilities." #~ msgstr "信息和隔离功能。" diff --git a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po index ac14319684..f9cc72355f 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po +++ b/doc/Clusters_from_Scratch/zh-CN/Ch-Active-Passive.po @@ -1,949 +1,949 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-16 00:24+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Creating an Active/Passive Cluster" msgstr "创建一个主/备集群" #. Tag: title #, no-c-format msgid "Exploring the Existing Configuration" msgstr "浏览现有配置" #. Tag: para #, no-c-format msgid "When Pacemaker starts up, it automatically records the number and details of the nodes in the cluster as well as which stack is being used and the version of Pacemaker being used." msgstr "当Pacemaker启动的时候,它会自动记录节点的数量和详细信息,以及基层软件(本文中是corosync)和Pacemaker的版本。" #. Tag: para #, no-c-format msgid "This is what the base configuration should look like." msgstr "这是初始配置文件的模样:" #. Tag: programlisting #, no-c-format msgid "" "# pcs status\n" "Last updated: Fri Sep 14 10:12:01 2012\n" "Last change: Fri Sep 14 09:51:55 2012 via crmd on pcmk-2\n" "Stack: corosync\n" "Current DC: pcmk-1 (1) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "0 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:" msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "For those that are not of afraid of XML, you can see the raw cluster configuration and status by using the pcs cluster cib command." -msgstr "如果有谁想看看XML格式的,你可以添加xml选项来看到原始的配置文件" +msgstr "如果有谁想看看XML格式的,你可以用“pcs cluster cib”命令来看到原始的集群配置和状态。" #. Tag: title #, no-c-format msgid "The last XML you’ll see in this document" msgstr "这是本文档最后一次显示XML。(作者怨念很深啊)" #. Tag: programlisting #, no-c-format msgid "# pcs cluster cib" msgstr "" #.
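# Editor's note (a hedged sketch, not part of the upstream POT; the output
# file name is a hypothetical example): the raw CIB printed by the entry
# above can be captured for offline inspection or backup:
#   pcs cluster cib > /tmp/cib-backup.xml     # dump the live CIB as XML
#   grep stonith-enabled /tmp/cib-backup.xml  # inspect a single cluster option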
Tag: programlisting #, no-c-format msgid "" "<cib epoch=\"4\" num_updates=\"19\" admin_epoch=\"0\" validate-with=\"pacemaker-1.2\" crm_feature_set=\"3.0.6\" update-origin=\"pcmk-1\" update-client=\"crmd\" cib-last-written=\"Wed Aug 1 16:08:52 2012\" have-quorum=\"1\" dc-uuid=\"1\">\n" " <configuration>\n" " <crm_config>\n" " <cluster_property_set id=\"cib-bootstrap-options\">\n" " <nvpair id=\"cib-bootstrap-options-dc-version\" name=\"dc-version\" value=\"1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\"/>\n" " <nvpair id=\"cib-bootstrap-options-cluster-infrastructure\" name=\"cluster-infrastructure\" value=\"corosync\"/>\n" " </cluster_property_set>\n" " </crm_config>\n" " <nodes>\n" " <node id=\"1\" uname=\"pcmk-1\" type=\"normal\"/>\n" " <node id=\"2\" uname=\"pcmk-2\" type=\"normal\"/>\n" " </nodes>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status>\n" " <node_state id=\"2\" uname=\"pcmk-2\" ha=\"active\" in_ccm=\"true\" crmd=\"online\" join=\"member\" expected=\"member\" crm-debug-origin=\"do_state_transition\" shutdown=\"0\">\n" " <lrm id=\"2\">\n" " <lrm_resources/>\n" " </lrm>\n" " <transient_attributes id=\"2\">\n" " <instance_attributes id=\"status-2\">\n" " <nvpair id=\"status-2-probe_complete\" name=\"probe_complete\" value=\"true\"/>\n" " </instance_attributes>\n" " </transient_attributes>\n" " </node_state>\n" " <node_state id=\"1\" uname=\"pcmk-1\" ha=\"active\" in_ccm=\"true\" crmd=\"online\" join=\"member\" expected=\"member\" crm-debug-origin=\"do_state_transition\" shutdown=\"0\">\n" " <lrm id=\"1\">\n" " <lrm_resources/>\n" " </lrm>\n" " <transient_attributes id=\"1\">\n" " <instance_attributes id=\"status-1\">\n" " <nvpair id=\"status-1-probe_complete\" name=\"probe_complete\" value=\"true\"/>\n" " </instance_attributes>\n" " </transient_attributes>\n" " </node_state>\n" " </status>\n" "</cib>" msgstr "" #. Tag: para #, no-c-format msgid "Before we make any changes, its a good idea to check the validity of the configuration." msgstr "在我们做出任何改变之前,我们最好检查下配置文件。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# crm_verify -L -V\n" " error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined\n" " error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option\n" " error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity\n" "Errors found during check: config not valid\n" " -V may provide more details" msgstr "" "\n" "[root@pcmk-1 ~]# crm_verify -L\n" "crm_verify[2195]: 2009/08/27_16:57:12 ERROR: unpack_resources: Resource start-up disabled since no STONITH resources have been defined\n" "crm_verify[2195]: 2009/08/27_16:57:12 ERROR: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option\n" "crm_verify[2195]: 2009/08/27_16:57:12 ERROR: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity\n" "Errors found during check: config not valid\n" "  -V may provide more details\n" "[root@pcmk-1 ~]#\n" #. Tag: para #, no-c-format msgid "As you can see, the tool has found some errors." msgstr "就像你看到的,这个工具发现了一些错误。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "In order to guarantee the safety of your data If the data is corrupt, there is little point in continuing to make it available , the default for STONITH A common node fencing mechanism. Used to ensure data integrity by powering off \"bad\" nodes in Pacemaker is enabled. 
However it also knows when no STONITH configuration has been supplied and reports this as a problem (since the cluster would not be able to make progress if a situation requiring node fencing arose)." -msgstr "为了确保您数据的安全性 如果数据是损坏的,那保证它的可用性是没有意义的 ,请使用配备STONITH 一个常见的隔离手段。用关掉坏节点电源的办法来保证数据完整 的Pacemaker。但是当没有配置STONITH的时候也会报这个错误(因为当集群中某个节点需要被隔离的时候,集群就无法工作了)。" +msgstr "为了确保您数据的安全性 如果数据是损坏的,那保证它的可用性是没有意义的 ,请在Pacemaker中配置使用STONITH 一个常见的隔离手段。用关掉坏节点电源的办法来保证数据完整 。但是当没有配置STONITH的时候就会报这个错误(因为当集群中某个节点需要被隔离的时候,集群就无法工作了)。" #. Tag: para #, no-c-format msgid "For now, we will disable this feature and configure it later in the Configuring STONITH section. It is important to note that the use of STONITH is highly encouraged, turning it off tells the cluster to simply pretend that failed nodes are safely powered off. Some vendors will even refuse to support clusters that have it disabled." -msgstr "目前,我们禁用这个特性,然后在 配置STONISH 章节来配置它。这里要指出,使用STONITH是非常有必要的。关闭这个特性就是告诉集群:假装故障的节点已经安全的关机了。一些供应商甚至不允许这个特性被关闭。" +msgstr "目前,我们先禁用这个特性,然后在“配置STONITH”章节再来配置它。这里要指出,使用STONITH是非常有必要的。关闭这个特性就是告诉集群假装故障的节点已经安全地关机了。一些供应商甚至会拒绝支持关闭了这个特性的集群。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To disable STONITH, we set the stonith-enabled cluster option to false." -msgstr "我们将 stonith-enabled设置为 false 来关闭STONITH" +msgstr "我们将 stonith-enabled 设置为 false 来关闭STONITH。" #. Tag: programlisting #, no-c-format msgid "" "# pcs property set stonith-enabled=false\n" "# crm_verify -L" msgstr "" #. Tag: para #, no-c-format msgid "With the new cluster option set, the configuration is now valid." msgstr "设置完这个选项以后,校验配置文件就正常了。" #. Tag: para #, no-c-format msgid "The use of stonith-enabled=false is completely inappropriate for a production cluster. We use it here to defer the discussion of its configuration which can differ widely from one installation to the next. See for information on why STONITH is important and details on how to configure it." -msgstr "" +msgstr "在用于生产环境的集群中配置 stonith-enabled=false 是非常不恰当的。我们在这里这样做,只是为了推迟讨论它的配置,因为它在不同环境下的配置差别很大。后面你可以看到如何配置STONITH以及它为何如此重要的详细内容。" #. Tag: title #, no-c-format msgid "Adding a Resource" msgstr "添加一个资源" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "The first thing we should do is configure an IP address. Regardless of where the cluster service(s) are running, we need a consistent address to contact them on. Here I will choose and add 192.168.122.120 as the floating address, give it the imaginative name ClusterIP and tell the cluster to check that its running every 30 seconds." -msgstr "首先要做的是配置一个IP地址,不管集群服务在哪运行,我们要一个固定的地址来提供服务。在这里我选择192.168.122.101作为浮动IP,给它取一个好记的名字 ClusterIP 并且告诉集群 每30秒检查它一次" +msgstr "首先要做的是配置一个IP地址。不管集群服务在哪运行,我们都需要一个固定的地址来访问它们。在这里我选择并添加192.168.122.120作为浮动IP,给它取一个好记的名字 ClusterIP,并且告诉集群每30秒检查它一次。" #. Tag: para #, no-c-format msgid "The chosen address must not be one already associated with a physical node" msgstr "选择的IP地址不能被节点所占用" #. Tag: screen #, fuzzy, no-c-format msgid "" "# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \\\n" " ip=192.168.0.120 cidr_netmask=32 op monitor interval=30s" msgstr "" "\n" "crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \\ \n" "        params ip=192.168.122.101 cidr_netmask=32 \\ \n" "        op monitor interval=30s\n" #. Tag: para #, no-c-format msgid "The other important piece of information here is ocf:heartbeat:IPaddr2." -msgstr "" +msgstr "这里另外一个重要的信息是 ocf:heartbeat:IPaddr2。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "This tells Pacemaker three things about the resource you want to add. 
The first field, ocf, is the standard to which the resource script conforms to and where to find it. The second field is specific to OCF resources and tells the cluster which namespace to find the resource script in, in this case heartbeat. The last field indicates the name of the resource script." -msgstr "另外一个重要的信息是 ocf:heartbeat:IPaddr2。这告诉Pacemaker三件事情,第一个部分,ocf,指明了这个资源采用的标准(类型)以及在哪能找到它。第二个部分标明这个资源脚本的在OCF中的名字空间,在这个例子中是heartbeat。最后一个部分指明了资源脚本的名称。" +msgstr "这告诉Pacemaker关于你要添加的资源的三件事情。第一个部分ocf,指明了这个资源脚本所遵循的标准以及在哪能找到它。第二个部分是OCF资源特有的,标明这个资源脚本在哪个名字空间里,在这个例子中是heartbeat。最后一个部分指明了资源脚本的名称。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To obtain a list of the available resource standards (the ocf part of ocf:heartbeat:IPaddr2), run" msgstr "可以运行下面的命令来获得可用的资源标准(即 ocf:heartbeat:IPaddr2 中的ocf部分)" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource standards\n" "ocf\n" "lsb\n" "service\n" "systemd\n" "stonith" msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To obtain a list of the available ocf resource providers (the heartbeat part of ocf:heartbeat:IPaddr2), run" -msgstr "可以运行下面的命令来获得可用的资源类" +msgstr "可以运行下面的命令来获得可用的ocf资源提供者(即 ocf:heartbeat:IPaddr2 中的heartbeat部分)" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource providers\n" "heartbeat\n" "linbit\n" "pacemaker\n" "redhat" msgstr "" #. Tag: para #, no-c-format msgid "Finally, if you want to see all the resource agents available for a specific ocf provider (the IPaddr2 part of ocf:heartbeat:IPaddr2), run" -msgstr "" +msgstr "最后,如果你想看某个特定的ocf资源提供者的所有可用资源代理(即 ocf:heartbeat:IPaddr2 中的IPaddr2部分),可以运行" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource agents ocf:heartbeat\n" "AoEtarget\n" "AudibleAlarm\n" "CTDB\n" "ClusterMon\n" "Delay\n" "Dummy\n" ".\n" ". (skipping lots of resources to save space)\n" ".\n" "IPaddr2\n" ".\n" ".\n" ".\n" "symlink\n" "syslog-ng\n" "tomcat\n" "vmware" msgstr "" #. Tag: para #, no-c-format msgid "Now verify that the IP resource has been added and display the cluster’s status to see that it is now active." msgstr "现在检查一下IP资源是不是已经添加了,并查看集群的状态,确认它已经在运行了。" #. Tag: programlisting #, no-c-format msgid "" "# pcs status\n" "\n" "Last updated: Fri Sep 14 10:17:00 2012\n" "Last change: Fri Sep 14 10:15:48 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-1 (1) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "1 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1" msgstr "" #. Tag: title #, no-c-format msgid "Perform a Failover" -msgstr " 做一次失效备援" +msgstr "做一次故障迁移" #. Tag: para #, no-c-format msgid "Being a high-availability cluster, we should test failover of our new resource before moving on." -msgstr "作为一个高可用的集群,我们在继续本文档之前,我们要需要测试失效备援 。" +msgstr "作为一个高可用集群,在继续本文档之前,我们需要先测试一下新资源的故障迁移。" #. Tag: para #, no-c-format msgid "First, find the node on which the IP address is running." msgstr "首先,找到IP资源现在在哪个节点上运行。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Shut down Pacemaker and Corosync on that machine." -msgstr "关闭那个节点上面的Corosync服务:" +msgstr "关闭那个节点上面的Pacemaker和Corosync服务。" #. Tag: programlisting #, no-c-format msgid "" "#pcs cluster stop pcmk-1\n" "Stopping Cluster..." msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Once Corosync is no longer running, go to the other node and check the cluster status." -msgstr "当Corosync停止运行以后,我们到另外一个节点用crm_mon来检查集群状态." +msgstr "当Corosync停止运行以后,我们到另外一个节点检查集群状态。" #. 
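# Editor's sketch, assuming the pcmk-1/pcmk-2 hostnames used throughout this
# chapter: the failover test described above reduces to stopping the cluster
# stack on the node holding ClusterIP and watching from the survivor:
#   pcs cluster stop pcmk-1    # on the node currently running ClusterIP
#   ssh pcmk-2 -- pcs status   # expect "partition WITHOUT quorum" on the peer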
Tag: programlisting #, fuzzy, no-c-format msgid "" "# pcs status\n" "\n" "Last updated: Fri Sep 14 10:31:01 2012\n" "Last change: Fri Sep 14 10:15:48 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition WITHOUT quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "1 Resources configured.\n" "\n" "Online: [ pcmk-2 ]\n" "OFFLINE: [ pcmk-1 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Stopped" msgstr "" "\n" "[root@pcmk-2 ~]# crm_mon\n" "============\n" "Last updated: Fri Aug 28 15:30:18 2009\n" "Stack: openais\n" "Current DC: pcmk-2 - partition WITHOUT quorum\n" "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" "2 Nodes configured, 2 expected votes\n" "1 Resources configured.\n" "============\n" "Online: [ pcmk-2 ]\n" "OFFLINE: [ pcmk-1 ]\n" "\n" "ClusterIP (ocf::heartbeat:IPaddr): Started pcmk-2\n" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "There are three things to notice about the cluster’s current state. The first is that, as expected, pcmk-1 is now offline. However we can also see that ClusterIP isn’t running anywhere!" -msgstr "关于集群状态,我们有三个地方需要注意,首先,如我们所料pcmk-1已经下线了,然而我们发现ClusterIP不在任何地方运行!" +msgstr "关于集群当前的状态,有三个地方需要注意。首先,如我们所料,pcmk-1已经离线了;但是我们也发现ClusterIP没有在任何地方运行!" #. Tag: title #, no-c-format msgid "Quorum and Two-Node Clusters" msgstr "法定人数和双节点集群" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "This is because the cluster no longer has quorum, as can be seen by the text \"partition WITHOUT quorum\" in the status output. In order to reduce the possibility of data corruption, Pacemaker’s default behavior is to stop all resources if the cluster does not have quorum." -msgstr "这是因为集群已经达不到“法定人数”了,就像我们看到的“partition WITHOUT quorum” (用绿色强调的)。为了避免数据遭到破坏,当Pacemaker发现集群达不到法定人数时,就会停止所有的资源。" +msgstr "这是因为集群已经达不到“法定人数”了,我们可以在状态输出中的“partition WITHOUT quorum”这段文字里看到这一点。为了避免数据遭到破坏,当Pacemaker发现集群达不到法定人数时,默认的行为是停止所有的资源。" #. Tag: para #, no-c-format msgid "A cluster is said to have quorum when more than half the known or expected nodes are online, or for the mathematically inclined, whenever the following equation is true:" msgstr "当有半数以上的节点在线时,这个集群就认为自己拥有法定人数了,是“合法”的,换而言之就是下面的公式:" #. Tag: literallayout -#, fuzzy, no-c-format +#, no-c-format msgid "total_nodes < 2 * active_nodes" -msgstr "总节点数 - 1 < 2 * 可用的节点" +msgstr "总节点数 < 2 * 活跃节点数" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Therefore a two-node cluster only has quorum when both nodes are running, which is no longer the case for our cluster. This would normally make the creation of a two-node cluster pointless Actually some would argue that two-node clusters are always pointless, but that is an argument for another time , however it is possible to control how Pacemaker behaves when quorum is lost. In particular, we can tell the cluster to simply ignore quorum altogether." -msgstr "因此在双节点的集群中,只有当两者都在线时才是合法的。 这个规则会让 双节点的集群 毫无意义,但是我们可以控制Pacemaker发现集群达不到法定人数时候的行为。简单来说,我们告诉集群忽略它 。" +msgstr "因此在双节点的集群中,只有当两个节点都在线时集群才是合法的。这个规则会让“双节点集群”显得毫无意义,但是我们可以控制Pacemaker在集群达不到法定人数时的行为。简单来说,我们告诉集群直接忽略法定人数。" #. Tag: programlisting #, no-c-format msgid "" "# pcs property set no-quorum-policy=ignore\n" "# pcs property\n" "dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "cluster-infrastructure: corosync\n" "stonith-enabled: false\n" "no-quorum-policy: ignore" msgstr "" #. 
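# Editor's note, working the quorum inequality above for this cluster: with
# total_nodes = 2 and both nodes up, 2 < 2 * 2 holds and the partition has
# quorum; once one node stops, active_nodes = 1, 2 < 2 * 1 is false, quorum
# is lost, and pcs status reports "partition WITHOUT quorum".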
Tag: para #, no-c-format msgid "After a few moments, the cluster will start the IP address on the remaining node. Note that the cluster still does not have quorum." -msgstr "过了一会,集群会在剩下的那个节点上启动这个IP。请注意集群现在依然没有达到法定人数。" +msgstr "过一会儿,集群会在剩下的那个节点上启动这个IP。请注意集群现在依然没有达到法定人数。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# pcs status\n" "Last updated: Fri Sep 14 10:38:11 2012\n" "Last change: Fri Sep 14 10:37:53 2012 via cibadmin on pcmk-2\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition WITHOUT quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "1 Resources configured.\n" "\n" "Online: [ pcmk-2 ]\n" "OFFLINE: [ pcmk-1 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2" msgstr "" "\n" "[root@pcmk-2 ~]# crm_mon\n" "============\n" "Last updated: Fri Aug 28 15:30:18 2009\n" "Stack: openais\n" "Current DC: pcmk-2 - partition WITHOUT quorum\n" "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" "2 Nodes configured, 2 expected votes\n" "1 Resources configured.\n" "============\n" "Online: [ pcmk-2 ]\n" "OFFLINE: [ pcmk-1 ]\n" "\n" "ClusterIP (ocf::heartbeat:IPaddr): Started pcmk-2\n" #. Tag: para #, no-c-format msgid "Now simulate node recovery by restarting the cluster stack on pcmk-1 and check the cluster’s status. Note, if you get an authentication error with the pcs cluster start pcmk-1 command, you must authenticate on the node using the pcs cluster auth pcmk pcmk-1 pcmk-2 command discussed earlier." -msgstr "" +msgstr "现在通过重启pcmk-1的集群组建来模拟节点恢复,并检查集群的状态。" #. Tag: programlisting #, no-c-format msgid "" "# pcs cluster start pcmk-1\n" "Starting Cluster...\n" "# pcs status\n" "\n" "Last updated: Fri Sep 14 10:42:56 2012\n" "Last change: Fri Sep 14 10:37:53 2012 via cibadmin on pcmk-2\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "1 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2" msgstr "" #. Tag: para #, no-c-format msgid "In the dark days, the cluster may have moved the IP back to its original location (pcmk-1). Usually this is no longer the case." -msgstr "" +msgstr "集群可能会迁移这个IP到原始的节点pcmk-1。" #. Tag: title #, no-c-format msgid "Prevent Resources from Moving after Recovery" -msgstr "防止资源在节点恢复后移动" +msgstr "防止资源在节点恢复后迁移" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "In most circumstances, it is highly desirable to prevent healthy resources from being moved around the cluster. Moving resources almost always requires a period of downtime. For complex services like Oracle databases, this period can be quite long." -msgstr "一些环境中会要求尽量避免资源在节点之间移动。移动资源通常意味着一段时间内无法提供服务,某些复杂的服务,比如Oracle数据库,这个时间可能会很长。" +msgstr "一些环境中会要求尽量避免资源在节点之间迁移。迁移资源通常意味着一段时间内无法提供服务,某些复杂的服务,比如Oracle数据库,这个时间可能会很长。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To address this, Pacemaker has the concept of resource stickiness which controls how much a service prefers to stay running where it is. You may like to think of it as the \"cost\" of any downtime. By default, Pacemaker assumes there is zero cost associated with moving resources and will do so to achieve \"optimal\" It should be noted that Pacemaker’s definition of optimal may not always agree with that of a human’s. 
The order in which Pacemaker processes lists of resources and nodes creates implicit preferences in situations where the administrator has not explicitly specified them resource placement. We can specify a different stickiness for every resource, but it is often sufficient to change the default." -msgstr "为了达到这个效果,Pacemaker 有一个叫做 资源黏性值 的概念,它能够控制一个服务(资源)有多想呆在它正在运行的节点上。你可以把它认为是无法提供服务的“代价” 这里要注意的是Pacemaker定义的代价跟人们所想的不一样。如果管理员没有明确的指定参数(创建稳定环境所必须的),那么资源个节点在Pacemaker处理列表中的顺序会隐式地创建参数 ”。 Pacemaker为了达到最优分布各个资源的目的,默认设置这个值为0。我们可以为每个资源定义不同的黏性值,但一般来说,更改默认黏性值就够了。" +msgstr "为了达到这个效果,Pacemaker 有一个叫做“资源粘性值”的概念,它能够控制一个服务(资源)有多想呆在它正在运行的节点上。你可以把它认为是无法提供服务的“代价” 这里要注意的是Pacemaker定义的代价跟人们所想的不一样。如果管理员没有明确的指定参数(创建稳定环境所必须的),那么资源及节点在Pacemaker处理列表中的顺序会隐式地创建参数 ”。 Pacemaker为了达到最优分布各个资源的目的,默认设置这个值为0。我们可以为每个资源定义不同的粘性值,但一般来说,更改默认粘性值就够了。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource rsc defaults resource-stickiness=100\n" "# pcs resource rsc defaults\n" "resource-stickiness: 100" msgstr "" #~ msgid "" #~ "\n" #~ "[root@pcmk-2 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\"\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-2 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\"\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-2 ~]# crm configure show xml\n" #~ "<?xml version=\"1.0\" ?>\n" #~ "<cib admin_epoch=\"0\" crm_feature_set=\"3.0.1\" dc-uuid=\"pcmk-1\" epoch=\"13\" have-quorum=\"1\" num_updates=\"7\" validate-with=\"pacemaker-1.0\">\n" #~ "  <configuration>\n" #~ "    <crm_config>\n" #~ "      <cluster_property_set id=\"cib-bootstrap-options\">\n" #~ "        <nvpair id=\"cib-bootstrap-options-dc-version\" name=\"dc-version\" value=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\"/>\n" #~ "        <nvpair id=\"cib-bootstrap-options-cluster-infrastructure\" name=\"cluster-infrastructure\" value=\"openais\"/>\n" #~ "        <nvpair id=\"cib-bootstrap-options-expected-quorum-votes\" name=\"expected-quorum-votes\" value=\"2\"/>\n" #~ "      </cluster_property_set>\n" #~ "    </crm_config>\n" #~ "    <rsc_defaults/>\n" #~ "    <op_defaults/>\n" #~ "    <nodes>\n" #~ "      <node id=\"pcmk-1\" type=\"normal\" uname=\"pcmk-1\"/>\n" #~ "      <node id=\"pcmk-2\" type=\"normal\" uname=\"pcmk-2\"/>\n" #~ "    </nodes>\n" #~ "    <resources/>\n" #~ "    <constraints/>\n" #~ "  </configuration>\n" #~ "</cib>\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-2 ~]# crm configure show xml\n" #~ "<?xml version=\"1.0\" ?>\n" #~ "<cib admin_epoch=\"0\" crm_feature_set=\"3.0.1\" dc-uuid=\"pcmk-1\" epoch=\"13\" have-quorum=\"1\" num_updates=\"7\" validate-with=\"pacemaker-1.0\">\n" #~ "  <configuration>\n" #~ "    <crm_config>\n" #~ "      <cluster_property_set id=\"cib-bootstrap-options\">\n" #~ "        <nvpair id=\"cib-bootstrap-options-dc-version\" name=\"dc-version\" value=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\"/>\n" #~ "        <nvpair id=\"cib-bootstrap-options-cluster-infrastructure\" name=\"cluster-infrastructure\" value=\"openais\"/>\n" #~ "        <nvpair id=\"cib-bootstrap-options-expected-quorum-votes\" name=\"expected-quorum-votes\" value=\"2\"/>\n" #~ "      
</cluster_property_set>\n" #~ "    </crm_config>\n" #~ "    <rsc_defaults/>\n" #~ "    <op_defaults/>\n" #~ "    <nodes>\n" #~ "      <node id=\"pcmk-1\" type=\"normal\" uname=\"pcmk-1\"/>\n" #~ "      <node id=\"pcmk-2\" type=\"normal\" uname=\"pcmk-2\"/>\n" #~ "    </nodes>\n" #~ "    <resources/>\n" #~ "    <constraints/>\n" #~ "  </configuration>\n" #~ "</cib>\n" #~ msgid "crm configure property stonith-enabled=false" #~ msgstr "crm configure property stonith-enabled=false" #~ msgid "crm_verify -L" #~ msgstr "crm_verify -L" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm ra classes\n" #~ "heartbeat\n" #~ "lsb\n" #~ "ocf / heartbeat pacemaker\n" #~ "stonith\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm ra classes\n" #~ "heartbeat\n" #~ "lsb\n" #~ "ocf / heartbeat pacemaker\n" #~ "stonith\n" #~ msgid "To then find all the OCF resource agents provided by Pacemaker and Heartbeat, run" #~ msgstr "找到OCF中Pacemaker和Heartbeat提供的资源脚本,运行下面的命令" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm ra list ocf pacemaker\n" #~ "ClusterMon     Dummy          Stateful       SysInfo        SystemHealth   controld\n" #~ "ping           pingd          \n" #~ "[root@pcmk-1 ~]# crm ra list ocf heartbeat\n" #~ "AoEtarget              AudibleAlarm           ClusterMon             Delay\n" #~ "Dummy                  EvmsSCC                Evmsd                  Filesystem\n" #~ "ICP                    IPaddr                 IPaddr2                IPsrcaddr\n" #~ "LVM                    LinuxSCSI              MailTo                 ManageRAID\n" #~ "ManageVE               Pure-FTPd              Raid1                  Route\n" #~ "SAPDatabase            SAPInstance            SendArp                ServeRAID\n" #~ "SphinxSearchDaemon     Squid                  Stateful               SysInfo\n" #~ "VIPArip                VirtualDomain          WAS                    WAS6\n" #~ "WinPopup               Xen                    Xinetd                 anything\n" #~ "apache                 db2                    drbd                   eDir88\n" #~ "iSCSILogicalUnit       iSCSITarget            ids                    iscsi\n" #~ "ldirectord             mysql                  mysql-proxy            nfsserver\n" #~ "oracle                 oralsnr                pgsql                  pingd\n" #~ "portblock              rsyncd                 scsi2reservation       sfex\n" #~ "tomcat                 vmware                 \n" #~ "[root@pcmk-1 ~]#\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm ra list ocf pacemaker\n" #~ "ClusterMon     Dummy          Stateful       SysInfo        SystemHealth   controld\n" #~ "ping           pingd          \n" #~ "[root@pcmk-1 ~]# crm ra list ocf heartbeat\n" #~ "AoEtarget              AudibleAlarm           ClusterMon             Delay\n" #~ "Dummy                  EvmsSCC                Evmsd                  Filesystem\n" #~ "ICP                    IPaddr                 IPaddr2                IPsrcaddr\n" #~ "LVM                    LinuxSCSI              MailTo                 ManageRAID\n" #~ "ManageVE               Pure-FTPd              Raid1                  Route\n" #~ "SAPDatabase            SAPInstance            SendArp                ServeRAID\n" #~ "SphinxSearchDaemon     Squid                  Stateful               SysInfo\n" #~ "VIPArip                VirtualDomain          WAS                    WAS6\n" #~ "WinPopup               Xen                    Xinetd                 anything\n" #~ "apache                 db2                    drbd                   eDir88\n" #~ 
"iSCSILogicalUnit       iSCSITarget            ids                    iscsi\n" #~ "ldirectord             mysql                  mysql-proxy            nfsserver\n" #~ "oracle                 oralsnr                pgsql                  pingd\n" #~ "portblock              rsyncd                 scsi2reservation       sfex\n" #~ "tomcat                 vmware                 \n" #~ "[root@pcmk-1 ~]#\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ " params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ " op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:23:48 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-1 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "ClusterIP (ocf::heartbeat:IPaddr): Started pcmk-1\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ " params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ " op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:23:48 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-1 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "ClusterIP (ocf::heartbeat:IPaddr): Started pcmk-1\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm resource status ClusterIP\n" #~ "resource ClusterIP is running on: pcmk-1\n" #~ "[root@pcmk-1 ~]#\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm resource status ClusterIP\n" #~ "resource ClusterIP is running on: pcmk-1\n" #~ "[root@pcmk-1 ~]#\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# ssh pcmk-1 -- /etc/init.d/corosync stop\n" #~ "Stopping Corosync Cluster Engine (corosync): [ OK ]\n" #~ "Waiting for services to unload: [ OK ]\n" #~ "[root@pcmk-1 ~]#\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# ssh pcmk-1 -- /etc/init.d/corosync stop\n" #~ "Stopping Corosync Cluster Engine (corosync): [ OK ]\n" #~ "Waiting for services to unload: [ OK ]\n" #~ "[root@pcmk-1 ~]#\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-2 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:27:35 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition WITHOUT quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-2 ]\n" #~ "OFFLINE: [ pcmk-1 ]\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-2 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:27:35 2009\n" 
#~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition WITHOUT quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-2 ]\n" #~ "OFFLINE: [ pcmk-1 ]\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure property no-quorum-policy=ignore\n" #~ "[root@pcmk-1 ~]# crm configure show \n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure property no-quorum-policy=ignore\n" #~ "[root@pcmk-1 ~]# crm configure show \n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ msgid "Now simulate node recovery by restarting the cluster stack on pcmk-1 and check the cluster’s status." #~ msgstr "现在模拟节点恢复,我们启动 pcmk-1 上面的Corosync服务,然后检查集群状态。" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# /etc/init.d/corosync start\n" #~ "Starting Corosync Cluster Engine (corosync): [ OK ] \n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:32:13 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# /etc/init.d/corosync start\n" #~ "Starting Corosync Cluster Engine (corosync): [ OK ] \n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:32:13 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ msgid "Here we see something that some may consider surprising, the IP is back running at its original location!" 
#~ msgstr "现在我们可以看到让某些人惊奇的事情,IP资源回到原来那个节点(pcmk-1)上去了。" #~ msgid "" #~ "\n" #~ "crm configure rsc_defaults resource-stickiness=100\n" #~ "[root@pcmk-2 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ " resource-stickiness=\"100\"\n" #~ msgstr "" #~ "\n" #~ "crm configure rsc_defaults resource-stickiness=100\n" #~ "[root@pcmk-2 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ " resource-stickiness=\"100\"\n" #~ msgid "If we now retry the failover test, we see that as expected ClusterIP still moves to pcmk-2 when pcmk-1 is taken offline." #~ msgstr "现在我们重新尝试失效援备测试,我们可以看到,正如我们所料,当pcmk-1不在线的时候ClusterIP还是移动到了pcmk-2" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# ssh pcmk-1 -- /etc/init.d/corosync stop\n" #~ "Stopping Corosync Cluster Engine (corosync):               [  OK  ]\n" #~ "Waiting for services to unload:                            [  OK  ]\n" #~ "[root@pcmk-1 ~]# ssh pcmk-2 -- crm_mon -1\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:39:38 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition WITHOUT quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-2 ]\n" #~ "OFFLINE: [ pcmk-1 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# ssh pcmk-1 -- /etc/init.d/corosync stop\n" #~ "Stopping Corosync Cluster Engine (corosync):               [  OK  ]\n" #~ "Waiting for services to unload:                            [  OK  ]\n" #~ "[root@pcmk-1 ~]# ssh pcmk-2 -- crm_mon -1\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:39:38 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition WITHOUT quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-2 ]\n" #~ "OFFLINE: [ pcmk-1 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ msgid "However when we bring pcmk-1 back online, ClusterIP now remains running on pcmk-2." 
#~ msgstr "但是当我们把pcmk-1恢复在线后,ClusterIP现在还是跑在pcmk-2上面。" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# /etc/init.d/corosync start\n" #~ "Starting Corosync Cluster Engine (corosync): [ OK ]\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:41:23 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# /etc/init.d/corosync start\n" #~ "Starting Corosync Cluster Engine (corosync): [ OK ]\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 15:41:23 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "1 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ msgid "In order to guarantee the safety of your data" #~ msgstr "为了保持你的数据的安全性" #~ msgid "If the data is corrupt, there is little point in continuing to make it available" #~ msgstr "如果数据是异常的, 那保证它的可用性是没有意义的。" #~ msgid ", Pacemaker ships with STONITH" #~ msgstr ",Pacemaker与STONITH一起工作" #~ msgid "A common node fencing mechanism. Used to ensure data integrity by powering off “bad” nodes." #~ msgstr "一个常见的隔离途径。用关闭“坏”节点的电源的方法来保证数据完整。" #~ msgid "enabled. However it also knows when no STONITH configuration has been supplied and reports this as a problem (since the cluster would not be able to make progress if a situation requiring node fencing arose)." #~ msgstr "但是当没有配置STONITH的时候也会报这个错误,(因为 当集群需要隔离的时候,集群就无法工作了。)" #~ msgid "Therefore a two-node cluster only has quorum when both nodes are running, which is no longer the case for our cluster. This would normally make the creation of a two-node cluster pointless" #~ msgstr "因此 两个节点的集群 只有在两者都在线时才是合法的。 这个规则会让 两个节点的集群 毫无意义," #~ msgid "Actually some would argue that two-node clusters are always pointless, but that is an argument for another time." #~ msgstr "事实上某些人会说双节点集群本身就是没有意义的,但这不是现在我们要讨论的问题。" #~ msgid ", however it is possible to control how Pacemaker behaves when quorum is lost. In particular, we can tell the cluster to simply ignore quorum altogether." #~ msgstr "但是我们可以控制Pacemaker达不到法定人数时候的行为。简单来说,我们告诉集群忽略它 。" #~ msgid "To address this, Pacemaker has the concept of resource stickiness which controls how much a service prefers to stay running where it is. You may like to think of it as the “cost” of any downtime. By default, Pacemaker assumes there is zero cost associated with moving resources and will do so to achieve “optimal" #~ msgstr "为了达到这个效果,Pacemaker 有一个叫做 资源黏性值 的概念,它能够控制一个服务(资源)有多想呆在它正在运行的节点上。你可以把它认为是无法提供服务的“代价" #~ msgid "It should be noted that Pacemaker’s definition of optimal may not always agree with that of a human’s. The order in which Pacemaker processes lists of resources and nodes create implicit preferences (required in order to create a stabile solution) in situations where the administrator had not explicitly specified some." #~ msgstr "这里要注意的是Pacemaker定义的代价通常跟常人认为的不一样。如果管理员没有明确的指定顺序,Pacemaker处理列表中的资源和节点顺序隐式地定义了参数(创建稳定的方案所需要的)。" #~ msgid "” resource placement. 
We can specify a different stickiness for every resource, but it is often sufficient to change the default." #~ msgstr "”。 Pacemaker默认这个值为0,以达到最优分布各个资源的目的。我们可以为每个资源定义不同的黏性值,但一般来说,更改默认黏性值就够了。" diff --git a/doc/Clusters_from_Scratch/zh-CN/Ch-Apache.po b/doc/Clusters_from_Scratch/zh-CN/Ch-Apache.po index 520e738bf5..ded6f3ac12 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Ch-Apache.po +++ b/doc/Clusters_from_Scratch/zh-CN/Ch-Apache.po @@ -1,1192 +1,1192 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-15 23:37+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Apache - Adding More Services" msgstr "Apache - 添加更多的服务" #. Tag: title #, no-c-format msgid "Forward" -msgstr "" +msgstr "前言" #. Tag: para #, no-c-format msgid "Now that we have a basic but functional active/passive two-node cluster, we’re ready to add some real services. We’re going to start with Apache because its a feature of many clusters and relatively simple to configure." -msgstr "现在我们有了一个基本的但是功能齐全的双节点集群,我们已经可以往里面加些真的服务了。我们准备启动一个Apache服务,因为它是许多集群的主角,并且相对来说比较容易配置。" +msgstr "现在我们有了一个基本的但是功能齐全的双节点集群,我们已经可以往里面加些真正的服务了。我们准备启动一个Apache服务,因为它是许多集群的主角,并且相对来说比较容易配置。" #. Tag: title #, no-c-format msgid "Installation" msgstr "安装Apache" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Before continuing, we need to make sure Apache is installed on both hosts. We also need the wget tool in order for the cluster to be able to check the status of the Apache server." -msgstr "同样的,为了检测Apache服务器,我们要安装wget这个工具。" +msgstr "在继续之前,我们需要确保两个节点上都安装了Apache。我们还需要wget这个工具,以便集群能够检查Apache服务器的状态。" #. Tag: programlisting #, no-c-format msgid "# yum install -y httpd wget" msgstr "" #.
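# Editor's sketch (a hypothetical check, assuming root ssh access between the
# nodes): after the yum install above, confirm both packages are present on
# each host before continuing:
#   for n in pcmk-1 pcmk-2; do ssh $n -- rpm -q httpd wget; done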
Tag: literallayout #, no-c-format msgid "" "Loaded plugins: langpacks, presto, refresh-packagekit\n" "fedora/metalink | 2.6 kB 00:00\n" "updates/metalink | 3.2 kB 00:00\n" "updates-testing/metalink | 41 kB 00:00\n" "Resolving Dependencies\n" "--> Running transaction check\n" "---> Package httpd.x86_64 0:2.2.22-3.fc17 will be installed\n" "--> Processing Dependency: httpd-tools = 2.2.22-3.fc17 for package: httpd-2.2.22-3.fc17.x86_64\n" "--> Processing Dependency: apr-util-ldap for package: httpd-2.2.22-3.fc17.x86_64\n" "--> Processing Dependency: libaprutil-1.so.0()(64bit) for package: httpd-2.2.22-3.fc17.x86_64\n" "--> Processing Dependency: libapr-1.so.0()(64bit) for package: httpd-2.2.22-3.fc17.x86_64\n" "--> Running transaction check\n" "---> Package apr.x86_64 0:1.4.6-1.fc17 will be installed\n" "---> Package apr-util.x86_64 0:1.4.1-2.fc17 will be installed\n" "---> Package apr-util-ldap.x86_64 0:1.4.1-2.fc17 will be installed\n" "---> Package httpd-tools.x86_64 0:2.2.22-3.fc17 will be installed\n" "--> Finished Dependency Resolution\n" "\n" "Dependencies Resolved\n" "\n" "=====================================================================================\n" " Package Arch Version Repository Size\n" "=====================================================================================\n" "Installing:\n" " httpd x86_64 2.2.22-3.fc17 updates-testing 823 k\n" " wget x86_64 1.13.4-2.fc17 fedora 495 k\n" "Installing for dependencies:\n" " apr x86_64 1.4.6-1.fc17 fedora 99 k\n" " apr-util x86_64 1.4.1-2.fc17 fedora 78 k\n" " apr-util-ldap x86_64 1.4.1-2.fc17 fedora 17 k\n" " httpd-tools x86_64 2.2.22-3.fc17 updates-testing 74 k\n" "\n" "Transaction Summary\n" "=====================================================================================\n" "Install 1 Package (+4 Dependent packages)\n" "\n" "Total download size: 1.1 M\n" "Installed size: 3.5 M\n" "Downloading Packages:\n" "(1/6): apr-1.4.6-1.fc17.x86_64.rpm | 99 kB 00:00\n" "(2/6): apr-util-1.4.1-2.fc17.x86_64.rpm | 78 kB 00:00\n" "(3/6): apr-util-ldap-1.4.1-2.fc17.x86_64.rpm | 17 kB 00:00\n" "(4/6): httpd-2.2.22-3.fc17.x86_64.rpm | 823 kB 00:01\n" "(5/6): httpd-tools-2.2.22-3.fc17.x86_64.rpm | 74 kB 00:00\n" "(6/6): wget-1.13.4-2.fc17.x86_64.rpm | 495 kB 00:01\n" "-------------------------------------------------------------------------------------\n" "Total 238 kB/s | 1.1 MB 00:04\n" "Running Transaction Check\n" "Running Transaction Test\n" "Transaction Test Succeeded\n" "Running Transaction\n" " Installing : apr-1.4.6-1.fc17.x86_64 1/6\n" " Installing : apr-util-1.4.1-2.fc17.x86_64 2/6\n" " Installing : apr-util-ldap-1.4.1-2.fc17.x86_64 3/6\n" " Installing : httpd-tools-2.2.22-3.fc17.x86_64 4/6\n" " Installing : httpd-2.2.22-3.fc17.x86_64 5/6\n" " Installing : wget-1.13.4-2.fc17.x86_64 6/6\n" " Verifying : apr-util-ldap-1.4.1-2.fc17.x86_64 1/6\n" " Verifying : httpd-tools-2.2.22-3.fc17.x86_64 2/6\n" " Verifying : apr-util-1.4.1-2.fc17.x86_64 3/6\n" " Verifying : apr-1.4.6-1.fc17.x86_64 4/6\n" " Verifying : httpd-2.2.22-3.fc17.x86_64 5/6\n" " Verifying : wget-1.13.4-2.fc17.x86_64 6/6\n" "\n" "Installed:\n" " httpd.x86_64 0:2.2.22-3.fc17 wget.x86_64 0:1.13.4-2.fc17\n" "\n" "Dependency Installed:\n" " apr.x86_64 0:1.4.6-1.fc17 apr-util.x86_64 0:1.4.1-2.fc17\n" " apr-util-ldap.x86_64 0:1.4.1-2.fc17 httpd-tools.x86_64 0:2.2.22-3.fc17\n" "\n" "Complete!" msgstr "" #. Tag: title #, no-c-format msgid "Preparation" msgstr "准备工作" #. Tag: para #, no-c-format msgid "First we need to create a page for Apache to serve up. 
On Fedora the default Apache docroot is /var/www/html, so we’ll create an index file there." msgstr "首先我们为Apache创建一个主页。在Fedora上面默认的Apache docroot是/var/www/html,所以我们在这个目录下面建立一个主页。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# cat <<-END >/var/www/html/index.html\n" " <html>\n" " <body>My Test Site - pcmk-1</body>\n" " </html>\n" "END" msgstr "" "\n" "[root@pcmk-1 ~]# cat <<-END >/var/www/html/index.html\n" " <html>\n" " <body>My Test Site - pcmk-1</body>\n" " </html>\n" " END\n" "[root@pcmk-1 ~]#\n" #. Tag: para #, no-c-format msgid "For the moment, we will simplify things by serving up only a static site and manually sync the data between the two nodes. So run the command again on pcmk-2." msgstr "为了简单起见,我们暂时只提供一个静态页面,并人工在两个节点之间同步数据。所以请在pcmk-2上面再运行一次这个命令。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "[root@pcmk-2 ~]# cat <<-END >/var/www/html/index.html <html>\n" " <body>My Test Site - pcmk-2</body>\n" " </html>\n" " END" msgstr "" "\n" "[root@pcmk-2 ~]# cat <<-END >/var/www/html/index.html\n" " <html>\n" " <body>My Test Site - pcmk-2</body>\n" " </html>\n" " END\n" "[root@pcmk-2 ~]#\n" #. Tag: title #, no-c-format msgid "Enable the Apache status URL" msgstr "开启 Apache status URL" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "In order to monitor the health of your Apache instance, and recover it if it fails, the resource agent used by Pacemaker assumes the server-status URL is available. Look for the following in /etc/httpd/conf/httpd.conf and make sure it is not disabled or commented out:" -msgstr "为了监控Apache实例的健康状态,并在它挂掉的时候恢复Apache服务,资源agent会假设 server-status URL是可用的。查看/etc/httpd/conf/httpd.conf并确保下面的选项没有被禁用或注释掉。" +msgstr "为了监控Apache实例的健康状态,并在它挂掉的时候恢复Apache服务,Pacemaker使用的资源agent会假设 server-status URL是可用的。查看/etc/httpd/conf/httpd.conf,并确保下面的选项没有被禁用或注释掉:" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "<Location /server-status>\n" " SetHandler server-status\n" " Order deny,allow\n" " Deny from all\n" " Allow from 127.0.0.1\n" "</Location>" msgstr "" "\n" "<Location /server-status>\n" " SetHandler server-status\n" " Order deny,allow\n" " Deny from all\n" " Allow from 127.0.0.1\n" "</Location>\n" "\t " #. Tag: title #, no-c-format msgid "Update the Configuration" msgstr "更新配置文件" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "At this point, Apache is ready to go, all that needs to be done is to add it to the cluster. Lets call the resource WebSite. We need to use an OCF script called apache in the heartbeat namespace Compare the key used here ocf:heartbeat:apache with the one we used earlier for the IP address: ocf:heartbeat:IPaddr2 , the only required parameter is the path to the main Apache configuration file and we’ll tell the cluster to check once a minute that apache is still running." -msgstr "现在 ,Apache已经可以添加到集群中了。我们管这个资源叫WebSite。我们需要用一个叫做apache的OCF脚本,这个脚本在heartbeat这个名字空间里,唯一一个需要设定的参数就是Apache的主配置文件路径,并且我们告诉集群每一分钟检测一次Apache是否运行。" +msgstr "现在,Apache已经可以添加到集群中了。我们管这个资源叫WebSite。我们需要用一个叫做apache的OCF脚本,这个脚本在heartbeat名字空间里(可以比较这里使用的 ocf:heartbeat:apache 和我们之前为IP地址使用的 ocf:heartbeat:IPaddr2)。唯一一个必须设定的参数就是Apache的主配置文件路径,并且我们告诉集群每一分钟检测一次Apache是否运行。" #. Tag: screen #, no-c-format msgid "" "pcs resource create WebSite ocf:heartbeat:apache \\\n" " configfile=/etc/httpd/conf/httpd.conf \\\n" " statusurl=\"http://localhost/server-status\" op monitor interval=1min" msgstr "" #. Tag: para #, no-c-format msgid "By default, the operation timeout for all resource’s start, stop, and monitor operations is 20 seconds. In many cases this timeout period is less than the advised timeout period. 
For the purposes of this tutorial, we will adjust the global operation timeout default to 240 seconds." -msgstr "" +msgstr "默认情况下,所有资源的start、stop和monitor操作的超时时间都是20秒。在很多情况下,这个超时时间比建议的超时时间要短。在本教程中,我们将把全局默认的操作超时时间调整为240秒。" #. Tag: programlisting #, no-c-format msgid "" "# pcs resource op defaults timeout=240s\n" "# pcs resource op defaults\n" "timeout: 240s" msgstr "" #. Tag: para #, no-c-format msgid "After a short delay, we should see the cluster start apache" -msgstr "过了一会,我们可以看到集群把apache启动起来了。" +msgstr "过了一会儿,我们可以看到集群把apache启动起来了。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# pcs status\n" "\n" "Last updated: Fri Sep 14 10:51:27 2012\n" "Last change: Fri Sep 14 10:50:46 2012 via crm_attribute on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "2 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2\n" " WebSite (ocf::heartbeat:apache): Started pcmk-1" msgstr "" "\n" "[root@pcmk-1 ~]# crm_mon\n" "============\n" "Last updated: Fri Aug 28 16:12:49 2009\n" "Stack: openais\n" "Current DC: pcmk-2 - partition with quorum\n" "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" "2 Nodes configured, 2 expected votes\n" "2 Resources configured.\n" "============\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" "WebSite        (ocf::heartbeat:apache):        Started pcmk-1\n" #. Tag: para #, no-c-format msgid "Wait a moment, the WebSite resource isn’t running on the same host as our IP address!" -msgstr "等等!WebSite这个资源跟IP没有跑在同一个节点上面!" +msgstr "等等!WebSite这个资源跟IP地址没有跑在同一个节点上面!" #. Tag: para #, no-c-format msgid "If, in the pcs status output, you see the WebSite resource has failed to start, then you’ve likely not enabled the status URL correctly. You can check if this is the problem by running:" -msgstr "" +msgstr "如果在pcs status的输出中,你看到WebSite资源启动失败了,那极有可能是你没有正确地启用status URL。你可以通过运行下面的命令来检查是否是这个原因:" #. Tag: literallayout #, no-c-format msgid "wget http://127.0.0.1/server-status" msgstr "" #. Tag: para #, no-c-format msgid "If you see Connection refused in the output, then this is indeed the problem. Check to ensure that Allow from 127.0.0.1 is present for the <Location /server-status> block." -msgstr "" +msgstr "如果你看到的输出是“Connection refused”,这确实是个问题。检查配置,确保 <Location /server-status> 块中有“Allow from 127.0.0.1”。" #. Tag: title #, no-c-format msgid "Ensuring Resources Run on the Same Host" msgstr "确保资源在同一个节点运行" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To reduce the load on any one machine, Pacemaker will generally try to spread the configured resources across the cluster nodes. However we can tell the cluster that two resources are related and need to run on the same host (or not at all). Here we instruct the cluster that WebSite can only run on the host that ClusterIP is active on." -msgstr "为了减少每个机器的负载,Pacemaker会智能地尝试将资源分散到各个节点上面。 然而我们可以告诉集群某两个资源是有联系并且要在同一个节点运行(或不同的节点运行)。这里我们告诉集群WebSite只能在有ClusterIP的节点上运行。如果ClusterIP在哪个节点都不存在,那么WebSite也不能运行。" +msgstr "为了减少每个机器的负载,Pacemaker会智能地尝试将资源分散到各个节点上面。然而我们可以告诉集群某两个资源是有联系的,并且要在同一个节点运行(或者干脆都不运行)。这里我们告诉集群,WebSite只能在有ClusterIP的节点上运行。" #. Tag: para #, no-c-format msgid "To achieve this we use a colocation constraint that indicates it is mandatory for WebSite to run on the same node as ClusterIP. 
The \"mandatory\" part of the colocation constraint is indicated by using a score of INFINITY. The INFINITY score also means that if ClusterIP is not active anywhere, WebSite will not be permitted to run." -msgstr "" +msgstr "为此我们使用托管约束来强制性的表明WebSite和ClusterIP运行在同一节点。“强制性”部分的托管约束使用分数INFINITY(无穷大)来表示。无穷大也表明了如果ClusterIP没有在任何节点运行,那么WebSite也不能运行。" #. Tag: para #, no-c-format msgid "If ClusterIP is not active anywhere, WebSite will not be permitted to run anywhere." -msgstr "" +msgstr "如果ClusterIP在任何节点都不存在,那么WebSite也不能运行。" #. Tag: para #, no-c-format msgid "Colocation constraints are \"directional\", in that they imply certain things about the order in which the two resources will have a location chosen. In this case we’re saying WebSite needs to be placed on the same machine as ClusterIP, this implies that we must know the location of ClusterIP before choosing a location for WebSite." -msgstr "" +msgstr "托管约束是定向性的,这样他们意味该命令,两个资源将会有一个位置选择。这种情况下我们说WebSite需要被放置在和ClusterIP相同的集群上,这意味着在选择WebSite的位置前,我们必须要知道ClusterIP的位置。" #. Tag: programlisting #, no-c-format msgid "" "# pcs constraint colocation add WebSite ClusterIP INFINITY\n" "# pcs constraint\n" "Location Constraints:\n" "Ordering Constraints:\n" "Colocation Constraints:\n" " WebSite with ClusterIP\n" "# pcs status\n" "\n" "Last updated: Fri Sep 14 11:00:44 2012\n" "Last change: Fri Sep 14 11:00:25 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "2 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2\n" " WebSite (ocf::heartbeat:apache): Started pcmk-2" msgstr "" #. Tag: title #, no-c-format msgid "Controlling Resource Start/Stop Ordering" msgstr "控制资源的启动停止顺序" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "When Apache starts, it binds to the available IP addresses. It doesn’t know about any addresses we add afterwards, so not only do they need to run on the same node, but we need to make sure ClusterIP is already active before we start WebSite. We do this by adding an ordering constraint." -msgstr "当Apache启动了,它跟可用的IP绑在了一起。它不会知道我们后来添加的IP,所以我们不仅需要控制他们在相同的节点运行,也要确保ClusterIP在WebSite之前就启动了。我们用添加ordering约束来达到这个效果。我们需要给这个order取个名字(apache-after-ip之类 描述性的),并指出他是托管的(这样当ClusterIP恢复了,同时会触发WebSite的恢复) 并且写明了这两个资源的启动顺序。" +msgstr "当Apache启动了,它跟可用的IP绑在了一起。它不知道我们后来添加的IP,所以我们不仅需要控制他们在相同的节点运行,也要确保ClusterIP在WebSite之前就启动了。我们用添加ordering约束来达到这个效果。我们需要给这个order取个名字(apache-after-ip之类描述性的),并指出他是托管的(这样当ClusterIP恢复了,同时会触发WebSite的恢复) 并写明了这两个资源的启动顺序。" #. Tag: para #, no-c-format msgid "By default all order constraints are mandatory constraints unless otherwise configured. This means that the recovery of ClusterIP will also trigger the recovery of WebSite." -msgstr "" +msgstr "默认情况下,所有的顺序约束是强制性约束,除非另有配置。这意味着ClusterIP的恢复也会触发WebSite的恢复。" #. Tag: programlisting #, no-c-format msgid "" "# pcs constraint order ClusterIP then WebSite\n" "Adding ClusterIP WebSite (kind: Mandatory) (Options: first-action=start then-action=start)\n" "# pcs constraint\n" "Location Constraints:\n" "Ordering Constraints:\n" " start ClusterIP then start WebSite\n" "Colocation Constraints:\n" " WebSite with ClusterIP" msgstr "" #. Tag: title #, no-c-format msgid "Specifying a Preferred Location" msgstr "指定优先的 Location" #. 
Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Pacemaker does not rely on any sort of hardware symmetry between nodes, so it may well be that one machine is more powerful than the other. In such cases it makes sense to host the resources there if it is available. To do this we create a location constraint." -msgstr "Pacemaker 并不要求你机器的硬件配置是相同的,可能某些机器比另外的机器配置要好。这种状况下我们会希望设置:当某个节点可用时,资源就要跑在上面之类的规则。为了达到这个效果我们创建location约束。同样的,我们给他取一个描述性的名字(prefer-pcmk-1),指明我们想在上面跑WebSite这个服务,多想在上面跑(我们现在指定分值为50,但是在双节点的集群状态下,任何大于0的值都可以达到想要的效果),以及目标节点的名字:" +msgstr "Pacemaker 并不要求你机器的硬件配置是相同的,可能某些机器比另外的机器配置要好。这种状况下我们会希望设置当某个节点可用时,资源就要跑在上面之类的规则。为了达到这个效果我们创建location约束。" #. Tag: para #, no-c-format msgid "In the location constraint below, we are saying the WebSite resource prefers the node pcmk-1 with a score of 50. The score here indicates how badly we’d like the resource to run somewhere." -msgstr "" +msgstr "在下面的location约束中,我们表明WebSite资源更倾向于pcmk-1这个节点,分值为50。这里的分值表示我们有多希望这个资源在某个节点上运行(我们现在指定分值为50,但是在双节点的集群状态下,任何大于0的值都可以达到想要的效果)。" #. Tag: programlisting #, no-c-format msgid "" "# pcs constraint location WebSite prefers pcmk-1=50\n" "# pcs constraint\n" "Location Constraints:\n" " Resource: WebSite\n" " Enabled on: pcmk-1 (score:50)\n" "Ordering Constraints:\n" " start ClusterIP then start WebSite\n" "Colocation Constraints:\n" " WebSite with ClusterIP\n" "# pcs status\n" "Last updated: Fri Sep 14 11:06:37 2012\n" "Last change: Fri Sep 14 11:06:26 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "2 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2\n" " WebSite (ocf::heartbeat:apache): Started pcmk-2" msgstr "" #. Tag: para #, no-c-format msgid "Wait a minute, the resources are still on pcmk-2!" msgstr "等等,资源还是在pcmk-2上面跑的!" #. Tag: para #, no-c-format msgid "Even though we now prefer pcmk-1 over pcmk-2, that preference is (intentionally) less than the resource stickiness (how much we preferred not to have unnecessary downtime)." -msgstr "即使我们更希望资源在pcmk-1上面运行,但是 这个优先值还是比资源黏性值要小。" +msgstr "即使我们现在更希望资源在pcmk-1上面运行,但是这个优先值(有意地)还是比资源粘性值要小(资源粘性值表示我们有多不希望发生不必要的停机)。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To see the current placement scores, you can use a tool called crm_simulate" -msgstr "如果要看现在的分值,可以用ptest这个命令" +msgstr "如果要看现在的分值,可以用crm_simulate这个命令" #. Tag: programlisting #, no-c-format msgid "" "# crm_simulate -sL\n" "Current cluster status:\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" " ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-2\n" " WebSite (ocf:heartbeat:apache): Started pcmk-2\n" "\n" "Allocation scores:\n" "native_color: ClusterIP allocation score on pcmk-1: 50\n" "native_color: ClusterIP allocation score on pcmk-2: 200\n" "native_color: WebSite allocation score on pcmk-1: -INFINITY\n" "native_color: WebSite allocation score on pcmk-2: 100\n" "\n" "Transition Summary:" msgstr "" #. Tag: title #, no-c-format msgid "Manually Moving Resources Around the Cluster" -msgstr "在集群中手工地移动资源" +msgstr "在集群中手动迁移资源" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "There are always times when an administrator needs to override the cluster and force resources to move to a specific location." 
-msgstr "经常性的会有管理员想要无视集群然后强制把资源移动到指定的地方。 底层的操作就像我们上面创建的location约束一样。只要提供资源和目标地址,我们会补全剩余部分。" +msgstr "经常性的会有管理员想要无视集群然后强制把资源迁移到指定的地方。底层的操作就像我们上面创建的location约束一样。只要提供资源和目标地址,我们会补全剩余部分。" #. Tag: programlisting #, no-c-format msgid "" "# pcs constraint location WebSite prefers pcmk-1=INFINITY\n" "# pcs constraint all\n" "Location Constraints:\n" " Resource: WebSite\n" " Enabled on: pcmk-1 (score:INFINITY) (id:location-WebSite-pcmk-1-INFINITY)\n" "Ordering Constraints:\n" " start ClusterIP then start WebSite (Mandatory) (id:order-ClusterIP-WebSite-mandatory)\n" "Colocation Constraints:\n" " WebSite with ClusterIP (INFINITY) (id:colocation-WebSite-ClusterIP-INFINITY)\n" "# pcs status\n" "\n" "Last updated: Fri Sep 14 11:16:26 2012\n" "Last change: Fri Sep 14 11:16:18 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "2 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1\n" " WebSite (ocf::heartbeat:apache): Started pcmk-1" msgstr "" #. Tag: title #, no-c-format msgid "Giving Control Back to the Cluster" msgstr "把控制权交还给集群" #. Tag: para #, no-c-format msgid "Once we’ve finished whatever activity that required us to move the resources to pcmk-1, in our case nothing, we can then allow the cluster to resume normal operation with the unmove command. Since we previously configured a default stickiness, the resources will remain on pcmk-1." -msgstr "当我们完成那些要求要资源移动到pcmk-1的操作--在我们的例子里面啥都没干 --我们可以用unmove命令把集群恢复到强制移动前的状态。因为我们之前配置了默认的资源黏性值,恢复了以后资源还是会在pcmk-1上面。" +msgstr "当我们完成那些要求要资源迁移到pcmk-1的操作--在我们的例子里面啥都没干 --我们可以用unmove命令把集群恢复到强制迁移前的状态。因为我们之前配置了默认的资源粘性值,恢复了以后资源还是会在pcmk-1上面。" #. Tag: programlisting #, no-c-format msgid "" "# pcs constraint all\n" "Location Constraints:\n" " Resource: WebSite\n" " Enabled on: pcmk-1 (score:INFINITY) (id:location-WebSite-pcmk-1-INFINITY)\n" "Ordering Constraints:\n" " start ClusterIP then start WebSite (Mandatory) (id:order-ClusterIP-WebSite-mandatory)\n" "Colocation Constraints:\n" " WebSite with ClusterIP (INFINITY) (id:colocation-WebSite-ClusterIP-INFINITY)\n" "# pcs constraint rm location-WebSite-pcmk-1-INFINITY\n" "# pcs constraint\n" "Location Constraints:\n" "Ordering Constraints:\n" " start ClusterIP then start WebSite\n" "Colocation Constraints:\n" " WebSite with ClusterIP" msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Note that the constraint is now gone. If we check the cluster status, we can also see that as expected the resources are still active on pcmk-1." -msgstr "可以看到自动生成的约束已经没有了。如果我们查看集群的状态,我们也可以看到就如我们所预期的,资源还是在pcmk-1上面跑" +msgstr "可以看到自动生成的约束已经没有了。如果我们查看集群的状态,也可以看到就如我们所预期的,资源还是在pcmk-1上面跑。" #. 
Tag: programlisting #, fuzzy, no-c-format msgid "" "# pcs status\n" "\n" "Last updated: Fri Sep 14 11:57:12 2012\n" "Last change: Fri Sep 14 11:57:03 2012 via cibadmin on pcmk-1\n" "Stack: corosync\n" "Current DC: pcmk-2 (2) - partition with quorum\n" "Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0\n" "2 Nodes configured, unknown expected votes\n" "2 Resources configured.\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "Full list of resources:\n" "\n" " ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1\n" " WebSite (ocf::heartbeat:apache): Started pcmk-1" msgstr "" "\n" "[root@pcmk-1 ~]# crm_mon\n" "============\n" "Last updated: Fri Aug 28 16:12:49 2009\n" "Stack: openais\n" "Current DC: pcmk-2 - partition with quorum\n" "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" "2 Nodes configured, 2 expected votes\n" "2 Resources configured.\n" "============\n" "\n" "Online: [ pcmk-1 pcmk-2 ]\n" "\n" "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" "WebSite        (ocf::heartbeat:apache):        Started pcmk-1\n" #~ msgid "Before continuing, we need to make sure Apache is installed on both hosts." #~ msgstr "在继续之前,我们先确保两个节点安装了Apache." #~ msgid "" #~ "\n" #~ "[root@ppcmk-1 ~]# yum install -y httpd\n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package httpd.x86_64 0:2.2.13-2.fc12 set to be updated\n" #~ "--> Processing Dependency: httpd-tools = 2.2.13-2.fc12 for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: apr-util-ldap for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: /etc/mime.types for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: libaprutil-1.so.0()(64bit) for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: libapr-1.so.0()(64bit) for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Running transaction check\n" #~ "---> Package apr.x86_64 0:1.3.9-2.fc12 set to be updated\n" #~ "---> Package apr-util.x86_64 0:1.3.9-2.fc12 set to be updated\n" #~ "---> Package apr-util-ldap.x86_64 0:1.3.9-2.fc12 set to be updated\n" #~ "---> Package httpd-tools.x86_64 0:2.2.13-2.fc12 set to be updated\n" #~ "---> Package mailcap.noarch 0:2.1.30-1.fc12 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ "=======================================================================================\n" #~ " Package               Arch             Version                Repository         Size\n" #~ "=======================================================================================\n" #~ "Installing:\n" #~ " httpd               x86_64           2.2.13-2.fc12            rawhide           735 k\n" #~ "Installing for dependencies:\n" #~ " apr                 x86_64           1.3.9-2.fc12             rawhide           117 k\n" #~ " apr-util            x86_64           1.3.9-2.fc12             rawhide            84 k\n" #~ " apr-util-ldap       x86_64           1.3.9-2.fc12             rawhide            15 k\n" #~ " httpd-tools         x86_64           2.2.13-2.fc12            rawhide            63 k\n" #~ " mailcap             noarch           2.1.30-1.fc12            rawhide            25 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "=======================================================================================\n" #~ "Install       6 Package(s)\n" #~ "Upgrade       0 Package(s)\n" #~ "\n" #~ "Total download size: 1.0 M\n" #~ "Downloading Packages:\n" #~ "(1/6): 
apr-1.3.9-2.fc12.x86_64.rpm                                   | 117 kB     00:00     \n" #~ "(2/6): apr-util-1.3.9-2.fc12.x86_64.rpm                             |  84 kB     00:00     \n" #~ "(3/6): apr-util-ldap-1.3.9-2.fc12.x86_64.rpm                         |  15 kB     00:00     \n" #~ "(4/6): httpd-2.2.13-2.fc12.x86_64.rpm                               | 735 kB     00:00     \n" #~ "(5/6): httpd-tools-2.2.13-2.fc12.x86_64.rpm                         |  63 kB     00:00     \n" #~ "(6/6): mailcap-2.1.30-1.fc12.noarch.rpm                             |  25 kB     00:00     \n" #~ "----------------------------------------------------------------------------------------\n" #~ "Total                                                       875 kB/s | 1.0 MB     00:01     \n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Finished Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ "  Installing     : apr-1.3.9-2.fc12.x86_64                                         1/6 \n" #~ "  Installing     : apr-util-1.3.9-2.fc12.x86_64                                     2/6 \n" #~ "  Installing     : apr-util-ldap-1.3.9-2.fc12.x86_64                               3/6 \n" #~ "  Installing     : httpd-tools-2.2.13-2.fc12.x86_64                                 4/6 \n" #~ "  Installing     : mailcap-2.1.30-1.fc12.noarch                                     5/6 \n" #~ "  Installing     : httpd-2.2.13-2.fc12.x86_64                                       6/6 \n" #~ "\n" #~ "Installed:\n" #~ "  httpd.x86_64 0:2.2.13-2.fc12                                                         \n" #~ "\n" #~ "Dependency Installed:\n" #~ "  apr.x86_64 0:1.3.9-2.fc12            apr-util.x86_64 0:1.3.9-2.fc12\n" #~ "  apr-util-ldap.x86_64 0:1.3.9-2.fc12  httpd-tools.x86_64 0:2.2.13-2.fc12\n" #~ "  mailcap.noarch 0:2.1.30-1.fc12  \n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 ~]#\n" #~ msgstr "" #~ "\n" #~ "[root@ppcmk-1 ~]# yum install -y httpd\n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package httpd.x86_64 0:2.2.13-2.fc12 set to be updated\n" #~ "--> Processing Dependency: httpd-tools = 2.2.13-2.fc12 for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: apr-util-ldap for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: /etc/mime.types for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: libaprutil-1.so.0()(64bit) for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Processing Dependency: libapr-1.so.0()(64bit) for package: httpd-2.2.13-2.fc12.x86_64\n" #~ "--> Running transaction check\n" #~ "---> Package apr.x86_64 0:1.3.9-2.fc12 set to be updated\n" #~ "---> Package apr-util.x86_64 0:1.3.9-2.fc12 set to be updated\n" #~ "---> Package apr-util-ldap.x86_64 0:1.3.9-2.fc12 set to be updated\n" #~ "---> Package httpd-tools.x86_64 0:2.2.13-2.fc12 set to be updated\n" #~ "---> Package mailcap.noarch 0:2.1.30-1.fc12 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ "=======================================================================================\n" #~ " Package               Arch             Version                Repository         Size\n" #~ "=======================================================================================\n" #~ "Installing:\n" #~ " httpd               x86_64           2.2.13-2.fc12            rawhide           735 k\n" #~ "Installing for dependencies:\n" #~ " apr         
        x86_64           1.3.9-2.fc12             rawhide           117 k\n" #~ " apr-util            x86_64           1.3.9-2.fc12             rawhide            84 k\n" #~ " apr-util-ldap       x86_64           1.3.9-2.fc12             rawhide            15 k\n" #~ " httpd-tools         x86_64           2.2.13-2.fc12            rawhide            63 k\n" #~ " mailcap             noarch           2.1.30-1.fc12            rawhide            25 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "=======================================================================================\n" #~ "Install       6 Package(s)\n" #~ "Upgrade       0 Package(s)\n" #~ "\n" #~ "Total download size: 1.0 M\n" #~ "Downloading Packages:\n" #~ "(1/6): apr-1.3.9-2.fc12.x86_64.rpm                                   | 117 kB     00:00     \n" #~ "(2/6): apr-util-1.3.9-2.fc12.x86_64.rpm                             |  84 kB     00:00     \n" #~ "(3/6): apr-util-ldap-1.3.9-2.fc12.x86_64.rpm                         |  15 kB     00:00     \n" #~ "(4/6): httpd-2.2.13-2.fc12.x86_64.rpm                               | 735 kB     00:00     \n" #~ "(5/6): httpd-tools-2.2.13-2.fc12.x86_64.rpm                         |  63 kB     00:00     \n" #~ "(6/6): mailcap-2.1.30-1.fc12.noarch.rpm                             |  25 kB     00:00     \n" #~ "----------------------------------------------------------------------------------------\n" #~ "Total                                                       875 kB/s | 1.0 MB     00:01     \n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Finished Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ "  Installing     : apr-1.3.9-2.fc12.x86_64                                         1/6 \n" #~ "  Installing     : apr-util-1.3.9-2.fc12.x86_64                                     2/6 \n" #~ "  Installing     : apr-util-ldap-1.3.9-2.fc12.x86_64                               3/6 \n" #~ "  Installing     : httpd-tools-2.2.13-2.fc12.x86_64                                 4/6 \n" #~ "  Installing     : mailcap-2.1.30-1.fc12.noarch                                     5/6 \n" #~ "  Installing     : httpd-2.2.13-2.fc12.x86_64                                       6/6 \n" #~ "\n" #~ "Installed:\n" #~ "  httpd.x86_64 0:2.2.13-2.fc12                                                         \n" #~ "\n" #~ "Dependency Installed:\n" #~ "  apr.x86_64 0:1.3.9-2.fc12            apr-util.x86_64 0:1.3.9-2.fc12\n" #~ "  apr-util-ldap.x86_64 0:1.3.9-2.fc12  httpd-tools.x86_64 0:2.2.13-2.fc12\n" #~ "  mailcap.noarch 0:2.1.30-1.fc12  \n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 ~]#\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# yum install -y wget\n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package wget.x86_64 0:1.11.4-5.fc12 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ "===========================================================================================\n" #~ " Package        Arch             Version                      Repository               Size\n" #~ "===========================================================================================\n" #~ "Installing:\n" #~ " wget         x86_64          1.11.4-5.fc12                   rawhide                393 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "===========================================================================================\n" #~ "Install       1 Package(s)\n" #~ 
"Upgrade       0 Package(s)\n" #~ "\n" #~ "Total download size: 393 k\n" #~ "Downloading Packages:\n" #~ "wget-1.11.4-5.fc12.x86_64.rpm                                            | 393 kB     00:00     \n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Finished Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ "  Installing     : wget-1.11.4-5.fc12.x86_64                                            1/1 \n" #~ "\n" #~ "Installed:\n" #~ "  wget.x86_64 0:1.11.4-5.fc12\n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 ~]#\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# yum install -y wget\n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package wget.x86_64 0:1.11.4-5.fc12 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ "===========================================================================================\n" #~ " Package        Arch             Version                      Repository               Size\n" #~ "===========================================================================================\n" #~ "Installing:\n" #~ " wget         x86_64          1.11.4-5.fc12                   rawhide                393 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "===========================================================================================\n" #~ "Install       1 Package(s)\n" #~ "Upgrade       0 Package(s)\n" #~ "\n" #~ "Total download size: 393 k\n" #~ "Downloading Packages:\n" #~ "wget-1.11.4-5.fc12.x86_64.rpm                                            | 393 kB     00:00     \n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Finished Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ "  Installing     : wget-1.11.4-5.fc12.x86_64                                            1/1 \n" #~ "\n" #~ "Installed:\n" #~ "  wget.x86_64 0:1.11.4-5.fc12\n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 ~]#\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure primitive WebSite ocf:heartbeat:apache params configfile=/etc/httpd/conf/httpd.conf op monitor interval=1min\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ " params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ " op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure primitive WebSite ocf:heartbeat:apache params configfile=/etc/httpd/conf/httpd.conf op monitor interval=1min\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ " params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ " op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor 
interval=\"30s\"\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure colocation website-with-ip INFINITY: WebSite ClusterIP\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:14:34 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "WebSite        (ocf::heartbeat:apache):        Started pcmk-2\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure colocation website-with-ip INFINITY: WebSite ClusterIP\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:14:34 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "WebSite        (ocf::heartbeat:apache):        Started pcmk-2\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure order apache-after-ip mandatory: ClusterIP WebSite\n" #~ "[root@pcmk-1 ~]# crm configure 
show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure order apache-after-ip mandatory: ClusterIP WebSite\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "order apache-after-ip inf: ClusterIP WebSite\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:17:35 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "WebSite        (ocf::heartbeat:apache):        Started pcmk-2\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm configure location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "[root@pcmk-1 ~]# 
crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:17:35 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-2\n" #~ "WebSite        (ocf::heartbeat:apache):        Started pcmk-2\n" #~ msgid "ptest -sL" #~ msgstr "ptest -sL" #~ msgid "Include output" #~ msgstr "Include output" #~ msgid "There is a way to force them to move though..." #~ msgstr "这里有个办法强制地移动资源" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm resource move WebSite pcmk-1\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:19:24 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ "WebSite        (ocf::heartbeat:apache):        Started pcmk-1\n" #~ "Notice how the colocation rule we created has ensured that ClusterIP was also moved to pcmk-1.\n" #~ "For the curious, we can see the effect of this command by examining the configuration\n" #~ "crm configure show\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "location cli-prefer-WebSite WebSite \\\n" #~ " rule $id=\"cli-prefer-rule-WebSite\" inf: #uname eq pcmk-1\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm resource move WebSite pcmk-1\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ 
"============\n" #~ "Last updated: Fri Aug 28 16:19:24 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ "ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ "WebSite        (ocf::heartbeat:apache):        Started pcmk-1\n" #~ "Notice how the colocation rule we created has ensured that ClusterIP was also moved to pcmk-1.\n" #~ "For the curious, we can see the effect of this command by examining the configuration\n" #~ "crm configure show\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "location cli-prefer-WebSite WebSite \\\n" #~ " rule $id=\"cli-prefer-rule-WebSite\" inf: #uname eq pcmk-1\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgid "Highlighted is the automated constraint used to move the resources to pcmk-1" #~ msgstr "斜体部分是用来移动资源到pcmk-1约束,它是自动生成的。" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm resource unmove WebSite\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ "        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm resource unmove WebSite\n" #~ "[root@pcmk-1 ~]# crm configure show\n" #~ "node pcmk-1\n" #~ "node pcmk-2\n" #~ "primitive WebSite ocf:heartbeat:apache \\\n" #~ "        params configfile=\"/etc/httpd/conf/httpd.conf\" \\\n" #~ "        op monitor interval=\"1min\"\n" #~ "primitive ClusterIP ocf:heartbeat:IPaddr2 \\\n" #~ "        params ip=\"192.168.122.101\" cidr_netmask=\"32\" \\\n" #~ "        op monitor interval=\"30s\"\n" #~ "location prefer-pcmk-1 WebSite 50: pcmk-1\n" #~ "colocation website-with-ip inf: WebSite ClusterIP\n" #~ "property $id=\"cib-bootstrap-options\" \\\n" #~ "        dc-version=\"1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\" \\\n" #~ 
"        cluster-infrastructure=\"openais\" \\\n" #~ "        expected-quorum-votes=\"2\" \\\n" #~ "        stonith-enabled=\"false\" \\\n" #~ "        no-quorum-policy=\"ignore\"\n" #~ "rsc_defaults $id=\"rsc-options\" \\\n" #~ "        resource-stickiness=\"100\"\n" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:20:53 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ " ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ " WebSite        (ocf::heartbeat:apache):        Started pcmk-1\n" #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# crm_mon\n" #~ "============\n" #~ "Last updated: Fri Aug 28 16:20:53 2009\n" #~ "Stack: openais\n" #~ "Current DC: pcmk-2 - partition with quorum\n" #~ "Version: 1.0.5-462f1569a43740667daf7b0f6b521742e9eb8fa7\n" #~ "2 Nodes configured, 2 expected votes\n" #~ "2 Resources configured.\n" #~ "============\n" #~ "\n" #~ "Online: [ pcmk-1 pcmk-2 ]\n" #~ "\n" #~ " ClusterIP        (ocf::heartbeat:IPaddr):        Started pcmk-1\n" #~ " WebSite        (ocf::heartbeat:apache):        Started pcmk-1\n" #~ msgid "At this point, Apache is ready to go, all that needs to be done is to add it to the cluster. Lets call the resource WebSite. We need to use an OCF script called apache in the heartbeat namespace" #~ msgstr "现在 ,Apache已经可以添加到集群中了。我们管这个资源叫WebSite。我们需要用一个叫做apache的OCF脚本" #~ msgid "Compare the key used here ocf:heartbeat:apache with the one we used earlier for the IP address: ocf:heartbeat:IPaddr2" #~ msgstr "把我现在所用的关键字ocf:heartbeat:apache 跟之前IP地址所用的比较一下: ocf:heartbeat:IPaddr2" #~ msgid ", the only required parameter is the path to the main Apache configuration file and we’ll tell the cluster to check once a minute that apache is still running." #~ msgstr ",这个脚本在heartbeat这个名字空间里,唯一一个需要设定的参数就是Apache的主配置文件路径,并且我们告诉集群每一分钟检测一次apache是否运行。" diff --git a/doc/Clusters_from_Scratch/zh-CN/Ch-Installation.po b/doc/Clusters_from_Scratch/zh-CN/Ch-Installation.po index 903397a303..ecfd91734d 100644 --- a/doc/Clusters_from_Scratch/zh-CN/Ch-Installation.po +++ b/doc/Clusters_from_Scratch/zh-CN/Ch-Installation.po @@ -1,2186 +1,2186 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-10-17T05:19:03\n" "PO-Revision-Date: 2010-12-16 00:16+0800\n" -"Last-Translator: Charlie Chen \n" +"Last-Translator: Hu Fu \n" "Language-Team: None\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Installation" msgstr "安装" #. Tag: title #, no-c-format msgid "OS Installation" msgstr "安装操作系统" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Detailed instructions for installing Fedora are available at http://docs.fedoraproject.org/en-US/Fedora/17/html/Installation_Guide/ in a number of languages. The abbreviated version is as follows…" -msgstr "详细的安装手册在http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/。下文是一个简短的版本..." +msgstr "详细的安装手册在http://docs.fedoraproject.org/en-US/Fedora/17/html/Installation_Guide/。下文是一个简短的版本..." #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Point your browser to http://fedoraproject.org/en/get-fedora-all, locate the Install Media section and download the install DVD that matches your hardware." 
msgstr "在你的浏览器中打开 http://fedoraproject.org/en/get-fedora-all,找到Install Media部分并下载适合你硬件的安装DVD文件。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Burn the disk image to a DVD http://docs.fedoraproject.org/en-US/Fedora/16/html/Burning_ISO_images_to_disc/index.html and boot from it, or use the image to boot a virtual machine." -msgstr "给你的机器取个名字。 http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/sn-networkconfig-fedora.html 我可以使用clusterlabs.org这个域名,所以在这里我用这个域名。" +msgstr "制作DVD光盘http://docs.fedoraproject.org/en-US/Fedora/16/html/Burning_ISO_images_to_disc/index.html并从它启动,或者使用这个镜像启动一个虚拟机。" #. Tag: para #, no-c-format msgid "After clicking through the welcome screen, select your language, keyboard layout http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-keyboard-x86.html and storage type http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/Storage_Devices-x86.html" -msgstr "" +msgstr "点击跳过欢迎画面后,选择你的语言,键盘布局http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-keyboard-x86.html以及存储类型http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/Storage_Devices-x86.html" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Assign your machine a host name. http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-Netconfig-x86.html I happen to control the clusterlabs.org domain name, so I will use that here." -msgstr "给你的机器取个名字。 http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/sn-networkconfig-fedora.html 我可以使用clusterlabs.org这个域名,所以在这里我用这个域名。" +msgstr "给你的机器取个名字。http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-Netconfig-x86.html 我可以使用clusterlabs.org这个域名,所以在这里我用这个域名。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Do not accept the default network settings. Cluster machines should never obtain an IP address via DHCP." -msgstr "不要使用默认的网络设置,集群永远不会靠DHCP来管理IP,这里我使用clusterslab的内部IP。" +msgstr "不要使用默认的网络设置,集群永远不会靠DHCP来管理IP。" #. Tag: para #, no-c-format msgid "When you are presented with the Configure Network advanced option, select that option before continuing with the installation process to specify a fixed IPv4 address for System eth0. Be sure to also enter the Routes section and add an entry for your default gateway." -msgstr "" +msgstr "当你出现在配置网络高级选项画面时,在安装过程继续进行前指定一个固定的IPv4地址的System eth0。此外,请务必进入“路由”选项并添加你的默认网关。" #. Tag: phrase #, no-c-format msgid "Custom network settings" -msgstr "" +msgstr "自定义网络设置" #. Tag: para #, no-c-format msgid "If you miss this step, this can easily be configured after installation. You will have to navigate to system settings and select network. From there you can select what device to configure." -msgstr "" +msgstr "如果你错过了这一步,也可以很容易的在完成安装后配置。你会在“系统设置”导航中选择“网络”。从这里你可以选择配置哪些设备。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "You will then be prompted to indicate the machine’s physical location http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-timezone-x86.html and to supply a root password. http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-account_configuration-x86.html" msgstr "然后你会被提示选择机器所在地并设定root密码。 http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/sn-account_configuration.html " #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Now select where you want Fedora installed. 
http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-diskpartsetup-x86.html As I don’t care about any existing data, I will accept the default and allow Fedora to use the complete drive." -msgstr "如然后你选择想在哪安装Fedora http://docs.fedoraproject.org/install-guide/f13/en-US/html/s1-diskpartsetup-x86.html 。 如果你像我一样不在意已存在的数据,就选择默认让Fedora来使用完整的驱动器。然而我想为DRBD保留一些空间,所以我勾选了Review and modify partitioning layout。" +msgstr "现在你选择想在哪安装Fedora http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-diskpartsetup-x86.html 。如果你像我一样不在意已存在的数据,就选择默认让Fedora来使用完整的驱动器。" #. Tag: para #, no-c-format msgid "By default Fedora uses LVM for partitioning which allows us to dynamically change the amount of space allocated to a given partition." -msgstr "" +msgstr "默认情况下Fedora使用LVM进行分区,这允许我们动态地改变分配给某个分区的空间大小。" #. Tag: para #, no-c-format msgid "However, by default it also allocates all free space to the / (aka. root) partition which cannot be dynamically reduced in size (dynamic increases are fine by-the-way)." -msgstr "" +msgstr "不过,默认情况下它也会把所有空闲空间都分配给/(即root)分区,而这个分区的大小无法动态缩减(顺便说一句,动态增加是可以的)。" #. Tag: para #, no-c-format msgid "So if you plan on following the DRBD or GFS2 portions of this guide, you should reserve at least 1Gb of space on each machine from which to create a shared volume. To do so select the Review and modify partitioning layout checkbox before clicking Next. You will then be given an opportunity to reduce the size of the root partition." -msgstr "" +msgstr "所以如果你打算跟随本指南的DRBD或GFS2部分,你应该在每台机器上保留至少1Gb的空间用来创建共享卷。为此,请在点击Next之前勾选Review and modify partitioning layout复选框。然后你将有机会缩减root分区的大小。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Next choose which software should be installed. http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-pkgselection-x86.html Change the selection to Minimal so that we see everything that gets installed. Don’t enable updates yet, we’ll do that (and install any extra software we need) later. After you click next, Fedora will begin installing." -msgstr "然后我们选择应该安装什么软件。因为我们想用Apache,所以选择Web Server。现在不要开启Update源,我们一会操作它。点击下一步,开始安装Fedora。" +msgstr "接下来选择要安装的软件。http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-pkgselection-x86.html 把选择改为Minimal,这样我们可以看清楚都安装了哪些东西。现在先不要启用更新,我们稍后再做(并安装我们需要的额外软件)。点击下一步后,Fedora就开始安装了。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Go grab something to drink, this may take a while." -msgstr "安装Fedora: 去搞点东西喝喝 这要一会儿" +msgstr "去搞点东西喝喝,这需要一段时间。" #. Tag: para #, no-c-format msgid "Once the node reboots, you’ll see a (possibly mangled) login prompt on the console. Login using root and the password you created earlier." -msgstr "" +msgstr "节点重启之后,你会在控制台看到登录提示(可能显示得有些乱)。使用你之前创建的root用户及密码登录。" #. Tag: phrase #, no-c-format msgid "Initial Console" -msgstr "" +msgstr "初始控制台" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "From here on in we’re going to be working exclusively from the terminal." -msgstr "这是最后一个截屏了,剩下的我们都用命令行来操作。" +msgstr "从这里开始,我们将完全在终端中进行操作。" #. Tag: title -#, fuzzy, no-c-format +#, no-c-format msgid "Post Installation Tasks" -msgstr "安装" +msgstr "安装后的任务" #. Tag: title #, fuzzy, no-c-format msgid "Networking" msgstr "设定网络" #. Tag: para #, no-c-format msgid "Bring up the network and ensure it starts at boot" -msgstr "" +msgstr "启动网络并确保它在开机时自动启动" #. Tag: programlisting #, no-c-format msgid "" "# service network start\n" "# chkconfig network on" msgstr "" #. Tag: para #, no-c-format msgid "Check the machine has the static IP address you configured earlier" -msgstr "" +msgstr "检查机器是否拥有你之前配置的静态IP地址" #. 
Tag: programlisting #, no-c-format msgid "" "# ip addr\n" "1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN\n" " link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00\n" " inet 127.0.0.1/8 scope host lo\n" " inet6 ::1/128 scope host\n" " valid_lft forever preferred_lft forever\n" "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000\n" " link/ether 52:54:00:d7:d6:08 brd ff:ff:ff:ff:ff:ff\n" " inet 192.168.122.101/24 brd 192.168.122.255 scope global eth0\n" " inet6 fe80::5054:ff:fed7:d608/64 scope link\n" " valid_lft forever preferred_lft forever" msgstr "" #. Tag: para #, no-c-format msgid "Now check the default route setting:" -msgstr "" +msgstr "现在检查默认路由设置:" #. Tag: programlisting #, no-c-format msgid "" "[root@pcmk-1 ~]# ip route\n" "default via 192.168.122.1 dev eth0\n" "192.168.122.0/24 dev eth0 proto kernel scope link src 192.168.122.101" msgstr "" #. Tag: para #, no-c-format msgid "If there is no line beginning with default via, then you may need to add a line such as" -msgstr "" +msgstr "如果没有以 default via 开头的一行,那么你可能需要添加如下一行" #. Tag: programlisting #, no-c-format msgid "GATEWAY=192.168.122.1" msgstr "" #. Tag: para #, no-c-format msgid "to /etc/sysconfig/network and restart the network." -msgstr "" +msgstr "到/etc/sysconfig/network,然后重启网络。" #. Tag: para #, no-c-format msgid "Now check for connectivity to the outside world. Start small by testing if we can read the gateway we configured." -msgstr "" +msgstr "现在检查与外部世界的连通性。先从小处着手,测试我们能否ping通之前配置的网关。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# ping -c 1 192.168.122.1\n" "PING 192.168.122.1 (192.168.122.1) 56(84) bytes of data.\n" "64 bytes from 192.168.122.1: icmp_req=1 ttl=64 time=0.249 ms\n" "\n" "--- 192.168.122.1 ping statistics ---\n" "1 packets transmitted, 1 received, 0% packet loss, time 0ms\n" "rtt min/avg/max/mdev = 0.249/0.249/0.249/0.000 ms" msgstr "" "\n" "\t ping -c 3 192.168.122.102\n" "[root@pcmk-1 ~]# ping -c 3 192.168.122.102\n" "PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.\n" "64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms\n" "64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms\n" "64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms\n" "\n" "--- 192.168.122.102 ping statistics ---\n" "3 packets transmitted, 3 received, 0% packet loss, time 2000ms\n" "rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms\n" "\t" #. Tag: para #, no-c-format msgid "Now try something external, choose a location you know will be available." -msgstr "" +msgstr "现在尝试一下外网,选择一个你熟悉的可用地址。" #. Tag: programlisting #, no-c-format msgid "" "# ping -c 1 www.google.com\n" "PING www.l.google.com (173.194.72.106) 56(84) bytes of data.\n" "64 bytes from tf-in-f106.1e100.net (173.194.72.106): icmp_req=1 ttl=41 time=167 ms\n" "\n" "--- www.l.google.com ping statistics ---\n" "1 packets transmitted, 1 received, 0% packet loss, time 0ms\n" "rtt min/avg/max/mdev = 167.618/167.618/167.618/0.000 ms" msgstr "" #. Tag: title #, no-c-format msgid "Leaving the Console" -msgstr "" +msgstr "离开控制台" #. Tag: para #, no-c-format msgid "The console isn’t a very friendly place to work from, we will now switch to accessing the machine remotely via SSH where we can use copy&paste etc." -msgstr "" +msgstr "控制台并不是一个很友好的工作环境,我们现在改用SSH远程访问机器,这样就可以使用复制/粘贴等功能了。" #. Tag: para #, no-c-format msgid "First we check we can see the newly installed at all:" -msgstr "" +msgstr "首先我们确认能否看到新装好的机器:" #. 
Tag: programlisting #, fuzzy, no-c-format msgid "" "beekhof@f16 ~ # ping -c 1 192.168.122.101\n" "PING 192.168.122.101 (192.168.122.101) 56(84) bytes of data.\n" "64 bytes from 192.168.122.101: icmp_req=1 ttl=64 time=1.01 ms\n" "\n" "--- 192.168.122.101 ping statistics ---\n" "1 packets transmitted, 1 received, 0% packet loss, time 0ms\n" "rtt min/avg/max/mdev = 1.012/1.012/1.012/0.000 ms" msgstr "" "\n" "\t ping -c 3 192.168.122.102\n" "[root@pcmk-1 ~]# ping -c 3 192.168.122.102\n" "PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.\n" "64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms\n" "64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms\n" "64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms\n" "\n" "--- 192.168.122.102 ping statistics ---\n" "3 packets transmitted, 3 received, 0% packet loss, time 2000ms\n" "rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms\n" "\t" #. Tag: para #, no-c-format msgid "Next we login via SSH" -msgstr "" +msgstr "接下来我们通过SSH登录" #. Tag: programlisting #, no-c-format msgid "" "beekhof@f16 ~ # ssh -l root 192.168.122.11\n" "root@192.168.122.11's password:\n" "Last login: Fri Mar 30 19:41:19 2012 from 192.168.122.1\n" "[root@pcmk-1 ~]#" msgstr "" #. Tag: title #, no-c-format msgid "Security Shortcuts" msgstr "安全提示" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "To simplify this guide and focus on the aspects directly connected to clustering, we will now disable the machine’s firewall and SELinux installation." -msgstr "为了简化本文档并更好的关注集群方面的问题,我们现在在先禁用防火墙和SELinux。这些操作都会导致重大的安全问题,并不推荐对公网上的集群这样做。" +msgstr "为了简化本文档并更好地关注集群方面的问题,我们现在先禁用防火墙和SELinux。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Both of these actions create significant security issues and should not be performed on machines that will be exposed to the outside world." -msgstr "为了简化本文档并更好的关注集群方面的问题,我们现在在先禁用防火墙和SELinux。这些操作都会导致重大的安全问题,并不推荐对公网上的集群这样做。" +msgstr "这些操作都会导致重大的安全问题,并不推荐对公网上的集群这样做。" #. Tag: literallayout #, no-c-format msgid "TODO: Create an Appendix that deals with (at least) re-enabling the firewall." msgstr "TODO: Create an Appendix that deals with (at least) re-enabling the firewall." #. Tag: programlisting #, no-c-format msgid "" "# setenforce 0\n" "# sed -i.bak \"s/SELINUX=enforcing/SELINUX=permissive/g\" /etc/selinux/config\n" "# systemctl disable iptables.service\n" "# rm '/etc/systemd/system/basic.target.wants/iptables.service'\n" "# systemctl stop iptables.service" msgstr "" #. Tag: title #, no-c-format msgid "Short Node Names" msgstr "简化节点名称" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "During installation, we filled in the machine’s fully qualifier domain name (FQDN) which can be rather long when it appears in cluster logs and status output. See for yourself how the machine identifies itself: Nodesshort name short name " -msgstr "在安装过程中,我们发现FQDN域名太长了,不利于在日志或状态界面中查看,我们用以下操作来简化机器名:" +msgstr "在安装过程中,我们填写了机器的完全限定域名(FQDN),它出现在集群日志和状态输出中时会显得相当长。你可以自己看看机器是如何标识自己的:" #. Tag: programlisting #, no-c-format msgid "" "# uname -n\n" "pcmk-1.clusterlabs.org\n" "# dnsdomainname\n" "clusterlabs.org" msgstr "" #. Tag: para #, no-c-format msgid " NodesDomain name (Query) Domain name (Query) " msgstr "" #. Tag: para #, no-c-format msgid "The output from the second command is fine, but we really don’t need the domain name included in the basic host details. To address this, we need to update /etc/sysconfig/network. This is what it should look like before we start."
-msgstr "第二个命令的输出是正常的,但是我们真的不需要这么详细的输出,我们更改/etc/sysconfig/network文件来达到简化的目的。" +msgstr "第二个命令的输出是正常的,但是我们真的不需要这么详细的输出,所以我们更改/etc/sysconfig/network文件来达到简化的目的。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# cat /etc/sysconfig/network\n" "NETWORKING=yes\n" "HOSTNAME=pcmk-1.clusterlabs.org\n" "GATEWAY=192.168.122.1" msgstr "" "\n" "[root@pcmk-1 ~]# cat /etc/sysconfig/network\n" "NETWORKING=yes\n" "HOSTNAME=pcmk-1.clusterlabs.org\n" "GATEWAY=192.168.122.1\n" " " #. Tag: para #, no-c-format msgid "All we need to do now is strip off the domain name portion, which is stored elsewhere anyway." -msgstr "我们要做的只是要把域名后面的部分去掉。" +msgstr "我们要做的只是把域名后面的部分去掉。" #. Tag: programlisting #, fuzzy, no-c-format msgid " # sed -i.sed 's/\\.[a-z].*//g' /etc/sysconfig/network" msgstr "[root@pcmk-1 ~]# sed -i.bak 's/\\.[a-z].*//g' /etc/sysconfig/network" #. Tag: para #, no-c-format msgid "Now confirm the change was successful. The revised file contents should look something like this." msgstr "现在cat一下看看更改是否成功了。" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# cat /etc/sysconfig/network\n" "NETWORKING=yes\n" "HOSTNAME=pcmk-1\n" "GATEWAY=192.168.122.1" msgstr "" "\n" "[root@pcmk-1 ~]# cat /etc/sysconfig/network\n" "NETWORKING=yes\n" "HOSTNAME=pcmk-1\n" "GATEWAY=192.168.122.1\n" " " #. Tag: para #, no-c-format msgid "However we’re not finished. The machine wont normally see the shortened host name until about it reboots, but we can force it to update." -msgstr "然而到这里还没结束,机器还没接受新的配置文件,我们强制它生效。" +msgstr "到这里还没结束,机器还没接受新的配置文件,我们强制使它生效。" #. Tag: programlisting #, no-c-format msgid "" "# source /etc/sysconfig/network\n" "# hostname $HOSTNAME" msgstr "" #. Tag: para #, no-c-format msgid " NodesDomain name (Remove from host name) Domain name (Remove from host name) " msgstr "" #. Tag: para #, no-c-format msgid "Now check the machine is using the correct names" msgstr "现在我们看看是否按达到我们预期的效果:" #. Tag: programlisting #, no-c-format msgid "" "# uname -n\n" "pcmk-1\n" "# dnsdomainname\n" "clusterlabs.org" msgstr "" #. Tag: title #, no-c-format msgid "NTP" msgstr "" #. Tag: para #, no-c-format msgid "It is highly recommended to enable NTP on your cluster nodes. Doing so ensures all nodes agree on the current time and makes reading log files significantly easier. http://docs.fedoraproject.org/en-US/Fedora/17/html-single/System_Administrators_Guide/index.html#ch-Configuring_the_Date_and_Time" -msgstr "" +msgstr "强烈推荐在你的集群节点上启动NTP服务。这样做保证了所有节点时间一致,这使得查看日志变得容易。http://docs.fedoraproject.org/en-US/Fedora/17/html-single/System_Administrators_Guide/index.html#ch-Configuring_the_Date_and_Time" #. Tag: title #, no-c-format msgid "Before You Continue" msgstr "写在开始之前" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Repeat the Installation steps so far, so that you have two Fedora nodes ready to have the cluster software installed." -msgstr "在另一台Fedora 12机器上面重复以上操作步骤,这样你就有2台安装了集群软件的节点了。" +msgstr "在另一台Fedora机器上面重复以上操作步骤,这样你就有2台安装了集群软件的节点了。" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "For the purposes of this document, the additional node is called pcmk-2 with address 192.168.122.102." -msgstr "在这篇文档中, 另外一个节点叫 pcmk-2 并且IP地址为 192.168.122.42。" +msgstr "在这篇文档中, 另外一个节点叫 pcmk-2 并且IP地址为 192.168.122.102。" #. Tag: title #, no-c-format msgid "Finalize Networking" msgstr "设定网络" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Confirm that you can communicate between the two new nodes:" msgstr "确认这两个新节点能够通讯:" #. 
Tag: programlisting #, fuzzy, no-c-format msgid "" "# ping -c 3 192.168.122.102\n" "PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.\n" "64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms\n" "64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms\n" "64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms\n" "\n" "--- 192.168.122.102 ping statistics ---\n" "3 packets transmitted, 3 received, 0% packet loss, time 2000ms\n" "rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms" msgstr "" "\n" "\t ping -c 3 192.168.122.102\n" "[root@pcmk-1 ~]# ping -c 3 192.168.122.102\n" "PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.\n" "64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms\n" "64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms\n" "64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms\n" "\n" "--- 192.168.122.102 ping statistics ---\n" "3 packets transmitted, 3 received, 0% packet loss, time 2000ms\n" "rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms\n" "\t" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Now we need to make sure we can communicate with the machines by their name. If you have a DNS server, add additional entries for the two machines. Otherwise, you’ll need to add the machines to /etc/hosts . Below are the entries for my cluster nodes:" -msgstr "现在我们需要确认我们能通过机器名访问这两台机器,如果你有一个DNS服务器,为这两台节点做域名解析。" +msgstr "现在我们需要确认我们能通过机器名访问这两台机器,如果你有一个DNS服务器,为这两台节点做域名解析。否则你需要修改 /etc/hosts。下面是我的集群节点:" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# grep pcmk /etc/hosts\n" "192.168.122.101 pcmk-1.clusterlabs.org pcmk-1\n" "192.168.122.102 pcmk-2.clusterlabs.org pcmk-2" msgstr "" "\n" "\t grep pcmk /etc/hosts\n" "[root@pcmk-1 ~]# grep pcmk /etc/hosts\n" "192.168.122.101 pcmk-1.clusterlabs.org pcmk-1\n" "192.168.122.102 pcmk-2.clusterlabs.org pcmk-2\n" "\t" #. Tag: para #, no-c-format msgid "We can now verify the setup by again using ping:" msgstr "现在让我们ping一下:" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# ping -c 3 pcmk-2\n" "PING pcmk-2.clusterlabs.org (192.168.122.101) 56(84) bytes of data.\n" "64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=1 ttl=64 time=0.164 ms\n" "64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=2 ttl=64 time=0.475 ms\n" "64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=3 ttl=64 time=0.186 ms\n" "\n" "--- pcmk-2.clusterlabs.org ping statistics ---\n" "3 packets transmitted, 3 received, 0% packet loss, time 2001ms\n" "rtt min/avg/max/mdev = 0.164/0.275/0.475/0.141 ms" msgstr "" "\n" "\t ping -c 3 pcmk-2\n" "[root@pcmk-1 ~]# ping -c 3 pcmk-2\n" "PING pcmk-2.clusterlabs.org (192.168.122.101) 56(84) bytes of data.\n" "64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=1 ttl=64 time=0.164 ms\n" "64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=2 ttl=64 time=0.475 ms\n" "64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=3 ttl=64 time=0.186 ms\n" "\n" "--- pcmk-2.clusterlabs.org ping statistics ---\n" "3 packets transmitted, 3 received, 0% packet loss, time 2001ms\n" "rtt min/avg/max/mdev = 0.164/0.275/0.475/0.141 ms\n" "\t" #. Tag: title #, no-c-format msgid "Configure SSH" msgstr "配置SSH" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "SSH is a convenient and secure way to copy files and perform commands remotely. 
For the purposes of this guide, we will create a key without a password (using the -N option) so that we can perform remote actions without being prompted." -msgstr "SSH 是一个方便又安全来的用来远程传输文件或运行命令 的工具. 在这个文档中, 我们创建ssh key(用 -N “” 选项)来免去登入要输入密码的麻烦。" +msgstr "SSH 是一个方便又安全的远程传输文件和执行命令的工具。在本文档中,我们创建一个不带密码的密钥(使用 -N 选项),这样执行远程操作时就不会被提示输入密码。" #. Tag: para #, no-c-format msgid " SSH " msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Unprotected SSH keys, those without a password, are not recommended for servers exposed to the outside world. We use them here only to simplify the demo." -msgstr "不推荐在公网的机器上采用未用密码保护的ssh-key" +msgstr "不受密码保护的SSH密钥是不安全的,不推荐在暴露于公网的服务器上使用。在这里我们仅用它来简化演示。" #. Tag: para #, no-c-format msgid "Create a new key and allow anyone with that key to log in:" -msgstr "创建一个密钥并允许所有有这个密钥的用户登入" +msgstr "创建一个新密钥,并允许持有该密钥的任何人登录:" #. Tag: title #, no-c-format msgid "Creating and Activating a new SSH Key" msgstr "创建并激活一个新的SSH密钥" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N \"\"\n" "Generating public/private dsa key pair.\n" "Your identification has been saved in /root/.ssh/id_dsa.\n" "Your public key has been saved in /root/.ssh/id_dsa.pub.\n" "The key fingerprint is:\n" "91:09:5c:82:5a:6a:50:08:4e:b2:0c:62:de:cc:74:44 root@pcmk-1.clusterlabs.org\n" "\n" "The key's randomart image is:\n" "+--[ DSA 1024]----+\n" "|==.ooEo.. |\n" "|X O + .o o |\n" "| * A + |\n" "| + . |\n" "| . S |\n" "| |\n" "| |\n" "| |\n" "| |\n" "+-----------------+\n" "\n" "# cp .ssh/id_dsa.pub .ssh/authorized_keys" msgstr "" "\n" "[root@pcmk-1 ~]# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N \"\"\n" "Generating public/private dsa key pair.\n" "Your identification has been saved in /root/.ssh/id_dsa.\n" "Your public key has been saved in /root/.ssh/id_dsa.pub.\n" "The key fingerprint is:\n" "91:09:5c:82:5a:6a:50:08:4e:b2:0c:62:de:cc:74:44 root@pcmk-1.clusterlabs.org\n" "\n" "The key's randomart image is:\n" "+--[ DSA 1024]----+\n" "|==.ooEo.. |\n" "|X O + .o o |\n" "| * A + |\n" "| + . |\n" "| . S |\n" "| |\n" "| |\n" "| |\n" "| |\n" "+-----------------+\n" "[root@pcmk-1 ~]# cp .ssh/id_dsa.pub .ssh/authorized_keys\n" "[root@pcmk-1 ~]#\n" "\t" #. Tag: para #, fuzzy, no-c-format msgid " Creating and Activating a new SSH Key " msgstr "创建并激活一个新的SSH密钥" #. Tag: para #, no-c-format msgid "Install the key on the other nodes and test that you can now run commands remotely, without being prompted" -msgstr "在其他节点安装这个密钥并测试你是否可以执行命令而不用输入密码" +msgstr "在其他节点上安装这个密钥,并测试你现在是否可以不输入密码就远程执行命令。" #. Tag: title #, no-c-format msgid "Installing the SSH Key on Another Host" msgstr "在另一个机器上面安装SSH密钥" #. Tag: programlisting #, fuzzy, no-c-format msgid "" "# scp -r .ssh pcmk-2:\n" "The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established.\n" "RSA key fingerprint is b1:2b:55:93:f1:d9:52:2b:0f:f2:8a:4e:ae:c6:7c:9a.\n" "Are you sure you want to continue connecting (yes/no)? yes\n" "Warning: Permanently added 'pcmk-2,192.168.122.102' (RSA) to the list of known hosts.\n" "root@pcmk-2's password:\n" "id_dsa.pub 100% 616 0.6KB/s 00:00\n" "id_dsa 100% 672 0.7KB/s 00:00\n" "known_hosts 100% 400 0.4KB/s 00:00\n" "authorized_keys 100% 616 0.6KB/s 00:00\n" "# ssh pcmk-2 -- uname -n\n" "pcmk-2\n" "#" msgstr "" "\n" "[root@pcmk-1 ~]# scp -r .ssh pcmk-2:\n" "The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established.\n" "RSA key fingerprint is b1:2b:55:93:f1:d9:52:2b:0f:f2:8a:4e:ae:c6:7c:9a.\n" "Are you sure you want to continue connecting (yes/no)? 
yes\n" "Warning: Permanently added 'pcmk-2,192.168.122.102' (RSA) to the list of known hosts.\n" "root@pcmk-2's password: \n" "id_dsa.pub 100% 616 0.6KB/s 00:00 \n" "id_dsa 100% 672 0.7KB/s 00:00 \n" "known_hosts 100% 400 0.4KB/s 00:00 \n" "authorized_keys 100% 616 0.6KB/s 00:00 \n" "[root@pcmk-1 ~]# ssh pcmk-2 -- uname -n\n" "pcmk-2\n" "[root@pcmk-1 ~]#\n" "\t" #. Tag: title #, no-c-format msgid "Cluster Software Installation" msgstr "集群软件安装" #. Tag: title #, no-c-format msgid "Install the Cluster Software" msgstr "安装集群软件" #. Tag: para #, no-c-format msgid "Since version 12, Fedora comes with recent versions of everything you need, so simply fire up the shell and run:" msgstr "从Fedora 12开始,你需要的东西都已经准备好了,只需在终端命令行运行以下命令:" #. Tag: programlisting #, no-c-format msgid "# yum install -y pacemaker corosync" msgstr "" #. Tag: literallayout #, no-c-format msgid "" "fedora/metalink | 38 kB 00:00\n" "fedora | 4.2 kB 00:00\n" "fedora/primary_db | 14 MB 00:21\n" "updates/metalink | 2.7 kB 00:00\n" "updates | 2.6 kB 00:00\n" "updates/primary_db | 1.2 kB 00:00\n" "updates-testing/metalink | 28 kB 00:00\n" "updates-testing | 4.5 kB 00:00\n" "updates-testing/primary_db | 4.5 MB 00:12\n" "Setting up Install Process\n" "Resolving Dependencies\n" "--> Running transaction check\n" "---> Package corosync.x86_64 0:1.99.9-1.fc17 will be installed\n" "--> Processing Dependency: corosynclib = 1.99.9-1.fc17 for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libxslt for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libvotequorum.so.5(COROSYNC_VOTEQUORUM_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libquorum.so.5(COROSYNC_QUORUM_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcpg.so.4(COROSYNC_CPG_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcmap.so.4(COROSYNC_CMAP_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcfg.so.6(COROSYNC_CFG_0.82)(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libvotequorum.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libtotem_pg.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libquorum.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libqb.so.0()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libnetsnmp.so.30()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcpg.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcorosync_common.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcmap.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libcfg.so.6()(64bit) for package: corosync-1.99.9-1.fc17.x86_64\n" "---> Package pacemaker.x86_64 0:1.1.7-2.fc17 will be installed\n" "--> Processing Dependency: pacemaker-libs = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: pacemaker-cluster-libs = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: pacemaker-cli = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: resource-agents for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: perl(Getopt::Long) for package: 
pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libgnutls.so.26(GNUTLS_1_4)(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: cluster-glue for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: /usr/bin/perl for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libstonith.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libpils.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libpe_status.so.3()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libltdl.so.7()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libgnutls.so.26()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64\n" "--> Running transaction check\n" "---> Package cluster-glue.x86_64 0:1.0.6-9.fc17.1 will be installed\n" "--> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0.6-9.fc17.1.x86_64\n" "--> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64\n" "--> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64\n" "--> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64\n" "---> Package cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1 will be installed\n" "---> Package corosynclib.x86_64 0:1.99.9-1.fc17 will be installed\n" "--> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64\n" "--> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64\n" "---> Package gnutls.x86_64 0:2.12.17-1.fc17 will be installed\n" "--> Processing Dependency: libtasn1.so.3(LIBTASN1_0_3)(64bit) for package: gnutls-2.12.17-1.fc17.x86_64\n" "--> Processing Dependency: libtasn1.so.3()(64bit) for package: gnutls-2.12.17-1.fc17.x86_64\n" "--> Processing Dependency: libp11-kit.so.0()(64bit) for package: gnutls-2.12.17-1.fc17.x86_64\n" "---> Package libqb.x86_64 0:0.11.1-1.fc17 will be installed\n" "---> Package libtool-ltdl.x86_64 0:2.4.2-3.fc17 will be installed\n" "---> Package libxslt.x86_64 0:1.1.26-9.fc17 will be installed\n" "---> Package net-snmp-libs.x86_64 1:5.7.1-4.fc17 will be installed\n" 
"---> Package pacemaker-cli.x86_64 0:1.1.7-2.fc17 will be installed\n" "---> Package pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17 will be installed\n" "---> Package pacemaker-libs.x86_64 0:1.1.7-2.fc17 will be installed\n" "---> Package perl.x86_64 4:5.14.2-211.fc17 will be installed\n" "--> Processing Dependency: perl-libs = 4:5.14.2-211.fc17 for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(threads::shared) >= 1.21 for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Socket) >= 1.3 for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Scalar::Util) >= 1.10 for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(File::Spec) >= 0.8 for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl-macros for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl-libs for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(threads::shared) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(threads) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Socket) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Scalar::Util) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Pod::Simple) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Module::Pluggable) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(List::Util) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(File::Spec::Unix) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(File::Spec::Functions) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(File::Spec) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Cwd) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: perl(Carp) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "--> Processing Dependency: libperl.so()(64bit) for package: 4:perl-5.14.2-211.fc17.x86_64\n" "---> Package resource-agents.x86_64 0:3.9.2-2.fc17.1 will be installed\n" "--> Processing Dependency: /usr/sbin/rpc.nfsd for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /usr/sbin/rpc.mountd for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /usr/sbin/ethtool for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/rpc.statd for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/quotaon for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/quotacheck for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/mount.nfs4 for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/mount.nfs for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/mount.cifs for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: /sbin/fsck.xfs for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Processing Dependency: libnet.so.1()(64bit) for package: resource-agents-3.9.2-2.fc17.1.x86_64\n" "--> Running transaction check\n" "---> Package OpenIPMI-libs.x86_64 0:2.0.18-13.fc17 will be installed\n" "---> Package cifs-utils.x86_64 0:5.3-2.fc17 will be installed\n" "--> Processing Dependency: 
libtalloc.so.2(TALLOC_2.0.2)(64bit) for package: cifs-utils-5.3-2.fc17.x86_64\n" "--> Processing Dependency: keyutils for package: cifs-utils-5.3-2.fc17.x86_64\n" "--> Processing Dependency: libwbclient.so.0()(64bit) for package: cifs-utils-5.3-2.fc17.x86_64\n" "--> Processing Dependency: libtalloc.so.2()(64bit) for package: cifs-utils-5.3-2.fc17.x86_64\n" "---> Package ethtool.x86_64 2:3.2-2.fc17 will be installed\n" "---> Package libibverbs.x86_64 0:1.1.6-2.fc17 will be installed\n" "---> Package libnet.x86_64 0:1.1.5-3.fc17 will be installed\n" "---> Package librdmacm.x86_64 0:1.0.15-1.fc17 will be installed\n" "---> Package libtasn1.x86_64 0:2.12-1.fc17 will be installed\n" "---> Package nfs-utils.x86_64 1:1.2.5-12.fc17 will be installed\n" "--> Processing Dependency: rpcbind for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libtirpc for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libnfsidmap for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libgssglue.so.1(libgssapi_CITI_2)(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libgssglue for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libevent for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libtirpc.so.1()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libnfsidmap.so.0()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libgssglue.so.1()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "--> Processing Dependency: libevent-2.0.so.5()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64\n" "---> Package p11-kit.x86_64 0:0.12-1.fc17 will be installed\n" "---> Package perl-Carp.noarch 0:1.22-2.fc17 will be installed\n" "---> Package perl-Module-Pluggable.noarch 1:3.90-211.fc17 will be installed\n" "---> Package perl-PathTools.x86_64 0:3.33-211.fc17 will be installed\n" "---> Package perl-Pod-Simple.noarch 1:3.16-211.fc17 will be installed\n" "--> Processing Dependency: perl(Pod::Escapes) >= 1.04 for package: 1:perl-Pod-Simple-3.16-211.fc17.noarch\n" "---> Package perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17 will be installed\n" "---> Package perl-Socket.x86_64 0:2.001-1.fc17 will be installed\n" "---> Package perl-TimeDate.noarch 1:1.20-6.fc17 will be installed\n" "---> Package perl-libs.x86_64 4:5.14.2-211.fc17 will be installed\n" "---> Package perl-macros.x86_64 4:5.14.2-211.fc17 will be installed\n" "---> Package perl-threads.x86_64 0:1.86-2.fc17 will be installed\n" "---> Package perl-threads-shared.x86_64 0:1.40-2.fc17 will be installed\n" "---> Package quota.x86_64 1:4.00-3.fc17 will be installed\n" "--> Processing Dependency: quota-nls = 1:4.00-3.fc17 for package: 1:quota-4.00-3.fc17.x86_64\n" "--> Processing Dependency: tcp_wrappers for package: 1:quota-4.00-3.fc17.x86_64\n" "---> Package xfsprogs.x86_64 0:3.1.8-1.fc17 will be installed\n" "--> Running transaction check\n" "---> Package keyutils.x86_64 0:1.5.5-2.fc17 will be installed\n" "---> Package libevent.x86_64 0:2.0.14-2.fc17 will be installed\n" "---> Package libgssglue.x86_64 0:0.3-1.fc17 will be installed\n" "---> Package libnfsidmap.x86_64 0:0.25-1.fc17 will be installed\n" "---> Package libtalloc.x86_64 0:2.0.7-4.fc17 will be installed\n" "---> Package libtirpc.x86_64 0:0.2.2-2.1.fc17 will be installed\n" "---> Package libwbclient.x86_64 1:3.6.3-81.fc17.1 will be installed\n" "---> Package perl-Pod-Escapes.noarch 
1:1.04-211.fc17 will be installed\n" "---> Package quota-nls.noarch 1:4.00-3.fc17 will be installed\n" "---> Package rpcbind.x86_64 0:0.2.0-16.fc17 will be installed\n" "---> Package tcp_wrappers.x86_64 0:7.6-69.fc17 will be installed\n" "--> Finished Dependency Resolution\n" "\n" "Dependencies Resolved\n" "\n" "=====================================================================================\n" " Package Arch Version Repository Size\n" "=====================================================================================\n" "Installing:\n" " corosync x86_64 1.99.9-1.fc17 updates-testing 159 k\n" " pacemaker x86_64 1.1.7-2.fc17 updates-testing 362 k\n" "Installing for dependencies:\n" " OpenIPMI-libs x86_64 2.0.18-13.fc17 fedora 466 k\n" " cifs-utils x86_64 5.3-2.fc17 updates-testing 66 k\n" " cluster-glue x86_64 1.0.6-9.fc17.1 fedora 229 k\n" " cluster-glue-libs x86_64 1.0.6-9.fc17.1 fedora 121 k\n" " corosynclib x86_64 1.99.9-1.fc17 updates-testing 96 k\n" " ethtool x86_64 2:3.2-2.fc17 fedora 94 k\n" " gnutls x86_64 2.12.17-1.fc17 fedora 385 k\n" " keyutils x86_64 1.5.5-2.fc17 fedora 49 k\n" " libevent x86_64 2.0.14-2.fc17 fedora 160 k\n" " libgssglue x86_64 0.3-1.fc17 fedora 24 k\n" " libibverbs x86_64 1.1.6-2.fc17 fedora 44 k\n" " libnet x86_64 1.1.5-3.fc17 fedora 54 k\n" " libnfsidmap x86_64 0.25-1.fc17 fedora 34 k\n" " libqb x86_64 0.11.1-1.fc17 updates-testing 68 k\n" " librdmacm x86_64 1.0.15-1.fc17 fedora 27 k\n" " libtalloc x86_64 2.0.7-4.fc17 fedora 22 k\n" " libtasn1 x86_64 2.12-1.fc17 updates-testing 319 k\n" " libtirpc x86_64 0.2.2-2.1.fc17 fedora 78 k\n" " libtool-ltdl x86_64 2.4.2-3.fc17 fedora 45 k\n" " libwbclient x86_64 1:3.6.3-81.fc17.1 updates-testing 68 k\n" " libxslt x86_64 1.1.26-9.fc17 fedora 416 k\n" " net-snmp-libs x86_64 1:5.7.1-4.fc17 fedora 713 k\n" " nfs-utils x86_64 1:1.2.5-12.fc17 fedora 311 k\n" " p11-kit x86_64 0.12-1.fc17 updates-testing 36 k\n" " pacemaker-cli x86_64 1.1.7-2.fc17 updates-testing 368 k\n" " pacemaker-cluster-libs x86_64 1.1.7-2.fc17 updates-testing 77 k\n" " pacemaker-libs x86_64 1.1.7-2.fc17 updates-testing 322 k\n" " perl x86_64 4:5.14.2-211.fc17 fedora 10 M\n" " perl-Carp noarch 1.22-2.fc17 fedora 17 k\n" " perl-Module-Pluggable noarch 1:3.90-211.fc17 fedora 47 k\n" " perl-PathTools x86_64 3.33-211.fc17 fedora 105 k\n" " perl-Pod-Escapes noarch 1:1.04-211.fc17 fedora 40 k\n" " perl-Pod-Simple noarch 1:3.16-211.fc17 fedora 223 k\n" " perl-Scalar-List-Utils x86_64 1.25-1.fc17 updates-testing 33 k\n" " perl-Socket x86_64 2.001-1.fc17 updates-testing 44 k\n" " perl-TimeDate noarch 1:1.20-6.fc17 fedora 43 k\n" " perl-libs x86_64 4:5.14.2-211.fc17 fedora 628 k\n" " perl-macros x86_64 4:5.14.2-211.fc17 fedora 32 k\n" " perl-threads x86_64 1.86-2.fc17 fedora 47 k\n" " perl-threads-shared x86_64 1.40-2.fc17 fedora 36 k\n" " quota x86_64 1:4.00-3.fc17 fedora 160 k\n" " quota-nls noarch 1:4.00-3.fc17 fedora 74 k\n" " resource-agents x86_64 3.9.2-2.fc17.1 fedora 466 k\n" " rpcbind x86_64 0.2.0-16.fc17 fedora 52 k\n" " tcp_wrappers x86_64 7.6-69.fc17 fedora 72 k\n" " xfsprogs x86_64 3.1.8-1.fc17 updates-testing 715 k\n" "\n" "Transaction Summary\n" "=====================================================================================\n" "Install 2 Packages (+46 Dependent packages)\n" "\n" "Total download size: 18 M\n" "Installed size: 59 M\n" "Downloading Packages:\n" "(1/48): OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm | 466 kB 00:00\n" "warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID 1aca3465: NOKEY\n" "Public key for 
OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm is not installed\n" "(2/48): cifs-utils-5.3-2.fc17.x86_64.rpm | 66 kB 00:01\n" "Public key for cifs-utils-5.3-2.fc17.x86_64.rpm is not installed\n" "(3/48): cluster-glue-1.0.6-9.fc17.1.x86_64.rpm | 229 kB 00:00\n" "(4/48): cluster-glue-libs-1.0.6-9.fc17.1.x86_64.rpm | 121 kB 00:00\n" "(5/48): corosync-1.99.9-1.fc17.x86_64.rpm | 159 kB 00:01\n" "(6/48): corosynclib-1.99.9-1.fc17.x86_64.rpm | 96 kB 00:00\n" "(7/48): ethtool-3.2-2.fc17.x86_64.rpm | 94 kB 00:00\n" "(8/48): gnutls-2.12.17-1.fc17.x86_64.rpm | 385 kB 00:00\n" "(9/48): keyutils-1.5.5-2.fc17.x86_64.rpm | 49 kB 00:00\n" "(10/48): libevent-2.0.14-2.fc17.x86_64.rpm | 160 kB 00:00\n" "(11/48): libgssglue-0.3-1.fc17.x86_64.rpm | 24 kB 00:00\n" "(12/48): libibverbs-1.1.6-2.fc17.x86_64.rpm | 44 kB 00:00\n" "(13/48): libnet-1.1.5-3.fc17.x86_64.rpm | 54 kB 00:00\n" "(14/48): libnfsidmap-0.25-1.fc17.x86_64.rpm | 34 kB 00:00\n" "(15/48): libqb-0.11.1-1.fc17.x86_64.rpm | 68 kB 00:01\n" "(16/48): librdmacm-1.0.15-1.fc17.x86_64.rpm | 27 kB 00:00\n" "(17/48): libtalloc-2.0.7-4.fc17.x86_64.rpm | 22 kB 00:00\n" "(18/48): libtasn1-2.12-1.fc17.x86_64.rpm | 319 kB 00:02\n" "(19/48): libtirpc-0.2.2-2.1.fc17.x86_64.rpm | 78 kB 00:00\n" "(20/48): libtool-ltdl-2.4.2-3.fc17.x86_64.rpm | 45 kB 00:00\n" "(21/48): libwbclient-3.6.3-81.fc17.1.x86_64.rpm | 68 kB 00:00\n" "(22/48): libxslt-1.1.26-9.fc17.x86_64.rpm | 416 kB 00:00\n" "(23/48): net-snmp-libs-5.7.1-4.fc17.x86_64.rpm | 713 kB 00:01\n" "(24/48): nfs-utils-1.2.5-12.fc17.x86_64.rpm | 311 kB 00:00\n" "(25/48): p11-kit-0.12-1.fc17.x86_64.rpm | 36 kB 00:01\n" "(26/48): pacemaker-1.1.7-2.fc17.x86_64.rpm | 362 kB 00:02\n" "(27/48): pacemaker-cli-1.1.7-2.fc17.x86_64.rpm | 368 kB 00:02\n" "(28/48): pacemaker-cluster-libs-1.1.7-2.fc17.x86_64.rpm | 77 kB 00:00\n" "(29/48): pacemaker-libs-1.1.7-2.fc17.x86_64.rpm | 322 kB 00:01\n" "(30/48): perl-5.14.2-211.fc17.x86_64.rpm | 10 MB 00:15\n" "(31/48): perl-Carp-1.22-2.fc17.noarch.rpm | 17 kB 00:00\n" "(32/48): perl-Module-Pluggable-3.90-211.fc17.noarch.rpm | 47 kB 00:00\n" "(33/48): perl-PathTools-3.33-211.fc17.x86_64.rpm | 105 kB 00:00\n" "(34/48): perl-Pod-Escapes-1.04-211.fc17.noarch.rpm | 40 kB 00:00\n" "(35/48): perl-Pod-Simple-3.16-211.fc17.noarch.rpm | 223 kB 00:00\n" "(36/48): perl-Scalar-List-Utils-1.25-1.fc17.x86_64.rpm | 33 kB 00:01\n" "(37/48): perl-Socket-2.001-1.fc17.x86_64.rpm | 44 kB 00:00\n" "(38/48): perl-TimeDate-1.20-6.fc17.noarch.rpm | 43 kB 00:00\n" "(39/48): perl-libs-5.14.2-211.fc17.x86_64.rpm | 628 kB 00:00\n" "(40/48): perl-macros-5.14.2-211.fc17.x86_64.rpm | 32 kB 00:00\n" "(41/48): perl-threads-1.86-2.fc17.x86_64.rpm | 47 kB 00:00\n" "(42/48): perl-threads-shared-1.40-2.fc17.x86_64.rpm | 36 kB 00:00\n" "(43/48): quota-4.00-3.fc17.x86_64.rpm | 160 kB 00:00\n" "(44/48): quota-nls-4.00-3.fc17.noarch.rpm | 74 kB 00:00\n" "(45/48): resource-agents-3.9.2-2.fc17.1.x86_64.rpm | 466 kB 00:00\n" "(46/48): rpcbind-0.2.0-16.fc17.x86_64.rpm | 52 kB 00:00\n" "(47/48): tcp_wrappers-7.6-69.fc17.x86_64.rpm | 72 kB 00:00\n" "(48/48): xfsprogs-3.1.8-1.fc17.x86_64.rpm | 715 kB 00:03\n" "----------------------------------------------------------------------------------------\n" "Total 333 kB/s | 18 MB 00:55\n" "Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64\n" "Importing GPG key 0x1ACA3465:\n" " Userid : \"Fedora (17) <fedora@fedoraproject.org>\"\n" " Fingerprint: cac4 3fb7 74a4 a673 d81c 5de7 50e9 4c99 1aca 3465\n" " Package : fedora-release-17-0.8.noarch (@anaconda-0)\n" " From : 
/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64\n" "Running Transaction Check\n" "Running Transaction Test\n" "Transaction Test Succeeded\n" "Running Transaction\n" " Installing : libqb-0.11.1-1.fc17.x86_64 1/48\n" " Installing : libtool-ltdl-2.4.2-3.fc17.x86_64 2/48\n" " Installing : cluster-glue-libs-1.0.6-9.fc17.1.x86_64 3/48\n" " Installing : libxslt-1.1.26-9.fc17.x86_64 4/48\n" " Installing : 1:perl-Pod-Escapes-1.04-211.fc17.noarch 5/48\n" " Installing : perl-threads-1.86-2.fc17.x86_64 6/48\n" " Installing : 4:perl-macros-5.14.2-211.fc17.x86_64 7/48\n" " Installing : 1:perl-Pod-Simple-3.16-211.fc17.noarch 8/48\n" " Installing : perl-Socket-2.001-1.fc17.x86_64 9/48\n" " Installing : perl-Carp-1.22-2.fc17.noarch 10/48\n" " Installing : 4:perl-libs-5.14.2-211.fc17.x86_64 11/48\n" " Installing : perl-threads-shared-1.40-2.fc17.x86_64 12/48\n" " Installing : perl-Scalar-List-Utils-1.25-1.fc17.x86_64 13/48\n" " Installing : 1:perl-Module-Pluggable-3.90-211.fc17.noarch 14/48\n" " Installing : perl-PathTools-3.33-211.fc17.x86_64 15/48\n" " Installing : 4:perl-5.14.2-211.fc17.x86_64 16/48\n" " Installing : libibverbs-1.1.6-2.fc17.x86_64 17/48\n" " Installing : keyutils-1.5.5-2.fc17.x86_64 18/48\n" " Installing : libgssglue-0.3-1.fc17.x86_64 19/48\n" " Installing : libtirpc-0.2.2-2.1.fc17.x86_64 20/48\n" " Installing : 1:net-snmp-libs-5.7.1-4.fc17.x86_64 21/48\n" " Installing : rpcbind-0.2.0-16.fc17.x86_64 22/48\n" " Installing : librdmacm-1.0.15-1.fc17.x86_64 23/48\n" " Installing : corosynclib-1.99.9-1.fc17.x86_64 24/48\n" " Installing : corosync-1.99.9-1.fc17.x86_64 25/48\n" "error reading information on service corosync: No such file or directory\n" " Installing : 1:perl-TimeDate-1.20-6.fc17.noarch 26/48\n" " Installing : 1:quota-nls-4.00-3.fc17.noarch 27/48\n" " Installing : tcp_wrappers-7.6-69.fc17.x86_64 28/48\n" " Installing : 1:quota-4.00-3.fc17.x86_64 29/48\n" " Installing : libnfsidmap-0.25-1.fc17.x86_64 30/48\n" " Installing : 1:libwbclient-3.6.3-81.fc17.1.x86_64 31/48\n" " Installing : libnet-1.1.5-3.fc17.x86_64 32/48\n" " Installing : 2:ethtool-3.2-2.fc17.x86_64 33/48\n" " Installing : libevent-2.0.14-2.fc17.x86_64 34/48\n" " Installing : 1:nfs-utils-1.2.5-12.fc17.x86_64 35/48\n" " Installing : libtalloc-2.0.7-4.fc17.x86_64 36/48\n" " Installing : cifs-utils-5.3-2.fc17.x86_64 37/48\n" " Installing : libtasn1-2.12-1.fc17.x86_64 38/48\n" " Installing : OpenIPMI-libs-2.0.18-13.fc17.x86_64 39/48\n" " Installing : cluster-glue-1.0.6-9.fc17.1.x86_64 40/48\n" " Installing : p11-kit-0.12-1.fc17.x86_64 41/48\n" " Installing : gnutls-2.12.17-1.fc17.x86_64 42/48\n" " Installing : pacemaker-libs-1.1.7-2.fc17.x86_64 43/48\n" " Installing : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64 44/48\n" " Installing : pacemaker-cli-1.1.7-2.fc17.x86_64 45/48\n" " Installing : xfsprogs-3.1.8-1.fc17.x86_64 46/48\n" " Installing : resource-agents-3.9.2-2.fc17.1.x86_64 47/48\n" " Installing : pacemaker-1.1.7-2.fc17.x86_64 48/48\n" " Verifying : xfsprogs-3.1.8-1.fc17.x86_64 1/48\n" " Verifying : 1:net-snmp-libs-5.7.1-4.fc17.x86_64 2/48\n" " Verifying : corosync-1.99.9-1.fc17.x86_64 3/48\n" " Verifying : cluster-glue-1.0.6-9.fc17.1.x86_64 4/48\n" " Verifying : perl-PathTools-3.33-211.fc17.x86_64 5/48\n" " Verifying : p11-kit-0.12-1.fc17.x86_64 6/48\n" " Verifying : 1:perl-Pod-Simple-3.16-211.fc17.noarch 7/48\n" " Verifying : OpenIPMI-libs-2.0.18-13.fc17.x86_64 8/48\n" " Verifying : libtasn1-2.12-1.fc17.x86_64 9/48\n" " Verifying : perl-threads-1.86-2.fc17.x86_64 10/48\n" " Verifying : 
1:perl-Pod-Escapes-1.04-211.fc17.noarch 11/48\n" " Verifying : pacemaker-1.1.7-2.fc17.x86_64 12/48\n" " Verifying : 4:perl-5.14.2-211.fc17.x86_64 13/48\n" " Verifying : gnutls-2.12.17-1.fc17.x86_64 14/48\n" " Verifying : perl-threads-shared-1.40-2.fc17.x86_64 15/48\n" " Verifying : 4:perl-macros-5.14.2-211.fc17.x86_64 16/48\n" " Verifying : 1:perl-Module-Pluggable-3.90-211.fc17.noarch 17/48\n" " Verifying : 1:nfs-utils-1.2.5-12.fc17.x86_64 18/48\n" " Verifying : cluster-glue-libs-1.0.6-9.fc17.1.x86_64 19/48\n" " Verifying : pacemaker-libs-1.1.7-2.fc17.x86_64 20/48\n" " Verifying : libtalloc-2.0.7-4.fc17.x86_64 21/48\n" " Verifying : libevent-2.0.14-2.fc17.x86_64 22/48\n" " Verifying : perl-Socket-2.001-1.fc17.x86_64 23/48\n" " Verifying : libgssglue-0.3-1.fc17.x86_64 24/48\n" " Verifying : perl-Carp-1.22-2.fc17.noarch 25/48\n" " Verifying : libtirpc-0.2.2-2.1.fc17.x86_64 26/48\n" " Verifying : 2:ethtool-3.2-2.fc17.x86_64 27/48\n" " Verifying : 4:perl-libs-5.14.2-211.fc17.x86_64 28/48\n" " Verifying : libxslt-1.1.26-9.fc17.x86_64 29/48\n" " Verifying : rpcbind-0.2.0-16.fc17.x86_64 30/48\n" " Verifying : librdmacm-1.0.15-1.fc17.x86_64 31/48\n" " Verifying : resource-agents-3.9.2-2.fc17.1.x86_64 32/48\n" " Verifying : 1:quota-4.00-3.fc17.x86_64 33/48\n" " Verifying : 1:perl-TimeDate-1.20-6.fc17.noarch 34/48\n" " Verifying : perl-Scalar-List-Utils-1.25-1.fc17.x86_64 35/48\n" " Verifying : libtool-ltdl-2.4.2-3.fc17.x86_64 36/48\n" " Verifying : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64 37/48\n" " Verifying : cifs-utils-5.3-2.fc17.x86_64 38/48\n" " Verifying : libnet-1.1.5-3.fc17.x86_64 39/48\n" " Verifying : corosynclib-1.99.9-1.fc17.x86_64 40/48\n" " Verifying : libqb-0.11.1-1.fc17.x86_64 41/48\n" " Verifying : 1:libwbclient-3.6.3-81.fc17.1.x86_64 42/48\n" " Verifying : libnfsidmap-0.25-1.fc17.x86_64 43/48\n" " Verifying : tcp_wrappers-7.6-69.fc17.x86_64 44/48\n" " Verifying : keyutils-1.5.5-2.fc17.x86_64 45/48\n" " Verifying : libibverbs-1.1.6-2.fc17.x86_64 46/48\n" " Verifying : 1:quota-nls-4.00-3.fc17.noarch 47/48\n" " Verifying : pacemaker-cli-1.1.7-2.fc17.x86_64 48/48\n" "\n" "Installed:\n" " corosync.x86_64 0:1.99.9-1.fc17 pacemaker.x86_64 0:1.1.7-2.fc17\n" "\n" "Dependency Installed:\n" " OpenIPMI-libs.x86_64 0:2.0.18-13.fc17 cifs-utils.x86_64 0:5.3-2.fc17\n" " cluster-glue.x86_64 0:1.0.6-9.fc17.1 cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1\n" " corosynclib.x86_64 0:1.99.9-1.fc17 ethtool.x86_64 2:3.2-2.fc17\n" " gnutls.x86_64 0:2.12.17-1.fc17 keyutils.x86_64 0:1.5.5-2.fc17\n" " libevent.x86_64 0:2.0.14-2.fc17 libgssglue.x86_64 0:0.3-1.fc17\n" " libibverbs.x86_64 0:1.1.6-2.fc17 libnet.x86_64 0:1.1.5-3.fc17\n" " libnfsidmap.x86_64 0:0.25-1.fc17 libqb.x86_64 0:0.11.1-1.fc17\n" " librdmacm.x86_64 0:1.0.15-1.fc17 libtalloc.x86_64 0:2.0.7-4.fc17\n" " libtasn1.x86_64 0:2.12-1.fc17 libtirpc.x86_64 0:0.2.2-2.1.fc17\n" " libtool-ltdl.x86_64 0:2.4.2-3.fc17 libwbclient.x86_64 1:3.6.3-81.fc17.1\n" " libxslt.x86_64 0:1.1.26-9.fc17 net-snmp-libs.x86_64 1:5.7.1-4.fc17\n" " nfs-utils.x86_64 1:1.2.5-12.fc17 p11-kit.x86_64 0:0.12-1.fc17\n" " pacemaker-cli.x86_64 0:1.1.7-2.fc17 pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17\n" " pacemaker-libs.x86_64 0:1.1.7-2.fc17 perl.x86_64 4:5.14.2-211.fc17\n" " perl-Carp.noarch 0:1.22-2.fc17 perl-Module-Pluggable.noarch 1:3.90-211.fc17\n" " perl-PathTools.x86_64 0:3.33-211.fc17 perl-Pod-Escapes.noarch 1:1.04-211.fc17\n" " perl-Pod-Simple.noarch 1:3.16-211.fc17 perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17\n" " perl-Socket.x86_64 0:2.001-1.fc17 perl-TimeDate.noarch 
1:1.20-6.fc17\n" " perl-libs.x86_64 4:5.14.2-211.fc17 perl-macros.x86_64 4:5.14.2-211.fc17\n" " perl-threads.x86_64 0:1.86-2.fc17 perl-threads-shared.x86_64 0:1.40-2.fc17\n" " quota.x86_64 1:4.00-3.fc17 quota-nls.noarch 1:4.00-3.fc17\n" " resource-agents.x86_64 0:3.9.2-2.fc17.1 rpcbind.x86_64 0:0.2.0-16.fc17\n" " tcp_wrappers.x86_64 0:7.6-69.fc17 xfsprogs.x86_64 0:3.1.8-1.fc17\n" "\n" "Complete!\n" "[root@pcmk-1 ~]#" msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "Now install the cluster software on the second node." -msgstr "安装集群软件" +msgstr "在第二个节点上安装集群软件" #. Tag: title -#, fuzzy, no-c-format +#, no-c-format msgid "Install the Cluster Management Software" -msgstr "安装集群软件" +msgstr "安装集群管理软件" #. Tag: para #, no-c-format msgid "The pcs cli command coupled with the pcs daemon creates a cluster management system capable of managing all aspects of the cluster stack across all nodes from a single location." msgstr "" #. Tag: programlisting #, no-c-format msgid "# yum install -y pcs" msgstr "" #. Tag: para #, no-c-format msgid "Make sure to install the pcs packages on both nodes." msgstr "" #. Tag: title #, no-c-format msgid "Setup" msgstr "安装" #. Tag: title #, no-c-format msgid "Enable pcs Daemon" msgstr "" #. Tag: para #, no-c-format msgid "Before the cluster can be configured, the pcs daemon must be started and enabled to boot on startup on each node. This daemon works with the pcs cli command to manage syncing the corosync configuration across all the nodes in the cluster." msgstr "" #. Tag: para #, no-c-format msgid "Start and enable the daemon by issuing the following commands on each node." msgstr "" #. Tag: programlisting #, no-c-format msgid "" "# systemctl start pcsd.service\n" "# systemctl enable pcsd.service" msgstr "" #. Tag: para #, no-c-format msgid "Now we need a way for pcs to talk to itself on other nodes in the cluster. This is necessary in order to perform tasks such as syncing the corosync config, or starting/stopping the cluster on remote nodes" msgstr "" #. Tag: para #, no-c-format msgid "While pcs can be used locally without setting up these user accounts, this tutorial will make use of these remote access commands, so we will set a password for the hacluster user. Its probably best if password is consistent across all the nodes." msgstr "" #. Tag: para #, no-c-format msgid "As root, run:" msgstr "" #. Tag: programlisting #, no-c-format msgid "" "# passwd hacluster\n" "password:" msgstr "" #. Tag: para #, no-c-format msgid "Alternatively, to script this process or set the password on a different machine to the one you’re logged into, you can use the --stdin option for passwd:" msgstr "" #. Tag: programlisting #, no-c-format msgid "# ssh pcmk-2 -- 'echo redhat1 | passwd --stdin hacluster'" msgstr "" #. Tag: title #, no-c-format msgid "Notes on Multicast Address Assignment" msgstr "" #. Tag: para #, fuzzy, no-c-format msgid "There are several subtle points that often deserve consideration when choosing/assigning multicast addresses. This information is borrowed from, the now defunct, http://web.archive.org/web/20101211210054/http://29west.com/docs/THPM/multicast-address-assignment.html" msgstr "请注意你选择的端口和地址不能跟已存在的集群冲突,关于组播地址的选择,可以参考 http://www.29west.com/docs/THPM/multicast-address-assignment.html " #. Tag: para #, no-c-format msgid "Avoid 224.0.0.x" msgstr "" #. Tag: para #, no-c-format msgid "Traffic to addresses of the form 224.0.0.x is often flooded to all switch ports. This address range is reserved for link-local uses. 
Many routing protocols assume that all traffic within this range will be received by all routers on the network. Hence (at least all Cisco) switches flood traffic within this range. The flooding behavior overrides the normal selective forwarding behavior of a multicast-aware switch (e.g. IGMP snooping, CGMP, etc.)." msgstr "" #. Tag: para #, no-c-format msgid "Watch for 32:1 overlap" msgstr "" #. Tag: para #, no-c-format msgid "32 non-contiguous IP multicast addresses are mapped onto each Ethernet multicast address. A receiver that joins a single IP multicast group implicitly joins 31 others due to this overlap. Of course, filtering in the operating system discards undesired multicast traffic from applications, but NIC bandwidth and CPU resources are nonetheless consumed discarding it. The overlap occurs in the 5 high-order bits, so it’s best to use the 23 low-order bits to make distinct multicast streams unique. For example, IP multicast addresses in the range 239.0.0.0 to 239.127.255.255 all map to unique Ethernet multicast addresses. However, IP multicast address 239.128.0.0 maps to the same Ethernet multicast address as 239.0.0.0, 239.128.0.1 maps to the same Ethernet multicast address as 239.0.0.1, etc." msgstr "" #. Tag: para #, no-c-format msgid "Avoid x.0.0.y and x.128.0.y" msgstr "" #. Tag: para #, no-c-format msgid "Combining the above two considerations, it’s best to avoid using IP multicast addresses of the form x.0.0.y and x.128.0.y since they all map onto the range of Ethernet multicast addresses that are flooded to all switch ports." msgstr "" #. Tag: para #, no-c-format msgid "Watch for address assignment conflicts" msgstr "" #. Tag: para #, no-c-format msgid "IANA administers Internet multicast addresses. Potential conflicts with Internet multicast address assignments can be avoided by using GLOP addressing (AS required) or administratively scoped addresses. Such addresses can be safely used on a network connected to the Internet without fear of conflict with multicast sources originating on the Internet. Administratively scoped addresses are roughly analogous to the unicast address space for private internets. Site-local multicast addresses are of the form 239.255.x.y, but can grow down to 239.252.x.y if needed. Organization-local multicast addresses are of the form 239.192-251.x.y, but can grow down to 239.x.y.z if needed." msgstr "" #. Tag: para #, no-c-format msgid "For a more detailed treatment (57 pages!), see Cisco’s Guidelines for Enterprise IP Multicast Address Allocation paper." msgstr "" #. Tag: title #, no-c-format msgid "Configuring Corosync" msgstr "配置 Corosync" #. Tag: para #, no-c-format msgid "In the past, at this point in the tutorial an explanation of how to configure and propagate corosync’s /etc/corosync.conf file would be necessary. Using pcs with the pcs daemon greatly simplifies this process by generating corosync.conf across all the nodes in the cluster with a single command. The only thing required to achieve this is to authenticate as the pcs user hacluster on one of the nodes in the cluster, and then issue the pcs cluster setup command with a list of all the node names in the cluster." msgstr "" #. Tag: programlisting #, no-c-format msgid "" "# pcs cluster auth pcmk-1 pcmk-2\n" "Username: hacluster\n" "Password:\n" "pcmk-1: Authorized\n" "pcmk-2: Authorized\n" "\n" "# pcs cluster setup mycluster pcmk-1 pcmk-2\n" "pcmk-1: Succeeded\n" "pcmk-2: Succeeded" msgstr "" #. Tag: para #, no-c-format msgid "That’s it. 
Corosync is configured across the cluster. If you received an authorization error for either of those commands, make sure you set up the hacluster user account and password on every node in the cluster with the same password." msgstr "" #. Tag: para -#, fuzzy, no-c-format +#, no-c-format msgid "The final /etc/corosync.conf configuration on each node should look something like the sample in Appendix B, Sample Corosync Configuration." -msgstr "最后配置文件应该看起来像下面的样子。" +msgstr "每个节点上最终的 /etc/corosync.conf 配置应该类似于附录B中的Corosync配置示例。" #. Tag: para #, no-c-format msgid "Pacemaker used to obtain membership and quorum from a custom Corosync plugin. This plugin also had the capability to start Pacemaker automatically when Corosync was started." msgstr "" #. Tag: para #, no-c-format msgid "Neither behavior is possible with Corosync 2.0 and beyond as support for plugins was removed." msgstr "" #. Tag: para #, no-c-format msgid "Instead, Pacemaker must be started as a separate job/initscript. Also, since Pacemaker made use of the plugin for message routing, a node using the plugin (Corosync prior to 2.0) cannot talk to one that isn’t (Corosync 2.0+)." msgstr "" #. Tag: para #, no-c-format msgid "Rolling upgrades between these versions are therefore not possible and an alternate strategy http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/ap-upgrade.html must be used." msgstr "" #~ msgid "Burn the disk image to a DVD http://docs.fedoraproject.org/readme-burning-isos/en-US.html and boot from it. Or use the image to boot a virtual machine as I have done here. After clicking through the welcome screen, select your language and keyboard layout http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/s1-langselection-x86.html " #~ msgstr "烧录一个DVD光盘 http://docs.fedoraproject.org/readme-burning-isos/en-US.html 并从它启动。或者就像我一样启动一个虚拟机。在点击欢迎界面的NEXT后,我们要开始选择语言和键盘类型 http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/s1-langselection-x86.html " #~ msgid "Fedora Installation - Welcome" #~ msgstr "安装Fedora - 欢迎 " #~ msgid "Fedora Installation: Good choice" #~ msgstr "安装Fedora: 好的选择!" #~ msgid "Fedora Installation - Storage Devices" #~ msgstr "安装Fedora - 存储设备" #~ msgid "Fedora Installation: Storage Devices" #~ msgstr "安装Fedora: 存储设备" #~ msgid "Fedora Installation - Hostname" #~ msgstr "安装Fedora - 机器名" #~ msgid "Fedora Installation: Choose a hostname" #~ msgstr "安装Fedora: 选择一个机器名" #~ msgid "Fedora Installation - Installation Type" #~ msgstr "安装Fedora - 安装类型" #~ msgid "Fedora Installation: Choose an installation type" #~ msgstr "安装Fedora: 选择安装类型" #~ msgid "By default, Fedora will give all the space to the / (aka. root) partition. We'll take some back so we can use DRBD." #~ msgstr "默认的话,Fedora会将所有的空间都分配给/ (aka. 根)分区。我们要保留一点给DRBD。" #~ msgid "Fedora Installation - Default Partitioning" #~ msgstr "安装Fedora - 默认分区" #~ msgid "The finalized partition layout should look something like the diagram below." #~ msgstr "完整的分区应该像下面一样。" #~ msgid "If you plan on following the DRBD or GFS2 portions of this guide, you should reserve at least 1Gb of space on each machine from which to create a shared volume." 
#~ msgstr "如果你想试验本文档中关于DRBD或者GFS2的部分,你要为每个节点保留至少1Gb的空间。" #~ msgid "Fedora Installation - Customize Partitioning" #~ msgstr "安装Fedora - 自定义分区" #~ msgid "Fedora Installation: Create a partition to use (later) for website data" #~ msgstr "安装Fedora: 创建一个网站存放数据用的分区" #~ msgid "Fedora Installation - Bootloader" #~ msgstr "安装Fedora - Bootloader" #~ msgid "Fedora Installation: Unless you have a strong reason not to, accept the default bootloader location" #~ msgstr "安装Fedora: 除非有非常强力的理由,不然选择默认的bootloader安装位置" #~ msgid "Fedora Installation - Software" #~ msgstr "安装Fedora - 软件" #~ msgid "Fedora Installation: Software selection" #~ msgstr "安装Fedora: 软件选择" #~ msgid "Fedora Installation - Installing" #~ msgstr "安装Fedora - 安装中" #~ msgid "Fedora Installation - Installation Complete" #~ msgstr "安装Fedora - 安装完成" #~ msgid "Fedora Installation: Stage 1, completed" #~ msgstr "安装Fedora: Stage 1, 完成" #~ msgid "Once the node reboots, follow the on screen instructions http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/ch-firstboot.html to create a system user and configure the time." #~ msgstr "一旦系统重启完毕你可以看到以下界面 http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/ch-firstboot.html ,然后配置用户和设定时间。" #~ msgid "Fedora Installation - First Boot" #~ msgstr "安装Fedora - 第一次启动" #~ msgid "Fedora Installation - Create Non-privileged User" #~ msgstr "安装Fedora - 创建非特权用户" #~ msgid "Fedora Installation: Creating a non-privileged user, take note of the password, you'll need it soon" #~ msgstr "安装Fedora: 创建非特权用户,请注意密码,一会你要用到它的。" #~ msgid "It is highly recommended to enable NTP on your cluster nodes. Doing so ensures all nodes agree on the current time and makes reading log files significantly easier." #~ msgstr "强烈建议开启NTP时间同步,这样可以使集群更好的同步配置文件以及使日志文件有更好的可读性。" #~ msgid "Fedora Installation - Date and Time" #~ msgstr "安装Fedora - 日期和时间" #~ msgid "Fedora Installation: Enable NTP to keep the times on all your nodes consistent" #~ msgstr "安装Fedora : 启用NTP来保证所有节点时间同步" #~ msgid "Click through the next screens until you reach the login window. Click on the user you created and supply the password you indicated earlier." #~ msgstr "点击next会进入登入界面,点击你创建的用户并输入之前设定的密码。" #~ msgid "Fedora Installation - Customize Networking" #~ msgstr "安装Fedora -自定义网络" #~ msgid "Fedora Installation: Click here to configure networking" #~ msgstr "安装Fedora: 点击这里来配置网络" #~ msgid "Fedora Installation - Specify Network Preferences" #~ msgstr "安装Fedora - 指定网络参数" #~ msgid "Fedora Installation: Specify network settings for your machine, never choose DHCP" #~ msgstr "安装Fedora: 设定你的网络,永远不要选择DHCP" #~ msgid "Fedora Installation - Activate Networking" #~ msgstr "安装Fedora - 激活网络" #~ msgid "Fedora Installation: Click the big green button to activate your changes" #~ msgstr "安装Fedora:点击绿色按钮来应用你的更改" #~ msgid "Fedora Installation - Bring up the Terminal" #~ msgstr "安装Fedora - 打开终端" #~ msgid "Fedora Installation: Down to business, fire up the command line" #~ msgstr "安装Fedora:开始干活,打开终端" #~ msgid "Go to the terminal window you just opened and switch to the super user (aka. \"root\") account with the su command. You will need to supply the password you entered earlier during the installation process." 
#~ msgstr "打开一个终端,然后使用su命令切换到超级用户(root)。输入之前安装时候设定的密码:" #~ msgid "" #~ "\n" #~ "[beekhof@pcmk-1 ~]$ su -\n" #~ "Password:\n" #~ "[root@pcmk-1 ~]#\n" #~ " " #~ msgstr "" #~ "\n" #~ "[beekhof@pcmk-1 ~]$ su -\n" #~ "Password:\n" #~ "[root@pcmk-1 ~]#\n" #~ " " #~ msgid "Note that the username (the text before the @ symbol) now indicates we’re running as the super user “root”." #~ msgstr "注意用户名 (@符号左边的字符串) 显示我们现在使用的是root用户." #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# ip addr\n" #~ "1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \n" #~ " link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00\n" #~ " inet 127.0.0.1/8 scope host lo\n" #~ " inet6 ::1/128 scope host \n" #~ " valid_lft forever preferred_lft forever\n" #~ "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" #~ " link/ether 00:0c:29:6f:e1:58 brd ff:ff:ff:ff:ff:ff\n" #~ " inet 192.168.9.41/24 brd 192.168.9.255 scope global eth0\n" #~ " inet6 ::20c:29ff:fe6f:e158/64 scope global dynamic \n" #~ " valid_lft 2591667sec preferred_lft 604467sec\n" #~ " inet6 2002:57ae:43fc:0:20c:29ff:fe6f:e158/64 scope global dynamic \n" #~ " valid_lft 2591990sec preferred_lft 604790sec\n" #~ " inet6 fe80::20c:29ff:fe6f:e158/64 scope link \n" #~ " valid_lft forever preferred_lft forever\n" #~ "[root@pcmk-1 ~]# ping -c 1 www.google.com\n" #~ "PING www.l.google.com (74.125.39.99) 56(84) bytes of data.\n" #~ "64 bytes from fx-in-f99.1e100.net (74.125.39.99): icmp_seq=1 ttl=56 time=16.7 ms\n" #~ "\n" #~ "--- www.l.google.com ping statistics ---\n" #~ "1 packets transmitted, 1 received, 0% packet loss, time 20ms\n" #~ "rtt min/avg/max/mdev = 16.713/16.713/16.713/0.000 ms\n" #~ "[root@pcmk-1 ~]# /sbin/chkconfig network on\n" #~ "[root@pcmk-1 ~]# \n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# ip addr\n" #~ "1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \n" #~ " link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00\n" #~ " inet 127.0.0.1/8 scope host lo\n" #~ " inet6 ::1/128 scope host \n" #~ " valid_lft forever preferred_lft forever\n" #~ "2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n" #~ " link/ether 00:0c:29:6f:e1:58 brd ff:ff:ff:ff:ff:ff\n" #~ " inet 192.168.9.41/24 brd 192.168.9.255 scope global eth0\n" #~ " inet6 ::20c:29ff:fe6f:e158/64 scope global dynamic \n" #~ " valid_lft 2591667sec preferred_lft 604467sec\n" #~ " inet6 2002:57ae:43fc:0:20c:29ff:fe6f:e158/64 scope global dynamic \n" #~ " valid_lft 2591990sec preferred_lft 604790sec\n" #~ " inet6 fe80::20c:29ff:fe6f:e158/64 scope link \n" #~ " valid_lft forever preferred_lft forever\n" #~ "[root@pcmk-1 ~]# ping -c 1 www.google.com\n" #~ "PING www.l.google.com (74.125.39.99) 56(84) bytes of data.\n" #~ "64 bytes from fx-in-f99.1e100.net (74.125.39.99): icmp_seq=1 ttl=56 time=16.7 ms\n" #~ "\n" #~ "--- www.l.google.com ping statistics ---\n" #~ "1 packets transmitted, 1 received, 0% packet loss, time 20ms\n" #~ "rtt min/avg/max/mdev = 16.713/16.713/16.713/0.000 ms\n" #~ "[root@pcmk-1 ~]# /sbin/chkconfig network on\n" #~ "[root@pcmk-1 ~]# \n" #~ " " #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/SELINUX=enforcing/SELINUX=permissive/g\" /etc/selinux/config\n" #~ "[root@pcmk-1 ~]# /sbin/chkconfig --del iptables\n" #~ "[root@pcmk-1 ~]# service iptables stop\n" #~ "iptables: Flushing firewall rules: [ OK ]\n" #~ "iptables: Setting chains to policy ACCEPT: filter [ OK ]\n" #~ "iptables: Unloading modules: [ OK ]\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# sed 
-i.bak \"s/SELINUX=enforcing/SELINUX=permissive/g\" /etc/selinux/config\n" #~ "[root@pcmk-1 ~]# /sbin/chkconfig --del iptables\n" #~ "[root@pcmk-1 ~]# service iptables stop\n" #~ "iptables: Flushing firewall rules: [ OK ]\n" #~ "iptables: Setting chains to policy ACCEPT: filter [ OK ]\n" #~ "iptables: Unloading modules: [ OK ]\n" #~ " " #~ msgid "You will need to reboot for the SELinux changes to take effect. Otherwise you will see something like this when you start corosync:" #~ msgstr "你需要重启来保证SELinux正确关闭。不然你启动corosync的时候将看到以下提示:" #~ msgid "" #~ "\n" #~ "May 4 19:30:54 pcmk-1 setroubleshoot: SELinux is preventing /usr/sbin/corosync \"getattr\" access on /. For complete SELinux messages. run sealert -l 6e0d4384-638e-4d55-9aaf-7dac011f29c1\n" #~ "May 4 19:30:54 pcmk-1 setroubleshoot: SELinux is preventing /usr/sbin/corosync \"getattr\" access on /. For complete SELinux messages. run sealert -l 6e0d4384-638e-4d55-9aaf-7dac011f29c1\n" #~ "\t" #~ msgstr "" #~ "\n" #~ "May 4 19:30:54 pcmk-1 setroubleshoot: SELinux is preventing /usr/sbin/corosync \"getattr\" access on /. For complete SELinux messages. run sealert -l 6e0d4384-638e-4d55-9aaf-7dac011f29c1\n" #~ "May 4 19:30:54 pcmk-1 setroubleshoot: SELinux is preventing /usr/sbin/corosync \"getattr\" access on /. For complete SELinux messages. run sealert -l 6e0d4384-638e-4d55-9aaf-7dac011f29c1\n" #~ "\t" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/enabled=0/enabled=1/g\" /etc/yum.repos.d/fedora.repo\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/enabled=0/enabled=1/g\" /etc/yum.repos.d/fedora-updates.repo\n" #~ "[root@pcmk-1 ~]# yum install -y pacemaker corosync\n" #~ "Loaded plugins: presto, refresh-packagekit\n" #~ "fedora/metalink \t | 22 kB 00:00 \n" #~ "fedora-debuginfo/metalink \t | 16 kB 00:00 \n" #~ "fedora-debuginfo \t | 3.2 kB 00:00 \n" #~ "fedora-debuginfo/primary_db \t | 1.4 MB 00:04 \n" #~ "fedora-source/metalink \t | 22 kB 00:00 \n" #~ "fedora-source \t | 3.2 kB 00:00 \n" #~ "fedora-source/primary_db \t | 3.0 MB 00:05 \n" #~ "updates/metalink \t | 26 kB 00:00 \n" #~ "updates \t | 2.6 kB 00:00 \n" #~ "updates/primary_db \t | 1.1 kB 00:00 \n" #~ "updates-debuginfo/metalink \t | 18 kB 00:00 \n" #~ "updates-debuginfo \t | 2.6 kB 00:00 \n" #~ "updates-debuginfo/primary_db \t | 1.1 kB 00:00 \n" #~ "updates-source/metalink \t | 25 kB 00:00 \n" #~ "updates-source \t | 2.6 kB 00:00 \n" #~ "updates-source/primary_db \t | 1.1 kB 00:00 \n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package corosync.x86_64 0:1.2.1-1.fc13 set to be updated\n" #~ "--> Processing Dependency: corosynclib = 1.2.1-1.fc13 for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libquorum.so.4(COROSYNC_QUORUM_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libvotequorum.so.4(COROSYNC_VOTEQUORUM_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcpg.so.4(COROSYNC_CPG_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libconfdb.so.4(COROSYNC_CONFDB_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcfg.so.4(COROSYNC_CFG_0.82)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpload.so.4(COROSYNC_PLOAD_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: liblogsys.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: 
libconfdb.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcoroipcc.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcpg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libquorum.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcoroipcs.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libvotequorum.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcfg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libtotem_pg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpload.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "---> Package pacemaker.x86_64 0:1.1.1-1.fc13 set to be updated\n" #~ "--> Processing Dependency: heartbeat >= 3.0.0 for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: net-snmp >= 5.4 for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: resource-agents for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: cluster-glue for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmp.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmpagent.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libesmtp.so.5()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libhbclient.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpils.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpe_status.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmpmibs.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmphelpers.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libccmclient.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libstonith.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Running transaction check\n" #~ "---> Package cluster-glue.x86_64 0:1.0.2-1.fc13 set to be updated\n" #~ "--> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: 
cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libopenhpi.so.2()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "---> Package cluster-glue-libs.x86_64 0:1.0.2-1.fc13 set to be updated\n" #~ "---> Package corosynclib.x86_64 0:1.2.1-1.fc13 set to be updated\n" #~ "--> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "---> Package heartbeat.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 set to be updated\n" #~ "--> Processing Dependency: PyXML for package: heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64\n" #~ "---> Package heartbeat-libs.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 set to be updated\n" #~ "---> Package libesmtp.x86_64 0:1.0.4-12.fc12 set to be updated\n" #~ "---> Package net-snmp.x86_64 1:5.5-12.fc13 set to be updated\n" #~ "--> Processing Dependency: libsensors.so.4()(64bit) for package: 1:net-snmp-5.5-12.fc13.x86_64\n" #~ "---> Package net-snmp-libs.x86_64 1:5.5-12.fc13 set to be updated\n" #~ "---> Package pacemaker-libs.x86_64 0:1.1.1-1.fc13 set to be updated\n" #~ "---> Package resource-agents.x86_64 0:3.0.10-1.fc13 set to be updated\n" #~ "--> Processing Dependency: libnet.so.1()(64bit) for package: resource-agents-3.0.10-1.fc13.x86_64\n" #~ "--> Running transaction check\n" #~ "---> Package OpenIPMI-libs.x86_64 0:2.0.16-8.fc13 set to be updated\n" #~ "---> Package PyXML.x86_64 0:0.8.4-17.fc13 set to be updated\n" #~ "---> Package libibverbs.x86_64 0:1.1.3-4.fc13 set to be updated\n" #~ "--> Processing Dependency: libibverbs-driver for package: libibverbs-1.1.3-4.fc13.x86_64\n" #~ "---> Package libnet.x86_64 0:1.1.4-3.fc12 set to be updated\n" #~ "---> Package librdmacm.x86_64 0:1.0.10-2.fc13 set to be updated\n" #~ "---> Package lm_sensors-libs.x86_64 0:3.1.2-2.fc13 set to be updated\n" #~ "---> Package openhpi-libs.x86_64 0:2.14.1-3.fc13 set to be updated\n" #~ "---> Package perl-TimeDate.noarch 1:1.20-1.fc13 set to be updated\n" #~ "--> Running transaction check\n" #~ "---> Package libmlx4.x86_64 0:1.0.1-5.fc13 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/enabled=0/enabled=1/g\" /etc/yum.repos.d/fedora.repo\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/enabled=0/enabled=1/g\" /etc/yum.repos.d/fedora-updates.repo\n" #~ "[root@pcmk-1 ~]# yum install -y pacemaker corosync\n" #~ "Loaded plugins: presto, refresh-packagekit\n" #~ "fedora/metalink \t | 22 kB 00:00 \n" #~ "fedora-debuginfo/metalink \t | 16 kB 00:00 \n" #~ "fedora-debuginfo \t | 3.2 kB 00:00 \n" #~ "fedora-debuginfo/primary_db \t | 1.4 MB 00:04 \n" #~ "fedora-source/metalink \t | 22 kB 00:00 \n" #~ "fedora-source \t | 3.2 kB 00:00 \n" #~ "fedora-source/primary_db \t | 3.0 MB 00:05 \n" #~ "updates/metalink \t | 26 kB 00:00 \n" #~ "updates \t | 2.6 kB 00:00 \n" #~ 
"updates/primary_db \t | 1.1 kB 00:00 \n" #~ "updates-debuginfo/metalink \t | 18 kB 00:00 \n" #~ "updates-debuginfo \t | 2.6 kB 00:00 \n" #~ "updates-debuginfo/primary_db \t | 1.1 kB 00:00 \n" #~ "updates-source/metalink \t | 25 kB 00:00 \n" #~ "updates-source \t | 2.6 kB 00:00 \n" #~ "updates-source/primary_db \t | 1.1 kB 00:00 \n" #~ "Setting up Install Process\n" #~ "Resolving Dependencies\n" #~ "--> Running transaction check\n" #~ "---> Package corosync.x86_64 0:1.2.1-1.fc13 set to be updated\n" #~ "--> Processing Dependency: corosynclib = 1.2.1-1.fc13 for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libquorum.so.4(COROSYNC_QUORUM_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libvotequorum.so.4(COROSYNC_VOTEQUORUM_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcpg.so.4(COROSYNC_CPG_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libconfdb.so.4(COROSYNC_CONFDB_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcfg.so.4(COROSYNC_CFG_0.82)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpload.so.4(COROSYNC_PLOAD_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: liblogsys.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libconfdb.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcoroipcc.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcpg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libquorum.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcoroipcs.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libvotequorum.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcfg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libtotem_pg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpload.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64\n" #~ "---> Package pacemaker.x86_64 0:1.1.1-1.fc13 set to be updated\n" #~ "--> Processing Dependency: heartbeat >= 3.0.0 for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: net-snmp >= 5.4 for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: resource-agents for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: cluster-glue for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmp.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmpagent.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libesmtp.so.5()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libhbclient.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpils.so.2()(64bit) for package: 
pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpe_status.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmpmibs.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libnetsnmphelpers.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libccmclient.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libstonith.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64\n" #~ "--> Running transaction check\n" #~ "---> Package cluster-glue.x86_64 0:1.0.2-1.fc13 set to be updated\n" #~ "--> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libopenhpi.so.2()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "--> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64\n" #~ "---> Package cluster-glue-libs.x86_64 0:1.0.2-1.fc13 set to be updated\n" #~ "---> Package corosynclib.x86_64 0:1.2.1-1.fc13 set to be updated\n" #~ "--> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "--> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64\n" #~ "---> Package heartbeat.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 set to be updated\n" #~ "--> Processing Dependency: PyXML for package: heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64\n" #~ "---> Package heartbeat-libs.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 set to be updated\n" #~ "---> Package libesmtp.x86_64 0:1.0.4-12.fc12 set to be updated\n" #~ "---> Package net-snmp.x86_64 1:5.5-12.fc13 set to be updated\n" #~ "--> Processing Dependency: libsensors.so.4()(64bit) for package: 1:net-snmp-5.5-12.fc13.x86_64\n" #~ "---> Package net-snmp-libs.x86_64 1:5.5-12.fc13 set to be updated\n" #~ "---> Package pacemaker-libs.x86_64 0:1.1.1-1.fc13 set to be updated\n" #~ "---> Package resource-agents.x86_64 0:3.0.10-1.fc13 set to be updated\n" #~ "--> Processing Dependency: libnet.so.1()(64bit) for package: resource-agents-3.0.10-1.fc13.x86_64\n" #~ "--> Running transaction check\n" #~ "---> Package OpenIPMI-libs.x86_64 0:2.0.16-8.fc13 set to be updated\n" #~ "---> Package PyXML.x86_64 0:0.8.4-17.fc13 set to be updated\n" #~ "---> Package 
libibverbs.x86_64 0:1.1.3-4.fc13 set to be updated\n" #~ "--> Processing Dependency: libibverbs-driver for package: libibverbs-1.1.3-4.fc13.x86_64\n" #~ "---> Package libnet.x86_64 0:1.1.4-3.fc12 set to be updated\n" #~ "---> Package librdmacm.x86_64 0:1.0.10-2.fc13 set to be updated\n" #~ "---> Package lm_sensors-libs.x86_64 0:3.1.2-2.fc13 set to be updated\n" #~ "---> Package openhpi-libs.x86_64 0:2.14.1-3.fc13 set to be updated\n" #~ "---> Package perl-TimeDate.noarch 1:1.20-1.fc13 set to be updated\n" #~ "--> Running transaction check\n" #~ "---> Package libmlx4.x86_64 0:1.0.1-5.fc13 set to be updated\n" #~ "--> Finished Dependency Resolution\n" #~ "\n" #~ "Dependencies Resolved\n" #~ "\n" #~ " " #~ msgid "" #~ "\n" #~ "==========================================================================================\n" #~ " Package Arch Version Repository Size\n" #~ "==========================================================================================\n" #~ "Installing:\n" #~ " corosync x86_64 1.2.1-1.fc13 fedora 136 k\n" #~ " pacemaker x86_64 1.1.1-1.fc13 fedora 543 k\n" #~ "Installing for dependencies:\n" #~ " OpenIPMI-libs x86_64 2.0.16-8.fc13 fedora 474 k\n" #~ " PyXML x86_64 0.8.4-17.fc13 fedora 906 k\n" #~ " cluster-glue x86_64 1.0.2-1.fc13 fedora 230 k\n" #~ " cluster-glue-libs x86_64 1.0.2-1.fc13 fedora 116 k\n" #~ " corosynclib x86_64 1.2.1-1.fc13 fedora 145 k\n" #~ " heartbeat x86_64 3.0.0-0.7.0daab7da36a8.hg.fc13 updates 172 k\n" #~ " heartbeat-libs x86_64 3.0.0-0.7.0daab7da36a8.hg.fc13 updates 265 k\n" #~ " libesmtp x86_64 1.0.4-12.fc12 fedora 54 k\n" #~ " libibverbs x86_64 1.1.3-4.fc13 fedora 42 k\n" #~ " libmlx4 x86_64 1.0.1-5.fc13 fedora 27 k\n" #~ " libnet x86_64 1.1.4-3.fc12 fedora 49 k\n" #~ " librdmacm x86_64 1.0.10-2.fc13 fedora 22 k\n" #~ " lm_sensors-libs x86_64 3.1.2-2.fc13 fedora 37 k\n" #~ " net-snmp x86_64 1:5.5-12.fc13 fedora 295 k\n" #~ " net-snmp-libs x86_64 1:5.5-12.fc13 fedora 1.5 M\n" #~ " openhpi-libs x86_64 2.14.1-3.fc13 fedora 135 k\n" #~ " pacemaker-libs x86_64 1.1.1-1.fc13 fedora 264 k\n" #~ " perl-TimeDate noarch 1:1.20-1.fc13 fedora 42 k\n" #~ " resource-agents x86_64 3.0.10-1.fc13 fedora 357 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "=========================================================================================\n" #~ "Install 21 Package(s)\n" #~ "Upgrade 0 Package(s)\n" #~ "\n" #~ "Total download size: 5.7 M\n" #~ "Installed size: 20 M\n" #~ "Downloading Packages:\n" #~ "Setting up and reading Presto delta metadata\n" #~ "updates-testing/prestodelta | 164 kB 00:00 \n" #~ "fedora/prestodelta | 150 B 00:00 \n" #~ "Processing delta metadata\n" #~ "Package(s) data still to download: 5.7 M\n" #~ "(1/21): OpenIPMI-libs-2.0.16-8.fc13.x86_64.rpm | 474 kB 00:00 \n" #~ "(2/21): PyXML-0.8.4-17.fc13.x86_64.rpm | 906 kB 00:01 \n" #~ "(3/21): cluster-glue-1.0.2-1.fc13.x86_64.rpm | 230 kB 00:00 \n" #~ "(4/21): cluster-glue-libs-1.0.2-1.fc13.x86_64.rpm | 116 kB 00:00 \n" #~ "(5/21): corosync-1.2.1-1.fc13.x86_64.rpm | 136 kB 00:00 \n" #~ "(6/21): corosynclib-1.2.1-1.fc13.x86_64.rpm | 145 kB 00:00 \n" #~ "(7/21): heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64.rpm | 172 kB 00:00 \n" #~ "(8/21): heartbeat-libs-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64.rpm | 265 kB 00:00 \n" #~ "(9/21): libesmtp-1.0.4-12.fc12.x86_64.rpm | 54 kB 00:00 \n" #~ "(10/21): libibverbs-1.1.3-4.fc13.x86_64.rpm | 42 kB 00:00 \n" #~ "(11/21): libmlx4-1.0.1-5.fc13.x86_64.rpm | 27 kB 00:00 \n" #~ "(12/21): libnet-1.1.4-3.fc12.x86_64.rpm | 49 kB 00:00 \n" #~ "(13/21): 
librdmacm-1.0.10-2.fc13.x86_64.rpm | 22 kB 00:00 \n" #~ "(14/21): lm_sensors-libs-3.1.2-2.fc13.x86_64.rpm | 37 kB 00:00 \n" #~ "(15/21): net-snmp-5.5-12.fc13.x86_64.rpm | 295 kB 00:00 \n" #~ "(16/21): net-snmp-libs-5.5-12.fc13.x86_64.rpm | 1.5 MB 00:01 \n" #~ "(17/21): openhpi-libs-2.14.1-3.fc13.x86_64.rpm | 135 kB 00:00 \n" #~ "(18/21): pacemaker-1.1.1-1.fc13.x86_64.rpm | 543 kB 00:00 \n" #~ "(19/21): pacemaker-libs-1.1.1-1.fc13.x86_64.rpm | 264 kB 00:00 \n" #~ "(20/21): perl-TimeDate-1.20-1.fc13.noarch.rpm | 42 kB 00:00 \n" #~ "(21/21): resource-agents-3.0.10-1.fc13.x86_64.rpm | 357 kB 00:00 \n" #~ "----------------------------------------------------------------------------------------\n" #~ "Total 539 kB/s | 5.7 MB 00:10 \n" #~ "warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID e8e40fde: NOKEY\n" #~ "fedora/gpgkey | 3.2 kB 00:00 ... \n" #~ "Importing GPG key 0xE8E40FDE \"Fedora (13) <fedora@fedoraproject.org%gt;\" from /etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64\n" #~ " " #~ msgstr "" #~ "\n" #~ "==========================================================================================\n" #~ " Package Arch Version Repository Size\n" #~ "==========================================================================================\n" #~ "Installing:\n" #~ " corosync x86_64 1.2.1-1.fc13 fedora 136 k\n" #~ " pacemaker x86_64 1.1.1-1.fc13 fedora 543 k\n" #~ "Installing for dependencies:\n" #~ " OpenIPMI-libs x86_64 2.0.16-8.fc13 fedora 474 k\n" #~ " PyXML x86_64 0.8.4-17.fc13 fedora 906 k\n" #~ " cluster-glue x86_64 1.0.2-1.fc13 fedora 230 k\n" #~ " cluster-glue-libs x86_64 1.0.2-1.fc13 fedora 116 k\n" #~ " corosynclib x86_64 1.2.1-1.fc13 fedora 145 k\n" #~ " heartbeat x86_64 3.0.0-0.7.0daab7da36a8.hg.fc13 updates 172 k\n" #~ " heartbeat-libs x86_64 3.0.0-0.7.0daab7da36a8.hg.fc13 updates 265 k\n" #~ " libesmtp x86_64 1.0.4-12.fc12 fedora 54 k\n" #~ " libibverbs x86_64 1.1.3-4.fc13 fedora 42 k\n" #~ " libmlx4 x86_64 1.0.1-5.fc13 fedora 27 k\n" #~ " libnet x86_64 1.1.4-3.fc12 fedora 49 k\n" #~ " librdmacm x86_64 1.0.10-2.fc13 fedora 22 k\n" #~ " lm_sensors-libs x86_64 3.1.2-2.fc13 fedora 37 k\n" #~ " net-snmp x86_64 1:5.5-12.fc13 fedora 295 k\n" #~ " net-snmp-libs x86_64 1:5.5-12.fc13 fedora 1.5 M\n" #~ " openhpi-libs x86_64 2.14.1-3.fc13 fedora 135 k\n" #~ " pacemaker-libs x86_64 1.1.1-1.fc13 fedora 264 k\n" #~ " perl-TimeDate noarch 1:1.20-1.fc13 fedora 42 k\n" #~ " resource-agents x86_64 3.0.10-1.fc13 fedora 357 k\n" #~ "\n" #~ "Transaction Summary\n" #~ "=========================================================================================\n" #~ "Install 21 Package(s)\n" #~ "Upgrade 0 Package(s)\n" #~ "\n" #~ "Total download size: 5.7 M\n" #~ "Installed size: 20 M\n" #~ "Downloading Packages:\n" #~ "Setting up and reading Presto delta metadata\n" #~ "updates-testing/prestodelta | 164 kB 00:00 \n" #~ "fedora/prestodelta | 150 B 00:00 \n" #~ "Processing delta metadata\n" #~ "Package(s) data still to download: 5.7 M\n" #~ "(1/21): OpenIPMI-libs-2.0.16-8.fc13.x86_64.rpm | 474 kB 00:00 \n" #~ "(2/21): PyXML-0.8.4-17.fc13.x86_64.rpm | 906 kB 00:01 \n" #~ "(3/21): cluster-glue-1.0.2-1.fc13.x86_64.rpm | 230 kB 00:00 \n" #~ "(4/21): cluster-glue-libs-1.0.2-1.fc13.x86_64.rpm | 116 kB 00:00 \n" #~ "(5/21): corosync-1.2.1-1.fc13.x86_64.rpm | 136 kB 00:00 \n" #~ "(6/21): corosynclib-1.2.1-1.fc13.x86_64.rpm | 145 kB 00:00 \n" #~ "(7/21): heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64.rpm | 172 kB 00:00 \n" #~ "(8/21): heartbeat-libs-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64.rpm | 265 kB 
00:00 \n" #~ "(9/21): libesmtp-1.0.4-12.fc12.x86_64.rpm | 54 kB 00:00 \n" #~ "(10/21): libibverbs-1.1.3-4.fc13.x86_64.rpm | 42 kB 00:00 \n" #~ "(11/21): libmlx4-1.0.1-5.fc13.x86_64.rpm | 27 kB 00:00 \n" #~ "(12/21): libnet-1.1.4-3.fc12.x86_64.rpm | 49 kB 00:00 \n" #~ "(13/21): librdmacm-1.0.10-2.fc13.x86_64.rpm | 22 kB 00:00 \n" #~ "(14/21): lm_sensors-libs-3.1.2-2.fc13.x86_64.rpm | 37 kB 00:00 \n" #~ "(15/21): net-snmp-5.5-12.fc13.x86_64.rpm | 295 kB 00:00 \n" #~ "(16/21): net-snmp-libs-5.5-12.fc13.x86_64.rpm | 1.5 MB 00:01 \n" #~ "(17/21): openhpi-libs-2.14.1-3.fc13.x86_64.rpm | 135 kB 00:00 \n" #~ "(18/21): pacemaker-1.1.1-1.fc13.x86_64.rpm | 543 kB 00:00 \n" #~ "(19/21): pacemaker-libs-1.1.1-1.fc13.x86_64.rpm | 264 kB 00:00 \n" #~ "(20/21): perl-TimeDate-1.20-1.fc13.noarch.rpm | 42 kB 00:00 \n" #~ "(21/21): resource-agents-3.0.10-1.fc13.x86_64.rpm | 357 kB 00:00 \n" #~ "----------------------------------------------------------------------------------------\n" #~ "Total 539 kB/s | 5.7 MB 00:10 \n" #~ "warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID e8e40fde: NOKEY\n" #~ "fedora/gpgkey | 3.2 kB 00:00 ... \n" #~ "Importing GPG key 0xE8E40FDE \"Fedora (13) <fedora@fedoraproject.org%gt;\" from /etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64\n" #~ " " #~ msgid "" #~ "\n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ " Installing : lm_sensors-libs-3.1.2-2.fc13.x86_64 1/21 \n" #~ " Installing : 1:net-snmp-libs-5.5-12.fc13.x86_64 2/21 \n" #~ " Installing : 1:net-snmp-5.5-12.fc13.x86_64 3/21 \n" #~ " Installing : openhpi-libs-2.14.1-3.fc13.x86_64 4/21 \n" #~ " Installing : libibverbs-1.1.3-4.fc13.x86_64 5/21 \n" #~ " Installing : libmlx4-1.0.1-5.fc13.x86_64 6/21 \n" #~ " Installing : librdmacm-1.0.10-2.fc13.x86_64 7/21 \n" #~ " Installing : corosync-1.2.1-1.fc13.x86_64 8/21 \n" #~ " Installing : corosynclib-1.2.1-1.fc13.x86_64 9/21 \n" #~ " Installing : libesmtp-1.0.4-12.fc12.x86_64 10/21 \n" #~ " Installing : OpenIPMI-libs-2.0.16-8.fc13.x86_64 11/21 \n" #~ " Installing : PyXML-0.8.4-17.fc13.x86_64 12/21 \n" #~ " Installing : libnet-1.1.4-3.fc12.x86_64 13/21 \n" #~ " Installing : 1:perl-TimeDate-1.20-1.fc13.noarch 14/21 \n" #~ " Installing : cluster-glue-1.0.2-1.fc13.x86_64 15/21 \n" #~ " Installing : cluster-glue-libs-1.0.2-1.fc13.x86_64 16/21 \n" #~ " Installing : resource-agents-3.0.10-1.fc13.x86_64 17/21 \n" #~ " Installing : heartbeat-libs-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 18/21 \n" #~ " Installing : heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 19/21 \n" #~ " Installing : pacemaker-1.1.1-1.fc13.x86_64 20/21 \n" #~ " Installing : pacemaker-libs-1.1.1-1.fc13.x86_64 21/21 \n" #~ "\n" #~ "Installed:\n" #~ " corosync.x86_64 0:1.2.1-1.fc13 pacemaker.x86_64 0:1.1.1-1.fc13 \n" #~ "\n" #~ "Dependency Installed:\n" #~ " OpenIPMI-libs.x86_64 0:2.0.16-8.fc13 \n" #~ " PyXML.x86_64 0:0.8.4-17.fc13 \n" #~ " cluster-glue.x86_64 0:1.0.2-1.fc13 \n" #~ " cluster-glue-libs.x86_64 0:1.0.2-1.fc13 \n" #~ " corosynclib.x86_64 0:1.2.1-1.fc13 \n" #~ " heartbeat.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 \n" #~ " heartbeat-libs.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 \n" #~ " libesmtp.x86_64 0:1.0.4-12.fc12 \n" #~ " libibverbs.x86_64 0:1.1.3-4.fc13 \n" #~ " libmlx4.x86_64 0:1.0.1-5.fc13 \n" #~ " libnet.x86_64 0:1.1.4-3.fc12 \n" #~ " librdmacm.x86_64 0:1.0.10-2.fc13 \n" #~ " lm_sensors-libs.x86_64 0:3.1.2-2.fc13 \n" #~ " net-snmp.x86_64 1:5.5-12.fc13 \n" #~ " net-snmp-libs.x86_64 1:5.5-12.fc13 \n" #~ " 
openhpi-libs.x86_64 0:2.14.1-3.fc13 \n" #~ " pacemaker-libs.x86_64 0:1.1.1-1.fc13 \n" #~ " perl-TimeDate.noarch 1:1.20-1.fc13 \n" #~ " resource-agents.x86_64 0:3.0.10-1.fc13 \n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 ~]# \n" #~ " " #~ msgstr "" #~ "\n" #~ "Running rpm_check_debug\n" #~ "Running Transaction Test\n" #~ "Transaction Test Succeeded\n" #~ "Running Transaction\n" #~ " Installing : lm_sensors-libs-3.1.2-2.fc13.x86_64 1/21 \n" #~ " Installing : 1:net-snmp-libs-5.5-12.fc13.x86_64 2/21 \n" #~ " Installing : 1:net-snmp-5.5-12.fc13.x86_64 3/21 \n" #~ " Installing : openhpi-libs-2.14.1-3.fc13.x86_64 4/21 \n" #~ " Installing : libibverbs-1.1.3-4.fc13.x86_64 5/21 \n" #~ " Installing : libmlx4-1.0.1-5.fc13.x86_64 6/21 \n" #~ " Installing : librdmacm-1.0.10-2.fc13.x86_64 7/21 \n" #~ " Installing : corosync-1.2.1-1.fc13.x86_64 8/21 \n" #~ " Installing : corosynclib-1.2.1-1.fc13.x86_64 9/21 \n" #~ " Installing : libesmtp-1.0.4-12.fc12.x86_64 10/21 \n" #~ " Installing : OpenIPMI-libs-2.0.16-8.fc13.x86_64 11/21 \n" #~ " Installing : PyXML-0.8.4-17.fc13.x86_64 12/21 \n" #~ " Installing : libnet-1.1.4-3.fc12.x86_64 13/21 \n" #~ " Installing : 1:perl-TimeDate-1.20-1.fc13.noarch 14/21 \n" #~ " Installing : cluster-glue-1.0.2-1.fc13.x86_64 15/21 \n" #~ " Installing : cluster-glue-libs-1.0.2-1.fc13.x86_64 16/21 \n" #~ " Installing : resource-agents-3.0.10-1.fc13.x86_64 17/21 \n" #~ " Installing : heartbeat-libs-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 18/21 \n" #~ " Installing : heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 19/21 \n" #~ " Installing : pacemaker-1.1.1-1.fc13.x86_64 20/21 \n" #~ " Installing : pacemaker-libs-1.1.1-1.fc13.x86_64 21/21 \n" #~ "\n" #~ "Installed:\n" #~ " corosync.x86_64 0:1.2.1-1.fc13 pacemaker.x86_64 0:1.1.1-1.fc13 \n" #~ "\n" #~ "Dependency Installed:\n" #~ " OpenIPMI-libs.x86_64 0:2.0.16-8.fc13 \n" #~ " PyXML.x86_64 0:0.8.4-17.fc13 \n" #~ " cluster-glue.x86_64 0:1.0.2-1.fc13 \n" #~ " cluster-glue-libs.x86_64 0:1.0.2-1.fc13 \n" #~ " corosynclib.x86_64 0:1.2.1-1.fc13 \n" #~ " heartbeat.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 \n" #~ " heartbeat-libs.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 \n" #~ " libesmtp.x86_64 0:1.0.4-12.fc12 \n" #~ " libibverbs.x86_64 0:1.1.3-4.fc13 \n" #~ " libmlx4.x86_64 0:1.0.1-5.fc13 \n" #~ " libnet.x86_64 0:1.1.4-3.fc12 \n" #~ " librdmacm.x86_64 0:1.0.10-2.fc13 \n" #~ " lm_sensors-libs.x86_64 0:3.1.2-2.fc13 \n" #~ " net-snmp.x86_64 1:5.5-12.fc13 \n" #~ " net-snmp-libs.x86_64 1:5.5-12.fc13 \n" #~ " openhpi-libs.x86_64 0:2.14.1-3.fc13 \n" #~ " pacemaker-libs.x86_64 0:1.1.1-1.fc13 \n" #~ " perl-TimeDate.noarch 1:1.20-1.fc13 \n" #~ " resource-agents.x86_64 0:3.0.10-1.fc13 \n" #~ "\n" #~ "Complete!\n" #~ "[root@pcmk-1 ~]# \n" #~ " " #~ msgid "Verify Connectivity by IP address" #~ msgstr "通过IP地址来检查连接" #~ msgid "Set up /etc/hosts entries" #~ msgstr "否则,我们修改/etc/hosts文件来达到相同的效果:" #~ msgid "Verify Connectivity by Hostname" #~ msgstr "通过机器名检查连接" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# uname -n\n" #~ "pcmk-1.clusterlabs.org\n" #~ "[root@pcmk-1 ~]# dnsdomainname \n" #~ "clusterlabs.org\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# uname -n\n" #~ "pcmk-1.clusterlabs.org\n" #~ "[root@pcmk-1 ~]# dnsdomainname \n" #~ "clusterlabs.org\n" #~ " " #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# source /etc/sysconfig/network\n" #~ "[root@pcmk-1 ~]# hostname $HOSTNAME\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# source /etc/sysconfig/network\n" #~ "[root@pcmk-1 ~]# hostname $HOSTNAME\n" #~ " " #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# uname -n\n" #~ 
"pcmk-1\n" #~ "[root@pcmk-1 ~]# dnsdomainname \n" #~ "clusterlabs.org\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# uname -n\n" #~ "pcmk-1\n" #~ "[root@pcmk-1 ~]# dnsdomainname \n" #~ "clusterlabs.org\n" #~ " " #~ msgid "Now repeat on pcmk-2." #~ msgstr "现在在pcmk-2上面重复以上操作." #~ msgid "Choose a port number and multi-cast http://en.wikipedia.org/wiki/Multicast address. http://en.wikipedia.org/wiki/Multicast_address " #~ msgstr "选择一个组播 http://en.wikipedia.org/wiki/Multicast 端口和地址。 http://en.wikipedia.org/wiki/Multicast_address " #~ msgid "For this document, I have chosen port 4000 and used 226.94.1.1 as the multi-cast address." #~ msgstr "在这个文档中,我选择端口4000并且用226.94.1.1作为组播地址:" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# export ais_port=4000\n" #~ "[root@pcmk-1 ~]# export ais_mcast=226.94.1.1\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# export ais_port=4000\n" #~ "[root@pcmk-1 ~]# export ais_mcast=226.94.1.1\n" #~ " " #~ msgid "Next we automatically determine the hosts address. By not using the full address, we make the configuration suitable to be copied to other nodes." #~ msgstr "然后我们用下面的命令自动获得机器的地址。为了让配置文件能够在机器上面的各个机器通用,我们不使用完整的IP地址而使用网络地址。(译者注:corosync配置文件中的监听地址一项可以填写网络地址,corosync会自动匹配应该监听在哪个地址而不是0.0.0.0)" #~ msgid "[root@pcmk-1 ~]# export ais_addr=`ip addr | grep \"inet \" | tail -n 1 | awk '{print $4}' | sed s/255/0/`" #~ msgstr "[root@pcmk-1 ~]# export ais_addr=`ip addr | grep \"inet \" | tail -n 1 | awk '{print $4}' | sed s/255/0/`" #~ msgid "Display and verify the configuration options" #~ msgstr "显示并检查配置的环境变量是否正确" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# env | grep ais_\n" #~ "ais_mcast=226.94.1.1\n" #~ "ais_port=4000\n" #~ "ais_addr=192.168.122.0\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# env | grep ais_\n" #~ "ais_mcast=226.94.1.1\n" #~ "ais_port=4000\n" #~ "ais_addr=192.168.122.0\n" #~ " " #~ msgid "Once you’re happy with the chosen values, update the Corosync configuration" #~ msgstr "确认以上输出没有错误以后,我们用以下命令来配置corosync" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/.*mcastaddr:.*/mcastaddr:\\ $ais_mcast/g\" /etc/corosync/corosync.conf\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/.*mcastport:.*/mcastport:\\ $ais_port/g\" /etc/corosync/corosync.conf\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/.*bindnetaddr:.*/bindnetaddr:\\ $ais_addr/g\" /etc/corosync/corosync.conf\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/.*mcastaddr:.*/mcastaddr:\\ $ais_mcast/g\" /etc/corosync/corosync.conf\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/.*mcastport:.*/mcastport:\\ $ais_port/g\" /etc/corosync/corosync.conf\n" #~ "[root@pcmk-1 ~]# sed -i.bak \"s/.*bindnetaddr:.*/bindnetaddr:\\ $ais_addr/g\" /etc/corosync/corosync.conf\n" #~ " " #~ msgid "Finally, tell Corosync to start Pacemaker" #~ msgstr "最后,告诉corosync要启动pacemaker" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# cat <<-END >>/etc/corosync/service.d/pcmk\n" #~ "service {\n" #~ " # Load the Pacemaker Cluster Resource Manager\n" #~ " name: pacemaker\n" #~ " ver: 0\n" #~ "}\n" #~ "END\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# cat <<-END >>/etc/corosync/service.d/pcmk\n" #~ "service {\n" #~ " # Load the Pacemaker Cluster Resource Manager\n" #~ " name: pacemaker\n" #~ " ver: 0\n" #~ "}\n" #~ "END\n" #~ " " #~ msgid "Propagate the Configuration" #~ msgstr " 传送配置文件" #~ msgid "Now we need to copy the changes so far to the other node:" #~ 
msgstr "然后我们把配置文件拷贝到其他节点:" #~ msgid "" #~ "\n" #~ "[root@pcmk-1 ~]# for f in /etc/corosync/corosync.conf /etc/corosync/service.d/pcmk /etc/hosts; do scp $f pcmk-2:$f ; done\n" #~ "corosync.conf 100% 1528 1.5KB/s 00:00\n" #~ "hosts 100% 281 0.3KB/s 00:00\n" #~ "[root@pcmk-1 ~]#\n" #~ " " #~ msgstr "" #~ "\n" #~ "[root@pcmk-1 ~]# for f in /etc/corosync/corosync.conf /etc/corosync/service.d/pcmk /etc/hosts; do scp $f pcmk-2:$f ; done\n" #~ "corosync.conf 100% 1528 1.5KB/s 00:00\n" #~ "hosts 100% 281 0.3KB/s 00:00\n" #~ "[root@pcmk-1 ~]#\n" #~ " " #~ msgid "Burn the disk image to a DVD" #~ msgstr "把整个镜像刻录成一个DVD光盘" #~ msgid " http://docs.fedoraproject.org/readme-burning-isos/en-US.html " #~ msgstr " http://docs.fedoraproject.org/readme-burning-isos/en-US.html " #~ msgid "and boot from it. Or use the image to boot a virtual machine as I have done here. After clicking through the welcome screen, select your language and keyboard layout" #~ msgstr "并从它启动。或者就像我一样启动一个虚拟机. 在点击欢迎界面的NETX后 ,我们要开始选择语言和键盘类型" #~ msgid " http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/s1-langselection-x86.html " #~ msgstr " http://docs.fedoraproject.org/install-guide/f&DISTRO_VERSION;/en-US/html/s1-langselection-x86.html " #~ msgid "Assign your machine a host name." #~ msgstr "给你的机器取个名字。" #~ msgid "I happen to control the clusterlabs.org domain name, so I will use that here." #~ msgstr "我正好能控制clusterlabs.org 这个域名,所以我用这个名字。" #~ msgid "You will then be prompted to indicate the machine’s physical location and to supply a root password." #~ msgstr "然后你会被提示选择机器所在地并设定root密码" #~ msgid "Now select where you want Fedora installed." #~ msgstr "然后你选择想在把Fedora安装在哪" #~ msgid "http://docs.fedoraproject.org/install-guide/f13/en-US/html/s1-diskpartsetup-x86.html" #~ msgstr "http://docs.fedoraproject.org/install-guide/f13/en-US/html/s1-diskpartsetup-x86.html" #~ msgid "Once the node reboots, follow the on screen instructions" #~ msgstr "一旦系统重启完毕你可以看到以下界面" #~ msgid "to create a system user and configure the time." #~ msgstr ",然后配置用户和设定时间。" #~ msgid "Choose a port number and multi-cast" #~ msgstr "选择一个组播" #~ msgid "http://en.wikipedia.org/wiki/Multicast" #~ msgstr "http://en.wikipedia.org/wiki/Multicast" #~ msgid "address." #~ msgstr "端口和地址。" #~ msgid "http://en.wikipedia.org/wiki/Multicast_address" #~ msgstr "http://en.wikipedia.org/wiki/Multicast_address" diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c index 444ee8d36e..a453514a96 100644 --- a/lib/cluster/membership.c +++ b/lib/cluster/membership.c @@ -1,676 +1,687 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include GHashTable *crm_peer_cache = NULL; GHashTable *crm_remote_peer_cache = NULL; unsigned long long crm_peer_seq = 0; gboolean crm_have_quorum = FALSE; int crm_remote_peer_cache_size(void) { if (crm_remote_peer_cache == NULL) { return 0; } return g_hash_table_size(crm_remote_peer_cache); } void crm_remote_peer_cache_add(const char *node_name) { crm_node_t *node = g_hash_table_lookup(crm_remote_peer_cache, node_name); if (node == NULL) { crm_trace("added %s to remote cache", node_name); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); node->flags = crm_remote_node; node->uname = strdup(node_name); node->uuid = strdup(node_name); node->state = strdup(CRM_NODE_MEMBER); g_hash_table_replace(crm_remote_peer_cache, node->uname, node); } } void crm_remote_peer_cache_remove(const char *node_name) { g_hash_table_remove(crm_remote_peer_cache, node_name); } static void remote_cache_refresh_helper(xmlNode *cib, const char *xpath, const char *field, int flags) { const char *remote = NULL; crm_node_t *node = NULL; xmlXPathObjectPtr xpathObj = NULL; int max = 0; int lpc = 0; xpathObj = xpath_search(cib, xpath); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *xml = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(xml != NULL); if(xml != NULL) { remote = crm_element_value(xml, field); } if (remote) { crm_trace("added %s to remote cache", remote); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); node->flags = flags; node->uname = strdup(remote); node->uuid = strdup(remote); node->state = strdup(CRM_NODE_MEMBER); g_hash_table_replace(crm_remote_peer_cache, node->uname, node); } } freeXpathObject(xpathObj); } void crm_remote_peer_cache_refresh(xmlNode *cib) { const char *xpath = NULL; g_hash_table_remove_all(crm_remote_peer_cache); /* remote nodes associated with a cluster resource */ xpath = "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE "//" XML_TAG_META_SETS "//" XML_CIB_TAG_NVPAIR "[@name='remote-node']"; remote_cache_refresh_helper(cib, xpath, "value", crm_remote_node | crm_remote_container); /* baremetal nodes defined by connection resources */ xpath = "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE "[@type='remote'][@provider='pacemaker']"; remote_cache_refresh_helper(cib, xpath, "id", crm_remote_node | crm_remote_baremetal); /* baremetal nodes we have seen in the config that may or may not have connection * resources associated with them anymore */ xpath = "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE "[@remote_node='true']"; remote_cache_refresh_helper(cib, xpath, "id", crm_remote_node | crm_remote_baremetal); } gboolean crm_is_peer_active(const crm_node_t * node) { if(node == NULL) { return FALSE; } if (is_set(node->flags, crm_remote_node)) { /* remote nodes are never considered active members.
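* (Remote nodes run pacemaker_remoted instead of corosync, so they have no cluster-layer membership of their own.)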
This * guarantees they will never be considered for DC membership.*/ return FALSE; } #if SUPPORT_COROSYNC if (is_openais_cluster()) { return crm_is_corosync_peer_active(node); } #endif #if SUPPORT_HEARTBEAT if (is_heartbeat_cluster()) { return crm_is_heartbeat_peer_active(node); } #endif crm_err("Unhandled cluster type: %s", name_for_cluster_type(get_cluster_type())); return FALSE; } static gboolean crm_reap_dead_member(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; crm_node_t *search = user_data; if (search == NULL) { return FALSE; } else if (search->id && node->id != search->id) { return FALSE; } else if (search->id == 0 && safe_str_neq(node->uname, search->uname)) { return FALSE; } else if (crm_is_peer_active(value) == FALSE) { crm_notice("Removing %s/%u from the membership list", node->uname, node->id); return TRUE; } return FALSE; } guint reap_crm_member(uint32_t id, const char *name) { int matches = 0; crm_node_t search; if (crm_peer_cache == NULL) { crm_trace("Nothing to do, cache not initialized"); return 0; } search.id = id; search.uname = name ? strdup(name) : NULL; matches = g_hash_table_foreach_remove(crm_peer_cache, crm_reap_dead_member, &search); if(matches) { crm_notice("Purged %d peers with id=%u and/or uname=%s from the membership cache", matches, id, name); } else { crm_info("No peers with id=%u and/or uname=%s exist", id, name); } free(search.uname); return matches; } static void crm_count_peer(gpointer key, gpointer value, gpointer user_data) { guint *count = user_data; crm_node_t *node = value; if (crm_is_peer_active(node)) { *count = *count + 1; } } guint crm_active_peers(void) { guint count = 0; if (crm_peer_cache) { g_hash_table_foreach(crm_peer_cache, crm_count_peer, &count); } return count; } static void destroy_crm_node(gpointer data) { crm_node_t *node = data; crm_trace("Destroying entry for node %u: %s", node->id, node->uname); free(node->addr); free(node->uname); free(node->state); free(node->uuid); free(node->expected); free(node); } void crm_peer_init(void) { if (crm_peer_cache == NULL) { crm_peer_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, free, destroy_crm_node); } if (crm_remote_peer_cache == NULL) { crm_remote_peer_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, destroy_crm_node); } } void crm_peer_destroy(void) { if (crm_peer_cache != NULL) { crm_trace("Destroying peer cache with %d members", g_hash_table_size(crm_peer_cache)); g_hash_table_destroy(crm_peer_cache); crm_peer_cache = NULL; } if (crm_remote_peer_cache != NULL) { crm_trace("Destroying remote peer cache with %d members", g_hash_table_size(crm_remote_peer_cache)); g_hash_table_destroy(crm_remote_peer_cache); crm_remote_peer_cache = NULL; } } void (*crm_status_callback) (enum crm_status_type, crm_node_t *, const void *) = NULL; void crm_set_status_callback(void (*dispatch) (enum crm_status_type, crm_node_t *, const void *)) { crm_status_callback = dispatch; } static void crm_dump_peer_hash(int level, const char *caller) { GHashTableIter iter; const char *id = NULL; crm_node_t *node = NULL; g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) { do_crm_log(level, "%s: Node %u/%s = %p - %s", caller, node->id, node->uname, node, id); } } static gboolean crm_hash_find_by_data(gpointer key, gpointer value, gpointer user_data) { if(value == user_data) { return TRUE; } return FALSE; } crm_node_t * crm_find_peer_full(unsigned int id, const char *uname, int 
flags) { crm_node_t *node = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (flags & CRM_GET_PEER_REMOTE) { node = g_hash_table_lookup(crm_remote_peer_cache, uname); } if (node == NULL && (flags & CRM_GET_PEER_CLUSTER)) { node = crm_find_peer(id, uname); } return node; } crm_node_t * crm_get_peer_full(unsigned int id, const char *uname, int flags) { crm_node_t *node = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (flags & CRM_GET_PEER_REMOTE) { node = g_hash_table_lookup(crm_remote_peer_cache, uname); } if (node == NULL && (flags & CRM_GET_PEER_CLUSTER)) { node = crm_get_peer(id, uname); } return node; } crm_node_t * crm_find_peer(unsigned int id, const char *uname) { GHashTableIter iter; crm_node_t *node = NULL; crm_node_t *by_id = NULL; crm_node_t *by_name = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (uname != NULL) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(node->uname && strcasecmp(node->uname, uname) == 0) { crm_trace("Name match: %s = %p", node->uname, node); by_name = node; break; } } } if (id > 0) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(node->id == id) { crm_trace("ID match: %u = %p", node->id, node); by_id = node; break; } } } node = by_id; /* Good default */ if(by_id == by_name) { /* Nothing to do if they match (both NULL counts) */ crm_trace("Consistent: %p for %u/%s", by_id, id, uname); } else if(by_id == NULL && by_name) { crm_trace("Only one: %p for %u/%s", by_name, id, uname); if(id && by_name->id) { crm_dump_peer_hash(LOG_WARNING, __FUNCTION__); crm_crit("Node %u and %u share the same name '%s'", id, by_name->id, uname); node = NULL; /* Create a new one */ } else { node = by_name; } } else if(by_name == NULL && by_id) { crm_trace("Only one: %p for %u/%s", by_id, id, uname); if(uname && by_id->uname) { crm_dump_peer_hash(LOG_WARNING, __FUNCTION__); crm_crit("Node '%s' and '%s' share the same cluster nodeid %u: assuming '%s' is correct", uname, by_id->uname, id, uname); } } else if(uname && by_id->uname) { crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u", by_id->uname, by_name->uname, id); } else if(id && by_name->id) { crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname); } else { /* Simple merge */ /* Only corosync-based clusters use nodeids * * The functions that call crm_update_peer_state() only know nodeid * so 'by_id' is authoritative when merging * * Same for crm_update_peer_proc() */ crm_dump_peer_hash(LOG_DEBUG, __FUNCTION__); crm_info("Merging %p into %p", by_name, by_id); g_hash_table_foreach_remove(crm_peer_cache, crm_hash_find_by_data, by_name); } return node; } /* coverity[-alloc] Memory is referenced in one or both hashtables */ crm_node_t * crm_get_peer(unsigned int id, const char *uname) { crm_node_t *node = NULL; char *uname_lookup = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); node = crm_find_peer(id, uname); + /* If uname wasn't provided and find_peer did not turn up a uname based on id, + * we need to look up the node name by id in the cluster membership. */ + if ((node == NULL || node->uname == NULL) && (uname == NULL)) { + uname_lookup = get_node_name(id); + } + + if (uname_lookup) { + crm_trace("Inferred a name of '%s' for node %u", uname_lookup, id); + uname = uname_lookup; + + /* Retry the lookup now that we know the uname.
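+ * An entry that was cached under this name only (with no id yet) can then be found instead of creating a duplicate.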
*/ + if (node == NULL) { + node = crm_find_peer(id, uname); + } + } + + if (node == NULL) { char *uniqueid = crm_generate_uuid(); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); crm_info("Created entry %s/%p for node %s/%u (%d total)", uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache)); g_hash_table_replace(crm_peer_cache, uniqueid, node); } - if(id && uname == NULL && node->uname == NULL) { - uname_lookup = get_node_name(id); - uname = uname_lookup; - crm_trace("Inferred a name of '%s' for node %u", uname, id); - } - if(id > 0 && uname && (node->id == 0 || node->uname == NULL)) { crm_info("Node %u is now known as %s", id, uname); } if(id > 0 && node->id == 0) { node->id = id; } if(uname && node->uname == NULL) { int lpc, len = strlen(uname); for (lpc = 0; lpc < len; lpc++) { if (uname[lpc] >= 'A' && uname[lpc] <= 'Z') { crm_warn("Node names with capitals are discouraged, consider changing '%s' to something else", uname); break; } } node->uname = strdup(uname); if (crm_status_callback) { crm_status_callback(crm_status_uname, node, NULL); } } if(node->uuid == NULL) { const char *uuid = crm_peer_uuid(node); if (uuid) { crm_info("Node %u has uuid %s", id, uuid); } else { crm_info("Cannot obtain a UUID for node %u/%s", id, node->uname); } } free(uname_lookup); return node; } crm_node_t * crm_update_peer(const char *source, unsigned int id, uint64_t born, uint64_t seen, int32_t votes, uint32_t children, const char *uuid, const char *uname, const char *addr, const char *state) { #if SUPPORT_PLUGIN gboolean addr_changed = FALSE; gboolean votes_changed = FALSE; #endif crm_node_t *node = NULL; id = get_corosync_id(id, uuid); node = crm_get_peer(id, uname); CRM_ASSERT(node != NULL); if (node->uuid == NULL) { if (is_openais_cluster()) { /* Yes, overrule whatever was passed in */ crm_peer_uuid(node); } else if (uuid != NULL) { node->uuid = strdup(uuid); } } if (children > 0) { crm_update_peer_proc(source, node, children, state); } if (state != NULL) { crm_update_peer_state(source, node, state, seen); } #if SUPPORT_HEARTBEAT if (born != 0) { node->born = born; } #endif #if SUPPORT_PLUGIN /* These were only used by the plugin */ if (born != 0) { node->born = born; } if (votes > 0 && node->votes != votes) { votes_changed = TRUE; node->votes = votes; } if (addr != NULL) { if (node->addr == NULL || crm_str_eq(node->addr, addr, FALSE) == FALSE) { addr_changed = TRUE; free(node->addr); node->addr = strdup(addr); } } if (addr_changed || votes_changed) { crm_info("%s: Node %s: id=%u state=%s addr=%s%s votes=%d%s born=" U64T " seen=" U64T " proc=%.32x", source, node->uname, node->id, node->state, node->addr, addr_changed ? " (new)" : "", node->votes, votes_changed ? 
" (new)" : "", node->born, node->last_seen, node->processes); } #endif return node; } void crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const char *status) { uint32_t last = 0; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL", source, peer2text(flag), status); return); last = node->processes; if (status == NULL) { node->processes = flag; if (node->processes != last) { changed = TRUE; } } else if (safe_str_eq(status, ONLINESTATUS)) { if ((node->processes & flag) == 0) { set_bit(node->processes, flag); changed = TRUE; } #if SUPPORT_PLUGIN } else if (safe_str_eq(status, CRM_NODE_MEMBER)) { if (flag > 0 && node->processes != flag) { node->processes = flag; changed = TRUE; } #endif } else if (node->processes & flag) { clear_bit(node->processes, flag); changed = TRUE; } if (changed) { if (status == NULL && flag <= crm_proc_none) { crm_info("%s: Node %s[%u] - all processes are now offline", source, node->uname, node->id); } else { crm_info("%s: Node %s[%u] - %s is now %s", source, node->uname, node->id, peer2text(flag), status); } if (crm_status_callback) { crm_status_callback(crm_status_processes, node, &last); } } else { crm_trace("%s: Node %s[%u] - %s is unchanged (%s)", source, node->uname, node->id, peer2text(flag), status); } } void crm_update_peer_expected(const char *source, crm_node_t * node, const char *expected) { char *last = NULL; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set 'expected' to %s", source, expected); return); last = node->expected; if (expected != NULL && safe_str_neq(node->expected, expected)) { node->expected = strdup(expected); changed = TRUE; } if (changed) { crm_info("%s: Node %s[%u] - expected state is now %s (was %s)", source, node->uname, node->id, expected, last); free(last); } else { crm_trace("%s: Node %s[%u] - expected state is unchanged (%s)", source, node->uname, node->id, expected); } } void crm_update_peer_state(const char *source, crm_node_t * node, const char *state, int membership) { char *last = NULL; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set 'state' to %s", source, state); return); last = node->state; if (state != NULL && safe_str_neq(node->state, state)) { node->state = strdup(state); changed = TRUE; } if (membership != 0 && safe_str_eq(node->state, CRM_NODE_MEMBER)) { node->last_seen = membership; } if (changed) { crm_notice("%s: Node %s[%u] - state is now %s (was %s)", source, node->uname, node->id, state, last); if (crm_status_callback) { enum crm_status_type status_type = crm_status_nstate; if (is_set(node->flags, crm_remote_node)) { status_type = crm_status_rstate; } crm_status_callback(status_type, node, last); } free(last); } else { crm_trace("%s: Node %s[%u] - state is unchanged (%s)", source, node->uname, node->id, state); } } int crm_terminate_member(int nodeid, const char *uname, void *unused) { /* Always use the synchronous, non-mainloop version */ return stonith_api_kick(nodeid, uname, 120, TRUE); } int crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection) { return stonith_api_kick(nodeid, uname, 120, TRUE); } diff --git a/lib/common/utils.c b/lib/common/utils.c index 6b89fe7d62..4ef02e89f1 100644 --- a/lib/common/utils.c +++ b/lib/common/utils.c @@ -1,2583 +1,2598 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free 
Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MAXLINE # define MAXLINE 512 #endif #ifdef HAVE_GETOPT_H # include #endif #ifndef PW_BUFFER_LEN # define PW_BUFFER_LEN 500 #endif CRM_TRACE_INIT_DATA(common); gboolean crm_config_error = FALSE; gboolean crm_config_warning = FALSE; char *crm_system_name = NULL; int node_score_red = 0; int node_score_green = 0; int node_score_yellow = 0; int node_score_infinity = INFINITY; static struct crm_option *crm_long_options = NULL; static const char *crm_app_description = NULL; static char *crm_short_options = NULL; static const char *crm_app_usage = NULL; int crm_exit(int rc) { mainloop_cleanup(); #if HAVE_LIBXML2 crm_trace("cleaning up libxml"); crm_xml_cleanup(); #endif crm_trace("exit %d", rc); qb_log_fini(); free(crm_short_options); free(crm_system_name); exit(ABS(rc)); /* Always exit with a positive value so that it can be passed to crm_error * * Otherwise the system wraps it around and people * have to jump through hoops figuring out what the * error was */ return rc; /* Can never happen, but allows return crm_exit(rc) * where "return rc" was used previously - which * keeps compilers happy. 
*/ } gboolean check_time(const char *value) { if (crm_get_msec(value) < 5000) { return FALSE; } return TRUE; } gboolean check_timer(const char *value) { if (crm_get_msec(value) < 0) { return FALSE; } return TRUE; } gboolean check_boolean(const char *value) { int tmp = FALSE; if (crm_str_to_boolean(value, &tmp) != 1) { return FALSE; } return TRUE; } gboolean check_number(const char *value) { errno = 0; if (value == NULL) { return FALSE; } else if (safe_str_eq(value, MINUS_INFINITY_S)) { } else if (safe_str_eq(value, INFINITY_S)) { } else { crm_int_helper(value, NULL); } if (errno != 0) { return FALSE; } return TRUE; } gboolean check_utilization(const char *value) { char *end = NULL; long number = strtol(value, &end, 10); if(end && end[0] != '%') { return FALSE; } else if(number < 0) { return FALSE; } return TRUE; } int char2score(const char *score) { int score_f = 0; if (score == NULL) { } else if (safe_str_eq(score, MINUS_INFINITY_S)) { score_f = -node_score_infinity; } else if (safe_str_eq(score, INFINITY_S)) { score_f = node_score_infinity; } else if (safe_str_eq(score, "+" INFINITY_S)) { score_f = node_score_infinity; } else if (safe_str_eq(score, "red")) { score_f = node_score_red; } else if (safe_str_eq(score, "yellow")) { score_f = node_score_yellow; } else if (safe_str_eq(score, "green")) { score_f = node_score_green; } else { score_f = crm_parse_int(score, NULL); if (score_f > 0 && score_f > node_score_infinity) { score_f = node_score_infinity; } else if (score_f < 0 && score_f < -node_score_infinity) { score_f = -node_score_infinity; } } return score_f; } char * score2char_stack(int score, char *buf, size_t len) { if (score >= node_score_infinity) { strncpy(buf, INFINITY_S, 9); } else if (score <= -node_score_infinity) { strncpy(buf, MINUS_INFINITY_S , 10); } else { return crm_itoa_stack(score, buf, len); } return buf; } char * score2char(int score) { if (score >= node_score_infinity) { return strdup(INFINITY_S); } else if (score <= -node_score_infinity) { return strdup("-" INFINITY_S); } return crm_itoa(score); } const char * cluster_option(GHashTable * options, gboolean(*validate) (const char *), const char *name, const char *old_name, const char *def_value) { const char *value = NULL; CRM_ASSERT(name != NULL); if (options != NULL) { value = g_hash_table_lookup(options, name); } if (value == NULL && old_name && options != NULL) { value = g_hash_table_lookup(options, old_name); if (value != NULL) { crm_config_warn("Using deprecated name '%s' for" " cluster option '%s'", old_name, name); g_hash_table_insert(options, strdup(name), strdup(value)); value = g_hash_table_lookup(options, old_name); } } if (value == NULL) { crm_trace("Using default value '%s' for cluster option '%s'", def_value, name); if (options == NULL) { return def_value; } g_hash_table_insert(options, strdup(name), strdup(def_value)); value = g_hash_table_lookup(options, name); } if (validate && validate(value) == FALSE) { crm_config_err("Value '%s' for cluster option '%s' is invalid." 
" Defaulting to %s", value, name, def_value); g_hash_table_replace(options, strdup(name), strdup(def_value)); value = g_hash_table_lookup(options, name); } return value; } const char * get_cluster_pref(GHashTable * options, pe_cluster_option * option_list, int len, const char *name) { int lpc = 0; const char *value = NULL; gboolean found = FALSE; for (lpc = 0; lpc < len; lpc++) { if (safe_str_eq(name, option_list[lpc].name)) { found = TRUE; value = cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } CRM_CHECK(found, crm_err("No option named: %s", name)); CRM_ASSERT(value != NULL); return value; } void config_metadata(const char *name, const char *version, const char *desc_short, const char *desc_long, pe_cluster_option * option_list, int len) { int lpc = 0; fprintf(stdout, "" "\n" "\n" " %s\n" " %s\n" " %s\n" " \n", name, version, desc_long, desc_short); for (lpc = 0; lpc < len; lpc++) { if (option_list[lpc].description_long == NULL && option_list[lpc].description_short == NULL) { continue; } fprintf(stdout, " \n" " %s\n" " \n" " %s%s%s\n" " \n", option_list[lpc].name, option_list[lpc].description_short, option_list[lpc].type, option_list[lpc].default_value, option_list[lpc].description_long ? option_list[lpc]. description_long : option_list[lpc].description_short, option_list[lpc].values ? " Allowed values: " : "", option_list[lpc].values ? option_list[lpc].values : ""); } fprintf(stdout, " \n\n"); } void verify_all_options(GHashTable * options, pe_cluster_option * option_list, int len) { int lpc = 0; for (lpc = 0; lpc < len; lpc++) { cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } char * crm_concat(const char *prefix, const char *suffix, char join) { int len = 0; char *new_str = NULL; CRM_ASSERT(prefix != NULL); CRM_ASSERT(suffix != NULL); len = strlen(prefix) + strlen(suffix) + 2; new_str = malloc(len); if(new_str) { sprintf(new_str, "%s%c%s", prefix, join, suffix); new_str[len - 1] = 0; } return new_str; } char * generate_hash_key(const char *crm_msg_reference, const char *sys) { char *hash_key = crm_concat(sys ? 
sys : "none", crm_msg_reference, '_'); crm_trace("created hash key: (%s)", hash_key); return hash_key; } char * crm_itoa_stack(int an_int, char *buffer, size_t len) { if (buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } char * crm_itoa(int an_int) { int len = 32; char *buffer = NULL; buffer = malloc(len + 1); if (buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } void crm_build_path(const char *path_c, mode_t mode) { int offset = 1, len = 0; char *path = strdup(path_c); CRM_CHECK(path != NULL, return); for (len = strlen(path); offset < len; offset++) { if (path[offset] == '/') { path[offset] = 0; if (mkdir(path, mode) < 0 && errno != EEXIST) { crm_perror(LOG_ERR, "Could not create directory '%s'", path); break; } path[offset] = '/'; } } if (mkdir(path, mode) < 0 && errno != EEXIST) { crm_perror(LOG_ERR, "Could not create directory '%s'", path); } free(path); } int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid) { int rc = -1; char *buffer = NULL; struct passwd pwd; struct passwd *pwentry = NULL; buffer = calloc(1, PW_BUFFER_LEN); getpwnam_r(name, &pwd, buffer, PW_BUFFER_LEN, &pwentry); if (pwentry) { rc = 0; if (uid) { *uid = pwentry->pw_uid; } if (gid) { *gid = pwentry->pw_gid; } crm_trace("Cluster user %s has uid=%d gid=%d", name, pwentry->pw_uid, pwentry->pw_gid); } else { crm_err("Cluster user %s does not exist", name); } free(buffer); return rc; } static int crm_version_helper(const char *text, char **end_text) { int atoi_result = -1; CRM_ASSERT(end_text != NULL); errno = 0; if (text != NULL && text[0] != 0) { atoi_result = (int)strtol(text, end_text, 10); if (errno == EINVAL) { crm_err("Conversion of '%s' %c failed", text, text[0]); atoi_result = -1; } } return atoi_result; } /* * version1 < version2 : -1 * version1 = version2 : 0 * version1 > version2 : 1 */ int compare_version(const char *version1, const char *version2) { int rc = 0; int lpc = 0; char *ver1_copy = NULL, *ver2_copy = NULL; char *rest1 = NULL, *rest2 = NULL; if (version1 == NULL && version2 == NULL) { return 0; } else if (version1 == NULL) { return -1; } else if (version2 == NULL) { return 1; } ver1_copy = strdup(version1); ver2_copy = strdup(version2); rest1 = ver1_copy; rest2 = ver2_copy; while (1) { int digit1 = 0; int digit2 = 0; lpc++; if (rest1 == rest2) { break; } if (rest1 != NULL) { digit1 = crm_version_helper(rest1, &rest1); } if (rest2 != NULL) { digit2 = crm_version_helper(rest2, &rest2); } if (digit1 < digit2) { rc = -1; break; } else if (digit1 > digit2) { rc = 1; break; } if (rest1 != NULL && rest1[0] == '.') { rest1++; } if (rest1 != NULL && rest1[0] == 0) { rest1 = NULL; } if (rest2 != NULL && rest2[0] == '.') { rest2++; } if (rest2 != NULL && rest2[0] == 0) { rest2 = NULL; } } free(ver1_copy); free(ver2_copy); if (rc == 0) { crm_trace("%s == %s (%d)", version1, version2, lpc); } else if (rc < 0) { crm_trace("%s < %s (%d)", version1, version2, lpc); } else if (rc > 0) { crm_trace("%s > %s (%d)", version1, version2, lpc); } return rc; } gboolean do_stderr = FALSE; void g_hash_destroy_str(gpointer data) { free(data); } #include /* #include */ /* #include */ long long crm_int_helper(const char *text, char **end_text) { long long result = -1; char *local_end_text = NULL; int saved_errno = 0; errno = 0; if (text != NULL) { #ifdef ANSI_ONLY if (end_text != NULL) { result = strtol(text, end_text, 10); } else { result = strtol(text, &local_end_text, 10); } #else if (end_text != NULL) { result = strtoll(text, end_text, 10); } else { result = 
strtoll(text, &local_end_text, 10); } #endif saved_errno = errno; /* CRM_CHECK(errno != EINVAL); */ if (errno == EINVAL) { crm_err("Conversion of %s failed", text); result = -1; } else if (errno == ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); } else if (errno != 0) { crm_perror(LOG_ERR, "Conversion of %s failed:", text); } if (local_end_text != NULL && local_end_text[0] != '\0') { crm_err("Characters left over after parsing '%s': '%s'", text, local_end_text); } errno = saved_errno; } return result; } int crm_parse_int(const char *text, const char *default_text) { int atoi_result = -1; if (text != NULL) { atoi_result = crm_int_helper(text, NULL); if (errno == 0) { return atoi_result; } } if (default_text != NULL) { atoi_result = crm_int_helper(default_text, NULL); if (errno == 0) { return atoi_result; } } else { crm_err("No default conversion value supplied"); } return -1; } gboolean safe_str_neq(const char *a, const char *b) { if (a == b) { return FALSE; } else if (a == NULL || b == NULL) { return TRUE; } else if (strcasecmp(a, b) == 0) { return FALSE; } return TRUE; } gboolean crm_is_true(const char *s) { gboolean ret = FALSE; if (s != NULL) { crm_str_to_boolean(s, &ret); } return ret; } int crm_str_to_boolean(const char *s, int *ret) { if (s == NULL) { return -1; } else if (strcasecmp(s, "true") == 0 || strcasecmp(s, "on") == 0 || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) { *ret = TRUE; return 1; } else if (strcasecmp(s, "false") == 0 || strcasecmp(s, "off") == 0 || strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0) { *ret = FALSE; return 1; } return -1; } #ifndef NUMCHARS # define NUMCHARS "0123456789." #endif #ifndef WHITESPACE # define WHITESPACE " \t\n\r\f" #endif unsigned long long crm_get_interval(const char *input) { unsigned long long msec = 0; if (input == NULL) { return msec; } else if (input[0] != 'P') { long long tmp = crm_get_msec(input); if(tmp > 0) { msec = tmp; } } else { crm_time_t *interval = crm_time_parse_duration(input); msec = 1000 * crm_time_get_seconds(interval); crm_time_free(interval); } return msec; } long long crm_get_msec(const char *input) { const char *cp = input; const char *units; long long multiplier = 1000; long long divisor = 1; long long msec = -1; char *end_text = NULL; /* double dret; */ if (input == NULL) { return msec; } cp += strspn(cp, WHITESPACE); units = cp + strspn(cp, NUMCHARS); units += strspn(units, WHITESPACE); if (strchr(NUMCHARS, *cp) == NULL) { return msec; } if (strncasecmp(units, "ms", 2) == 0 || strncasecmp(units, "msec", 4) == 0) { multiplier = 1; divisor = 1; } else if (strncasecmp(units, "us", 2) == 0 || strncasecmp(units, "usec", 4) == 0) { multiplier = 1; divisor = 1000; } else if (strncasecmp(units, "s", 1) == 0 || strncasecmp(units, "sec", 3) == 0) { multiplier = 1000; divisor = 1; } else if (strncasecmp(units, "m", 1) == 0 || strncasecmp(units, "min", 3) == 0) { multiplier = 60 * 1000; divisor = 1; } else if (strncasecmp(units, "h", 1) == 0 || strncasecmp(units, "hr", 2) == 0) { multiplier = 60 * 60 * 1000; divisor = 1; } else if (*units != EOS && *units != '\n' && *units != '\r') { return msec; } msec = crm_int_helper(cp, &end_text); if (msec > LLONG_MAX/multiplier) { /* arithmetics overflow while multiplier/divisor mutually exclusive */ return LLONG_MAX; } msec *= multiplier; msec /= divisor; /* dret += 0.5; */ /* msec = (long long)dret; */ return msec; } char * generate_op_key(const char *rsc_id, const char *op_type, int 
interval) { int len = 35; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); len += strlen(op_type); len += strlen(rsc_id); op_id = malloc(len); CRM_CHECK(op_id != NULL, return NULL); sprintf(op_id, "%s_%s_%d", rsc_id, op_type, interval); return op_id; } gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, int *interval) { char *notify = NULL; char *mutable_key = NULL; char *mutable_key_ptr = NULL; int len = 0, offset = 0, ch = 0; CRM_CHECK(key != NULL, return FALSE); *interval = 0; len = strlen(key); offset = len - 1; crm_trace("Source: %s", key); while (offset > 0 && isdigit(key[offset])) { int digits = len - offset; ch = key[offset] - '0'; CRM_CHECK(ch < 10, return FALSE); CRM_CHECK(ch >= 0, return FALSE); while (digits > 1) { digits--; ch = ch * 10; } *interval += ch; offset--; } crm_trace(" Interval: %d", *interval); CRM_CHECK(key[offset] == '_', return FALSE); mutable_key = strdup(key); mutable_key[offset] = 0; offset--; while (offset > 0 && key[offset] != '_') { offset--; } CRM_CHECK(key[offset] == '_', free(mutable_key); return FALSE); mutable_key_ptr = mutable_key + offset + 1; crm_trace(" Action: %s", mutable_key_ptr); *op_type = strdup(mutable_key_ptr); mutable_key[offset] = 0; offset--; CRM_CHECK(mutable_key != mutable_key_ptr, free(mutable_key); return FALSE); notify = strstr(mutable_key, "_post_notify"); if (notify && safe_str_eq(notify, "_post_notify")) { notify[0] = 0; } notify = strstr(mutable_key, "_pre_notify"); if (notify && safe_str_eq(notify, "_pre_notify")) { notify[0] = 0; } crm_trace(" Resource: %s", mutable_key); *rsc_id = mutable_key; return TRUE; } char * generate_notify_key(const char *rsc_id, const char *notify_type, const char *op_type) { int len = 12; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); CRM_CHECK(notify_type != NULL, return NULL); len += strlen(op_type); len += strlen(rsc_id); len += strlen(notify_type); if(len > 0) { op_id = malloc(len); } if (op_id != NULL) { sprintf(op_id, "%s_%s_notify_%s_0", rsc_id, notify_type, op_type); } return op_id; } char * generate_transition_magic_v202(const char *transition_key, int op_status) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%s", op_status, transition_key); } return fail_state; } char * generate_transition_magic(const char *transition_key, int op_status, int op_rc) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%d;%s", op_status, op_rc, transition_key); } return fail_state; } gboolean decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc) { int res = 0; char *key = NULL; gboolean result = TRUE; CRM_CHECK(magic != NULL, return FALSE); CRM_CHECK(op_rc != NULL, return FALSE); CRM_CHECK(op_status != NULL, return FALSE); key = calloc(1, strlen(magic) + 1); res = sscanf(magic, "%d:%d;%s", op_status, op_rc, key); if (res != 3) { crm_warn("Only found %d items in: '%s'", res, magic); free(key); return FALSE; } CRM_CHECK(decode_transition_key(key, uuid, transition_id, action_id, target_rc), result = FALSE); free(key); return result; } char * generate_transition_key(int transition_id, int action_id, int 
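/*
 * Annotation (illustrative, not part of the original source):
 * generate_op_key() builds the canonical "<rsc>_<task>_<interval>" form
 * and parse_op_key() reverses it, also stripping a _pre_notify or
 * _post_notify suffix from the resource part:
 *
 *   char *key = generate_op_key("dummy", "monitor", 10000);
 *   // key == "dummy_monitor_10000"
 *   char *rsc = NULL; char *task = NULL; int interval = 0;
 *   parse_op_key(key, &rsc, &task, &interval);
 *   // rsc == "dummy", task == "monitor", interval == 10000
 */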
target_rc, const char *node) { int len = 40; char *fail_state = NULL; CRM_CHECK(node != NULL, return NULL); len += strlen(node); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%d:%d:%-*s", action_id, transition_id, target_rc, 36, node); } return fail_state; } gboolean decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id, int *target_rc) { int res = 0; gboolean done = FALSE; CRM_CHECK(uuid != NULL, return FALSE); CRM_CHECK(target_rc != NULL, return FALSE); CRM_CHECK(action_id != NULL, return FALSE); CRM_CHECK(transition_id != NULL, return FALSE); *uuid = calloc(1, 37); res = sscanf(key, "%d:%d:%d:%36s", action_id, transition_id, target_rc, *uuid); switch (res) { case 4: /* Post Pacemaker 0.6 */ done = TRUE; break; case 3: case 2: /* this can be tricky - the UUID might start with an integer */ /* Until Pacemaker 0.6 */ done = TRUE; *target_rc = -1; res = sscanf(key, "%d:%d:%36s", action_id, transition_id, *uuid); if (res == 2) { *action_id = -1; res = sscanf(key, "%d:%36s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); } else if (res != 3) { CRM_CHECK(res == 3, done = FALSE); } break; case 1: /* Prior to Heartbeat 2.0.8 */ done = TRUE; *action_id = -1; *target_rc = -1; res = sscanf(key, "%d:%36s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); break; default: crm_crit("Unhandled sscanf result (%d) for %s", res, key); } if (strlen(*uuid) != 36) { crm_warn("Bad UUID (%s) in sscanf result (%d) for %s", *uuid, res, key); } if (done == FALSE) { crm_err("Cannot decode '%s' rc=%d", key, res); free(*uuid); *uuid = NULL; *target_rc = -1; *action_id = -1; *transition_id = -1; } return done; } void filter_action_parameters(xmlNode * param_set, const char *version) { char *key = NULL; char *timeout = NULL; char *interval = NULL; const char *attr_filter[] = { XML_ATTR_ID, XML_ATTR_CRM_VERSION, XML_LRM_ATTR_OP_DIGEST, }; gboolean do_delete = FALSE; int lpc = 0; static int meta_len = 0; if (meta_len == 0) { meta_len = strlen(CRM_META); } if (param_set == NULL) { return; } for (lpc = 0; lpc < DIMOF(attr_filter); lpc++) { xml_remove_prop(param_set, attr_filter[lpc]); } key = crm_meta_name(XML_LRM_ATTR_INTERVAL); interval = crm_element_value_copy(param_set, key); free(key); key = crm_meta_name(XML_ATTR_TIMEOUT); timeout = crm_element_value_copy(param_set, key); if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; do_delete = FALSE; if (strncasecmp(prop_name, CRM_META, meta_len) == 0) { do_delete = TRUE; } if (do_delete) { xml_remove_prop(param_set, prop_name); } } } if (crm_get_msec(interval) > 0 && compare_version(version, "1.0.8") > 0) { /* Re-instate the operation's timeout value */ if (timeout != NULL) { crm_xml_add(param_set, key, timeout); } } free(interval); free(timeout); free(key); } void filter_reload_parameters(xmlNode * param_set, const char *restart_string) { int len = 0; char *name = NULL; char *match = NULL; if (param_set == NULL) { return; } if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; name = NULL; len = strlen(prop_name) + 3; name = malloc(len); if(name) { sprintf(name, " %s ", prop_name); name[len - 1] = 0; match = strstr(restart_string, name); } if (match == NULL) { crm_trace("%s not found in %s", prop_name, restart_string); xml_remove_prop(param_set, prop_name); } free(name); } } } extern bool 
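/*
 * Annotation (illustrative, not part of the original source): the two
 * encodings above nest as "status:rc;action:transition:rc:uuid". A round
 * trip with a made-up UUID, assuming PCMK_LRM_OP_DONE is 0:
 *
 *   char *key = generate_transition_key(10, 4, 0,
 *       "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee");
 *   // key == "4:10:0:aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
 *   char *magic = generate_transition_magic(key, PCMK_LRM_OP_DONE, 0);
 *   // magic == "0:0;4:10:0:..."; decode_transition_magic() and
 *   // decode_transition_key() reverse this, with fallbacks for the
 *   // shorter pre-Pacemaker-0.6 layouts handled in the switch above.
 */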
crm_is_daemon; /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *assert_condition, gboolean do_core, gboolean do_fork) { int rc = 0; int pid = 0; int status = 0; /* Implied by the parent's error logging below */ /* crm_write_blackbox(0); */ if(crm_is_daemon == FALSE) { /* This is a command line tool - do not fork */ /* crm_add_logfile(NULL); * Record it to a file? */ crm_enable_stderr(TRUE); /* Make sure stderr is enabled so we can tell the caller */ do_fork = FALSE; /* Just crash if needed */ } if (do_core == FALSE) { crm_err("%s: Triggered assert at %s:%d : %s", function, file, line, assert_condition); return; } else if (do_fork) { pid = fork(); } else { crm_err("%s: Triggered fatal assert at %s:%d : %s", function, file, line, assert_condition); } if (pid == -1) { crm_crit("%s: Cannot create core for non-fatal assert at %s:%d : %s", function, file, line, assert_condition); return; } else if(pid == 0) { /* Child process */ abort(); return; } /* Parent process */ crm_err("%s: Forked child %d to record non-fatal assert at %s:%d : %s", function, pid, file, line, assert_condition); crm_write_blackbox(SIGTRAP, NULL); do { rc = waitpid(pid, &status, 0); if(rc == pid) { return; /* Job done */ } } while(errno == EINTR); if (errno == ECHILD) { /* crm_mon does this */ crm_trace("Cannot wait on forked child %d - SIGCHLD is probably set to SIG_IGN", pid); return; } crm_perror(LOG_ERR, "Cannot wait on forked child %d", pid); } char * generate_series_filename(const char *directory, const char *series, int sequence, gboolean bzip) { int len = 40; char *filename = NULL; const char *ext = "raw"; CRM_CHECK(directory != NULL, return NULL); CRM_CHECK(series != NULL, return NULL); #if !HAVE_BZLIB_H bzip = FALSE; #endif len += strlen(directory); len += strlen(series); filename = malloc(len); CRM_CHECK(filename != NULL, return NULL); if (bzip) { ext = "bz2"; } sprintf(filename, "%s/%s-%d.%s", directory, series, sequence, ext); return filename; } int get_last_sequence(const char *directory, const char *series) { FILE *file_strm = NULL; int start = 0, length = 0, read_len = 0; char *series_file = NULL; char *buffer = NULL; int seq = 0; int len = 36; CRM_CHECK(directory != NULL, return 0); CRM_CHECK(series != NULL, return 0); len += strlen(directory); len += strlen(series); series_file = malloc(len); CRM_CHECK(series_file != NULL, return 0); sprintf(series_file, "%s/%s.last", directory, series); file_strm = fopen(series_file, "r"); if (file_strm == NULL) { crm_debug("Series file %s does not exist", series_file); free(series_file); return 0; } /* see how big the file is */ start = ftell(file_strm); fseek(file_strm, 0L, SEEK_END); length = ftell(file_strm); fseek(file_strm, 0L, start); CRM_ASSERT(length >= 0); CRM_ASSERT(start == ftell(file_strm)); if (length <= 0) { crm_info("%s was not valid", series_file); free(buffer); buffer = NULL; } else { crm_trace("Reading %d bytes from file", length); buffer = calloc(1, (length + 1)); read_len = fread(buffer, 1, length, file_strm); if (read_len != length) { crm_err("Calculated and read bytes differ: %d vs. 
%d", length, read_len); free(buffer); buffer = NULL; } } seq = crm_parse_int(buffer, "0"); fclose(file_strm); crm_trace("Found %d in %s", seq, series_file); free(series_file); free(buffer); return seq; } void write_last_sequence(const char *directory, const char *series, int sequence, int max) { int rc = 0; int len = 36; FILE *file_strm = NULL; char *series_file = NULL; CRM_CHECK(directory != NULL, return); CRM_CHECK(series != NULL, return); if (max == 0) { return; } if (max > 0 && sequence >= max) { sequence = 0; } len += strlen(directory); len += strlen(series); series_file = malloc(len); if(series_file) { sprintf(series_file, "%s/%s.last", directory, series); file_strm = fopen(series_file, "w"); } if (file_strm != NULL) { rc = fprintf(file_strm, "%d", sequence); if (rc < 0) { crm_perror(LOG_ERR, "Cannot write to series file %s", series_file); } } else { crm_err("Cannot open series file %s for writing", series_file); } if (file_strm != NULL) { fflush(file_strm); fclose(file_strm); } crm_trace("Wrote %d to %s", sequence, series_file); free(series_file); } #define LOCKSTRLEN 11 int crm_pid_active(long pid) { if (pid <= 0) { return -1; } else if (kill(pid, 0) < 0 && errno == ESRCH) { return 0; } #ifndef HAVE_PROC_PID return 1; #else { int rc = 0; int running = 0; char proc_path[PATH_MAX], exe_path[PATH_MAX], myexe_path[PATH_MAX]; /* check to make sure pid hasn't been reused by another process */ snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", pid); rc = readlink(proc_path, exe_path, PATH_MAX - 1); if (rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); goto bail; } exe_path[rc] = 0; snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", (long unsigned int)getpid()); rc = readlink(proc_path, myexe_path, PATH_MAX - 1); if (rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); goto bail; } myexe_path[rc] = 0; if (strcmp(exe_path, myexe_path) == 0) { running = 1; } } bail: return running; #endif } static int crm_read_pidfile(const char *filename) { int fd; long pid = -1; char buf[LOCKSTRLEN + 1]; if ((fd = open(filename, O_RDONLY)) < 0) { goto bail; } if (read(fd, buf, sizeof(buf)) < 1) { goto bail; } if (sscanf(buf, "%lu", &pid) > 0) { if (pid <= 0) { pid = -ESRCH; } } bail: if (fd >= 0) { close(fd); } return pid; } static int crm_pidfile_inuse(const char *filename, long mypid) { long pid = 0; struct stat sbuf; char buf[LOCKSTRLEN + 1]; int rc = -ENOENT, fd = 0; if ((fd = open(filename, O_RDONLY)) >= 0) { if (fstat(fd, &sbuf) >= 0 && sbuf.st_size < LOCKSTRLEN) { sleep(2); /* if someone was about to create one, * give'm a sec to do so */ } if (read(fd, buf, sizeof(buf)) > 0) { if (sscanf(buf, "%lu", &pid) > 0) { crm_trace("Got pid %lu from %s\n", pid, filename); if (pid <= 1) { /* Invalid pid */ rc = -ENOENT; unlink(filename); } else if (mypid && pid == mypid) { /* In use by us */ rc = pcmk_ok; } else if (crm_pid_active(pid) == FALSE) { /* Contains a stale value */ unlink(filename); rc = -ENOENT; } else if (mypid && pid != mypid) { /* locked by existing process - give up */ rc = -EEXIST; } } } close(fd); } return rc; } static int crm_lock_pidfile(const char *filename) { long mypid = 0; int fd = 0, rc = 0; char buf[LOCKSTRLEN + 1]; mypid = (unsigned long)getpid(); rc = crm_pidfile_inuse(filename, 0); if (rc == -ENOENT) { /* exists but the process is not active */ } else if (rc != pcmk_ok) { /* locked by existing process - give up */ return rc; } if ((fd = open(filename, O_CREAT | O_WRONLY | O_EXCL, 0644)) < 0) { /* Hmmh, why did we fail? 
Anyway, nothing we can do about it */ return -errno; } snprintf(buf, sizeof(buf), "%*lu\n", LOCKSTRLEN - 1, mypid); rc = write(fd, buf, LOCKSTRLEN); close(fd); if (rc != LOCKSTRLEN) { crm_perror(LOG_ERR, "Incomplete write to %s", filename); return -errno; } return crm_pidfile_inuse(filename, mypid); } void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile) { int rc; long pid; const char *devnull = "/dev/null"; if (daemonize == FALSE) { return; } /* Check before we even try... */ rc = crm_pidfile_inuse(pidfile, 1); if(rc < pcmk_ok && rc != -ENOENT) { pid = crm_read_pidfile(pidfile); crm_err("%s: already running [pid %ld in %s]", name, pid, pidfile); printf("%s: already running [pid %ld in %s]\n", name, pid, pidfile); crm_exit(rc); } pid = fork(); if (pid < 0) { fprintf(stderr, "%s: could not start daemon\n", name); crm_perror(LOG_ERR, "fork"); crm_exit(EINVAL); } else if (pid > 0) { crm_exit(pcmk_ok); } rc = crm_lock_pidfile(pidfile); if(rc < pcmk_ok) { crm_err("Could not lock '%s' for %s: %s (%d)", pidfile, name, pcmk_strerror(rc), rc); printf("Could not lock '%s' for %s: %s (%d)\n", pidfile, name, pcmk_strerror(rc), rc); crm_exit(rc); } umask(S_IWGRP | S_IWOTH | S_IROTH); close(STDIN_FILENO); (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ close(STDOUT_FILENO); (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ close(STDERR_FILENO); (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ } gboolean crm_is_writable(const char *dir, const char *file, const char *user, const char *group, gboolean need_both) { int s_res = -1; struct stat buf; char *full_file = NULL; const char *target = NULL; gboolean pass = TRUE; gboolean readwritable = FALSE; CRM_ASSERT(dir != NULL); if (file != NULL) { full_file = crm_concat(dir, file, '/'); target = full_file; s_res = stat(full_file, &buf); if (s_res == 0 && S_ISREG(buf.st_mode) == FALSE) { crm_err("%s must be a regular file", target); pass = FALSE; goto out; } } if (s_res != 0) { target = dir; s_res = stat(dir, &buf); if (s_res != 0) { crm_err("%s must exist and be a directory", dir); pass = FALSE; goto out; } else if (S_ISDIR(buf.st_mode) == FALSE) { crm_err("%s must be a directory", dir); pass = FALSE; } } if (user) { struct passwd *sys_user = NULL; sys_user = getpwnam(user); readwritable = (sys_user != NULL && buf.st_uid == sys_user->pw_uid && (buf.st_mode & (S_IRUSR | S_IWUSR))); if (readwritable == FALSE) { crm_err("%s must be owned and r/w by user %s", target, user); if (need_both) { pass = FALSE; } } } if (group) { struct group *sys_grp = getgrnam(group); readwritable = (sys_grp != NULL && buf.st_gid == sys_grp->gr_gid && (buf.st_mode & (S_IRGRP | S_IWGRP))); if (readwritable == FALSE) { if (need_both || user == NULL) { pass = FALSE; crm_err("%s must be owned and r/w by group %s", target, group); } else { crm_warn("%s should be owned and r/w by group %s", target, group); } } } out: free(full_file); return pass; } char * crm_strip_trailing_newline(char *str) { int len; if (str == NULL) { return str; } for (len = strlen(str) - 1; len >= 0 && str[len] == '\n'; len--) { str[len] = '\0'; } return str; } gboolean crm_str_eq(const char *a, const char *b, gboolean use_case) { if (use_case) { return g_strcmp0(a, b) == 0; /* TODO - Figure out which calls, if any, really need to be case independant */ } else if (a == b) { return TRUE; } else if (a == NULL || b == NULL) { /* shouldn't be comparing NULLs */ return FALSE; } else if (strcasecmp(a, b) == 0) { return TRUE; } return FALSE; } char * crm_meta_name(const char *field) { int lpc = 0; 
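/*
 * Annotation (illustrative, not part of the original source): the loop
 * below rewrites '-' to '_' so the prefixed name is safe to use as a
 * shell variable. Assuming the usual "CRM_meta" value for CRM_META:
 *
 *   crm_meta_name("migration-threshold"); // "CRM_meta_migration_threshold"
 *   crm_meta_name("timeout");             // "CRM_meta_timeout"
 */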
int max = 0; char *crm_name = NULL; CRM_CHECK(field != NULL, return NULL); crm_name = crm_concat(CRM_META, field, '_'); /* Massage the names so they can be used as shell variables */ max = strlen(crm_name); for (; lpc < max; lpc++) { switch (crm_name[lpc]) { case '-': crm_name[lpc] = '_'; break; } } return crm_name; } const char * crm_meta_value(GHashTable * hash, const char *field) { char *key = NULL; const char *value = NULL; key = crm_meta_name(field); if (key) { value = g_hash_table_lookup(hash, key); free(key); } return value; } static struct option * crm_create_long_opts(struct crm_option *long_options) { struct option *long_opts = NULL; #ifdef HAVE_GETOPT_H int index = 0, lpc = 0; /* * A previous, possibly poor, choice of '?' as the short form of --help * means that getopt_long() returns '?' for both --help and for "unknown option" * * This dummy entry allows us to differentiate between the two in crm_get_option() * and exit with the correct error code */ long_opts = realloc(long_opts, (index + 1) * sizeof(struct option)); long_opts[index].name = "__dummmy__"; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = '_'; index++; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].name[0] == '-') { continue; } long_opts = realloc(long_opts, (index + 1) * sizeof(struct option)); /*fprintf(stderr, "Creating %d %s = %c\n", index, * long_options[lpc].name, long_options[lpc].val); */ long_opts[index].name = long_options[lpc].name; long_opts[index].has_arg = long_options[lpc].has_arg; long_opts[index].flag = long_options[lpc].flag; long_opts[index].val = long_options[lpc].val; index++; } /* Now create the list terminator */ long_opts = realloc(long_opts, (index + 1) * sizeof(struct option)); long_opts[index].name = NULL; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = 0; #endif return long_opts; } void crm_set_options(const char *short_options, const char *app_usage, struct crm_option *long_options, const char *app_desc) { if (short_options) { crm_short_options = strdup(short_options); } else if (long_options) { int lpc = 0; int opt_string_len = 0; char *local_short_options = NULL; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].val && long_options[lpc].val != '-' && long_options[lpc].val < UCHAR_MAX) { local_short_options = realloc(local_short_options, opt_string_len + 4); local_short_options[opt_string_len++] = long_options[lpc].val; /* getopt(3) says: Two colons mean an option takes an optional arg; */ if (long_options[lpc].has_arg == optional_argument) { local_short_options[opt_string_len++] = ':'; } if (long_options[lpc].has_arg >= required_argument) { local_short_options[opt_string_len++] = ':'; } local_short_options[opt_string_len] = 0; } } crm_short_options = local_short_options; crm_trace("Generated short option string: '%s'", local_short_options); } if (long_options) { crm_long_options = long_options; } if (app_desc) { crm_app_description = app_desc; } if (app_usage) { crm_app_usage = app_usage; } } int crm_get_option(int argc, char **argv, int *index) { return crm_get_option_long(argc, argv, index, NULL); } int crm_get_option_long(int argc, char **argv, int *index, const char **longname) { #ifdef HAVE_GETOPT_H static struct option *long_opts = NULL; if (long_opts == NULL && crm_long_options) { long_opts = crm_create_long_opts(crm_long_options); } if (long_opts) { int flag = getopt_long(argc, argv, crm_short_options, long_opts, index); switch (flag) { case 0: if 
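/*
 * Annotation (illustrative, not part of the original source):
 * crm_set_options() above derives the getopt(3) short-option string from
 * the long-option table: one ':' for required_argument, two for
 * optional_argument. For a hypothetical table:
 *
 *   {"verbose", no_argument, ...},       val 'V'  ->  "V"
 *   {"file",    required_argument, ...}, val 'f'  ->  "f:"
 *   {"limit",   optional_argument, ...}, val 'l'  ->  "l::"
 *
 * giving "Vf:l::" overall.
 */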
(long_opts[*index].val) { return long_opts[*index].val; } else if (longname) { *longname = long_opts[*index].name; } else { crm_notice("Unhandled option --%s", long_opts[*index].name); return flag; } case -1: /* End of option processing */ break; case ':': crm_trace("Missing argument"); crm_help('?', 1); break; case '?': crm_help('?', *index ? 0 : 1); break; } return flag; } #endif if (crm_short_options) { return getopt(argc, argv, crm_short_options); } return -1; } int crm_help(char cmd, int exit_code) { int i = 0; FILE *stream = (exit_code ? stderr : stdout); if (cmd == 'v' || cmd == '$') { fprintf(stream, "Pacemaker %s\n", VERSION); fprintf(stream, "Written by Andrew Beekhof\n"); goto out; } if (cmd == '!') { fprintf(stream, "Pacemaker %s (Build: %s): %s\n", VERSION, BUILD_VERSION, CRM_FEATURES); goto out; } fprintf(stream, "%s - %s\n", crm_system_name, crm_app_description); if (crm_app_usage) { fprintf(stream, "Usage: %s %s\n", crm_system_name, crm_app_usage); } if (crm_long_options) { fprintf(stream, "Options:\n"); for (i = 0; crm_long_options[i].name != NULL; i++) { if (crm_long_options[i].flags & pcmk_option_hidden) { } else if (crm_long_options[i].flags & pcmk_option_paragraph) { fprintf(stream, "%s\n\n", crm_long_options[i].desc); } else if (crm_long_options[i].flags & pcmk_option_example) { fprintf(stream, "\t#%s\n\n", crm_long_options[i].desc); } else if (crm_long_options[i].val == '-' && crm_long_options[i].desc) { fprintf(stream, "%s\n", crm_long_options[i].desc); } else { /* is val printable as char ? */ if (crm_long_options[i].val && crm_long_options[i].val <= UCHAR_MAX) { fprintf(stream, " -%c,", crm_long_options[i].val); } else { fputs(" ", stream); } fprintf(stream, " --%s%s\t%s\n", crm_long_options[i].name, crm_long_options[i].has_arg == optional_argument ? "[=value]" : crm_long_options[i].has_arg == required_argument ? "=value" : "", crm_long_options[i].desc ? crm_long_options[i].desc : ""); } } } else if (crm_short_options) { fprintf(stream, "Usage: %s - %s\n", crm_system_name, crm_app_description); for (i = 0; crm_short_options[i] != 0; i++) { int has_arg = no_argument /* 0 */; if (crm_short_options[i + 1] == ':') { if (crm_short_options[i + 2] == ':') has_arg = optional_argument /* 2 */; else has_arg = required_argument /* 1 */; } fprintf(stream, " -%c %s\n", crm_short_options[i], has_arg == optional_argument ? "[value]" : has_arg == required_argument ? 
"{value}" : ""); i += has_arg; } } fprintf(stream, "\nReport bugs to %s\n", PACKAGE_BUGREPORT); out: return crm_exit(exit_code); } void cib_ipc_servers_init(qb_ipcs_service_t **ipcs_ro, qb_ipcs_service_t **ipcs_rw, qb_ipcs_service_t **ipcs_shm, struct qb_ipcs_service_handlers *ro_cb, struct qb_ipcs_service_handlers *rw_cb) { *ipcs_ro = mainloop_add_ipc_server(cib_channel_ro, QB_IPC_NATIVE, ro_cb); *ipcs_rw = mainloop_add_ipc_server(cib_channel_rw, QB_IPC_NATIVE, rw_cb); *ipcs_shm = mainloop_add_ipc_server(cib_channel_shm, QB_IPC_SHM, rw_cb); if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) { crm_err("Failed to create cib servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void cib_ipc_servers_destroy(qb_ipcs_service_t *ipcs_ro, qb_ipcs_service_t *ipcs_rw, qb_ipcs_service_t *ipcs_shm) { qb_ipcs_destroy(ipcs_ro); qb_ipcs_destroy(ipcs_rw); qb_ipcs_destroy(ipcs_shm); } qb_ipcs_service_t * crmd_ipc_server_init(struct qb_ipcs_service_handlers *cb) { return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb); } void attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server(T_ATTRD, QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Failed to create attrd servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Failed to create stonith-ng servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } int attrd_update_delegate(crm_ipc_t * ipc, char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen, const char *user_name, gboolean is_remote) { int rc = -ENOTCONN; int max = 5; enum crm_ipc_flags flags = crm_ipc_flags_none; xmlNode *update = create_xml_node(NULL, __FUNCTION__); static gboolean connected = TRUE; static crm_ipc_t *local_ipc = NULL; if (ipc == NULL && local_ipc == NULL) { local_ipc = crm_ipc_new(T_ATTRD, 0); flags |= crm_ipc_client_response; connected = FALSE; } if (ipc == NULL) { ipc = local_ipc; } /* remap common aliases */ if (safe_str_eq(section, "reboot")) { section = XML_CIB_TAG_STATUS; } else if (safe_str_eq(section, "forever")) { section = XML_CIB_TAG_NODES; } crm_xml_add(update, F_TYPE, T_ATTRD); crm_xml_add(update, F_ORIG, crm_system_name); if (name == NULL && command == 'U') { command = 'R'; } switch (command) { case 'D': case 'U': case 'v': crm_xml_add(update, F_ATTRD_TASK, "update"); crm_xml_add(update, F_ATTRD_ATTRIBUTE, name); break; case 'R': crm_xml_add(update, F_ATTRD_TASK, "refresh"); break; case 'q': crm_xml_add(update, F_ATTRD_TASK, "query"); break; case 'C': crm_xml_add(update, F_ATTRD_TASK, "peer-remove"); break; } crm_xml_add(update, F_ATTRD_VALUE, value); crm_xml_add(update, F_ATTRD_DAMPEN, dampen); crm_xml_add(update, F_ATTRD_SECTION, section); crm_xml_add(update, F_ATTRD_HOST, host); crm_xml_add(update, F_ATTRD_SET, set); crm_xml_add_int(update, F_ATTRD_IS_REMOTE, is_remote); #if ENABLE_ACL if (user_name) { crm_xml_add(update, F_ATTRD_USER, user_name); } #endif while (max > 0) { if (connected == FALSE) { crm_info("Connecting to cluster... 
%d retries remaining", max); connected = crm_ipc_connect(ipc); } if (connected) { rc = crm_ipc_send(ipc, update, flags, 0, NULL); } if (ipc != local_ipc) { break; } else if (rc > 0) { break; } else if (rc == -EAGAIN || rc == -EALREADY) { sleep(5 - max); max--; } else { crm_ipc_close(ipc); connected = FALSE; sleep(5 - max); max--; } } free_xml(update); if (rc > 0) { crm_debug("Sent update: %s=%s for %s", name, value, host ? host : "localhost"); rc = pcmk_ok; } else { crm_debug("Could not send update %s=%s for %s: %s (%d)", name, value, host ? host : "localhost", pcmk_strerror(rc), rc); } return rc; } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" static void append_digest(lrmd_event_data_t * op, xmlNode * update, const char *version, const char *magic, int level) { /* this will enable us to later determine that the * resource's parameters have changed and we should force * a restart */ char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); filter_action_parameters(args_xml, version); digest = calculate_operation_digest(args_xml, version); #if 0 if (level < get_crm_log_level() && op->interval == 0 && crm_str_eq(op->op_type, CRMD_ACTION_START, TRUE)) { char *digest_source = dump_xml_unformatted(args_xml); do_crm_log(level, "Calculated digest %s for %s (%s). Source: %s\n", digest, ID(update), magic, digest_source); free(digest_source); } #endif crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } int rsc_op_expected_rc(lrmd_event_data_t * op) { int rc = 0; if (op && op->user_data) { int dummy = 0; char *uuid = NULL; decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &rc); free(uuid); } return rc; } gboolean did_rsc_op_fail(lrmd_event_data_t * op, int target_rc) { switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: case PCMK_LRM_OP_PENDING: return FALSE; break; case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_ERROR: return TRUE; break; default: if (target_rc != op->rc) { return TRUE; } } return FALSE; } xmlNode * create_operation_update(xmlNode * parent, lrmd_event_data_t * op, const char *caller_version, int target_rc, const char *origin, int level) { char *key = NULL; char *magic = NULL; char *op_id = NULL; + char *op_id_additional = NULL; char *local_user_data = NULL; xmlNode *xml_op = NULL; const char *task = NULL; gboolean dc_munges_migrate_ops = (compare_version(caller_version, "3.0.3") < 0); gboolean dc_needs_unique_ops = (compare_version(caller_version, "3.0.6") < 0); CRM_CHECK(op != NULL, return NULL); do_crm_log(level, "%s: Updating resource %s after %s op %s (interval=%d)", origin, op->rsc_id, op->op_type, services_lrm_status_str(op->op_status), op->interval); crm_trace("DC version: %s", caller_version); task = op->op_type; /* remap the task name under various scenarios * this makes life easier for the PE when its trying determin the current state */ if (crm_str_eq(task, "reload", TRUE)) { if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } else if (dc_munges_migrate_ops && crm_str_eq(task, CRMD_ACTION_MIGRATE, TRUE)) { /* if the migrate_from fails it will have enough info to do the right thing */ if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_STOP; } else { task = CRMD_ACTION_STATUS; } } else if (dc_munges_migrate_ops && op->op_status == PCMK_LRM_OP_DONE && crm_str_eq(task, CRMD_ACTION_MIGRATED, TRUE)) { 
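/*
 * Annotation (illustrative, not part of the original source): summary of
 * the task remapping above, as gated by the compare_version() checks:
 *
 *   "reload"             -> CRMD_ACTION_START on success,
 *                           CRMD_ACTION_STATUS otherwise
 *   CRMD_ACTION_MIGRATE  -> CRMD_ACTION_STOP on success,
 *                           CRMD_ACTION_STATUS otherwise (DC < 3.0.3)
 *   CRMD_ACTION_MIGRATED -> CRMD_ACTION_START on success (DC < 3.0.3)
 */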
task = CRMD_ACTION_START; } key = generate_op_key(op->rsc_id, task, op->interval); if (dc_needs_unique_ops && op->interval > 0) { op_id = strdup(key); } else if (crm_str_eq(task, CRMD_ACTION_NOTIFY, TRUE)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = generate_notify_key(op->rsc_id, n_type, n_task); /* these are not yet allowed to fail */ op->op_status = PCMK_LRM_OP_DONE; op->rc = 0; } else if (did_rsc_op_fail(op, target_rc)) { op_id = generate_op_key(op->rsc_id, "last_failure", 0); + if (op->interval == 0) { + /* Ensure 'last' gets updated too in case recording-pending="true" */ + op_id_additional = generate_op_key(op->rsc_id, "last", 0); + } } else if (op->interval > 0) { op_id = strdup(key); } else { op_id = generate_op_key(op->rsc_id, "last", 0); } + again: xml_op = find_entity(parent, XML_LRM_TAG_RSC_OP, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for:" " %s_%s_%d %d from %s", op->rsc_id, op->op_type, op->interval, op->call_id, origin); local_user_data = generate_transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } - magic = generate_transition_magic(op->user_data, op->op_status, op->rc); + if(magic == NULL) { + magic = generate_transition_magic(op->user_data, op->op_status, op->rc); + } crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_int(xml_op, XML_LRM_ATTR_INTERVAL, op->interval); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (%s_%s_%d): last=%lu change=%lu exec=%lu queue=%lu", op->rsc_id, op->op_type, op->interval, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if (op->interval == 0) { /* The values are the same for non-recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_RUN, op->t_run); crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } else if(op->t_rcchange) { /* last-run is not accurate for recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_rcchange); } else { /* ...but is better than nothing otherwise */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (crm_str_eq(op->op_type, CRMD_ACTION_MIGRATE, TRUE) || crm_str_eq(op->op_type, CRMD_ACTION_MIGRATED, TRUE)) { /* * Record migrate_source and migrate_target always for migrate ops. 
*/ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } append_digest(op, xml_op, caller_version, magic, LOG_DEBUG); + if (op_id_additional) { + free(op_id); + op_id = op_id_additional; + op_id_additional = NULL; + goto again; + } + if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } bool pcmk_acl_required(const char *user) { #if ENABLE_ACL if(user == NULL || strlen(user) == 0) { crm_trace("no user set"); return FALSE; } else if (strcmp(user, CRM_DAEMON_USER) == 0) { return FALSE; } else if (strcmp(user, "root") == 0) { return FALSE; } crm_trace("acls required for %s", user); return TRUE; #else crm_trace("acls not supported"); return FALSE; #endif } #if ENABLE_ACL char * uid2username(uid_t uid) { struct passwd *pwent = getpwuid(uid); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get password entry of uid: %d", uid); return NULL; } else { return strdup(pwent->pw_name); } } const char * crm_acl_get_set_user(xmlNode * request, const char *field, const char *peer_user) { /* field is only checked for backwards compatibility */ static const char *effective_user = NULL; const char *requested_user = NULL; const char *user = NULL; if(effective_user == NULL) { effective_user = uid2username(geteuid()); } requested_user = crm_element_value(request, XML_ACL_TAG_USER); if(requested_user == NULL) { requested_user = crm_element_value(request, field); } if (is_privileged(effective_user) == FALSE) { /* We're not running as a privileged user, set or overwrite any existing value for $XML_ACL_TAG_USER */ user = effective_user; } else if(peer_user == NULL && requested_user == NULL) { /* No user known or requested, use 'effective_user' and make sure one is set for the request */ user = effective_user; } else if(peer_user == NULL) { /* No user known, trusting 'requested_user' */ user = requested_user; } else if (is_privileged(peer_user) == FALSE) { /* The peer is not a privileged user, set or overwrite any existing value for $XML_ACL_TAG_USER */ user = peer_user; } else if (requested_user == NULL) { /* Even if we're privileged, make sure there is always a value set */ user = peer_user; } else { /* Legal delegation to 'requested_user' */ user = requested_user; } /* Yes, pointer comparision */ if(user != crm_element_value(request, XML_ACL_TAG_USER)) { crm_xml_add(request, XML_ACL_TAG_USER, user); } if(field != NULL && user != crm_element_value(request, field)) { crm_xml_add(request, field, user); } return requested_user; } void determine_request_user(const char *user, xmlNode * request, const char *field) { /* Get our internal validation out of the way first */ CRM_CHECK(user != NULL && request != NULL && field != NULL, return); /* If our peer is a privileged user, we might be doing something on behalf of someone else */ if (is_privileged(user) == FALSE) { /* We're not a privileged user, set or overwrite any existing value for $field */ crm_xml_replace(request, field, user); } else if (crm_element_value(request, field) == NULL) { /* Even if we're privileged, make sure there is always a value set */ crm_xml_replace(request, field, user); /* } else { Legal delegation */ } crm_trace("Processing msg as user '%s'", crm_element_value(request, field)); } #endif /* * This re-implements g_str_hash as it was prior to glib2-2.28: * * 
http://git.gnome.org/browse/glib/commit/?id=354d655ba8a54b754cb5a3efb42767327775696c * * Note that the new g_str_hash is presumably a *better* hash (it's actually * a correct implementation of DJB's hash), but we need to preserve existing * behaviour, because the hash key ultimately determines the "sort" order * when iterating through GHashTables, which affects allocation of scores to * clone instances when iterating through rsc->allowed_nodes. It (somehow) * also appears to have some minor impact on the ordering of a few * pseudo_event IDs in the transition graph. */ guint g_str_hash_traditional(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + *p; return h; } guint crm_strcase_hash(gconstpointer v) { const signed char *p; guint32 h = 0; for (p = v; *p != '\0'; p++) h = (h << 5) - h + g_ascii_tolower(*p); return h; } void * find_library_function(void **handle, const char *lib, const char *fn, gboolean fatal) { char *error; void *a_function; if (*handle == NULL) { *handle = dlopen(lib, RTLD_LAZY); } if (!(*handle)) { crm_err("%sCould not open %s: %s", fatal ? "Fatal: " : "", lib, dlerror()); if (fatal) { crm_exit(DAEMON_RESPAWN_STOP); } return NULL; } a_function = dlsym(*handle, fn); if ((error = dlerror()) != NULL) { crm_err("%sCould not find %s in %s: %s", fatal ? "Fatal: " : "", fn, lib, error); if (fatal) { crm_exit(DAEMON_RESPAWN_STOP); } } return a_function; } char * add_list_element(char *list, const char *value) { int len = 0; int last = 0; if (value == NULL) { return list; } if (list) { last = strlen(list); } len = last + 2; /* +1 space, +1 EOS */ len += strlen(value); list = realloc(list, len); sprintf(list + last, " %s", value); return list; } void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } #ifdef HAVE_UUID_UUID_H # include #endif char * crm_generate_uuid(void) { unsigned char uuid[16]; char *buffer = malloc(37); /* Including NUL byte */ uuid_generate(uuid); uuid_unparse(uuid, buffer); return buffer; } #include char * crm_md5sum(const char *buffer) { int lpc = 0, len = 0; char *digest = NULL; unsigned char raw_digest[MD5_DIGEST_SIZE]; if(buffer != NULL) { len = strlen(buffer); } crm_trace("Beginning digest of %d bytes", len); digest = malloc(2 * MD5_DIGEST_SIZE + 1); if(digest) { md5_buffer(buffer, len, raw_digest); for (lpc = 0; lpc < MD5_DIGEST_SIZE; lpc++) { sprintf(digest + (2 * lpc), "%02x", raw_digest[lpc]); } digest[(2 * MD5_DIGEST_SIZE)] = 0; crm_trace("Digest %s.", digest); } else { crm_err("Could not create digest"); } return digest; } #include #include bool crm_compress_string(const char *data, int length, int max, char **result, unsigned int *result_len) { int rc; char *compressed = NULL; char *uncompressed = strdup(data); struct timespec after_t; struct timespec before_t; if(max == 0) { max = (length * 1.1) + 600; /* recomended size */ } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &before_t); #endif /* coverity[returned_null] Ignore */ compressed = malloc(max); *result_len = max; rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length, CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK); free(uncompressed); if (rc != BZ_OK) { crm_err("Compression of %d bytes failed: %s (%d)", length, bz2_strerror(rc), rc); free(compressed); return FALSE; } #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &after_t); crm_info("Compressed %d bytes into %d (ratio %d:1) in %dms", length, *result_len, length / (*result_len), (after_t.tv_sec - before_t.tv_sec) * 1000 + 
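/*
 * Annotation (illustrative, not part of the original source): the
 * "(length * 1.1) + 600" default above is a margin around bzlib's
 * documented worst case; BZ2_bzBuffToBuffCompress() is guaranteed to fit
 * if the output buffer is 1% larger than the input plus 600 bytes, so a
 * 100000-byte payload needs at most 101600 bytes (the code reserves
 * 110600).
 */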
(after_t.tv_nsec - before_t.tv_nsec) / 1000000); #else crm_info("Compressed %d bytes into %d (ratio %d:1)", length, *result_len, length / (*result_len)); #endif *result = compressed; return TRUE; } diff --git a/pengine/graph.c b/pengine/graph.c index 1d0334e444..b2105232a0 100644 --- a/pengine/graph.c +++ b/pengine/graph.c @@ -1,1228 +1,1228 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include gboolean update_action(action_t * action); gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type); static enum pe_action_flags get_action_flags(action_t * action, node_t * node) { enum pe_action_flags flags = action->flags; if (action->rsc) { flags = action->rsc->cmds->action_flags(action, NULL); if (action->rsc->variant >= pe_clone && node) { /* We only care about activity on $node */ enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node); /* Go to great lengths to ensure the correct value for pe_action_runnable... * * If we are a clone, then for _ordering_ constraints, its only relevant * if we are runnable _anywhere_. * * This only applies to _runnable_ though, and only for ordering constraints. * If this function is ever used during colocation, then we'll need additional logic * * Not very satisfying, but its logical and appears to work well. 
*/ if (is_not_set(clone_flags, pe_action_runnable) && is_set(flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid); set_bit(clone_flags, pe_action_runnable); } flags = clone_flags; } } return flags; } static char * convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify, gboolean free_original) { int interval = 0; char *uuid = NULL; char *rid = NULL; char *raw_task = NULL; int task = no_action; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing %s", old_uuid); if (old_uuid == NULL) { return NULL; } else if (strstr(old_uuid, "notify") != NULL) { goto done; /* no conversion */ } else if (rsc->variant < pe_group) { goto done; /* no conversion */ } CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval)); if (interval > 0) { goto done; /* no conversion */ } task = text2task(raw_task); switch (task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: task--; break; case monitor_rsc: case shutdown_crm: case stonith_node: task = no_action; break; default: crm_err("Unknown action: %s", raw_task); task = no_action; break; } if (task != no_action) { if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) { uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1)); } else { uuid = generate_op_key(rid, task2text(task + 1), 0); } pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(old_uuid); } if (free_original) { free(old_uuid); } free(raw_task); free(rid); return uuid; } static action_t * rsc_expand_action(action_t * action) { action_t *result = action; if (action->rsc && action->rsc->variant >= pe_group) { /* Expand 'start' -> 'started' */ char *uuid = NULL; gboolean notify = FALSE; if (action->rsc->parent == NULL) { /* Only outter-most resources have notification actions */ notify = is_set(action->rsc->flags, pe_rsc_notify); } uuid = convert_non_atomic_uuid(action->uuid, action->rsc, notify, FALSE); if (uuid) { pe_rsc_trace(action->rsc, "Converting %s to %s %d", action->uuid, uuid, is_set(action->rsc->flags, pe_rsc_notify)); result = find_first_action(action->rsc->actions, uuid, NULL, NULL); if (result == NULL) { crm_err("Couldn't expand %s", action->uuid); result = action; } free(uuid); } } return result; } static enum pe_graph_flags graph_update_action(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; gboolean processed = FALSE; /* TODO: Do as many of these in parallel as possible */ if (type & pe_order_implies_then) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_then); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(then, pe_action_optional | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies right: %s then %s", first->uuid, then->uuid); } } if ((type & pe_order_restart) && then->rsc) { enum pe_action_flags restart = (pe_action_optional | pe_action_runnable); processed = TRUE; changed |= then->rsc->cmds->update_actions(first, then, node, flags, restart, pe_order_restart); if (changed) { pe_rsc_trace(then->rsc, "restart: %s then %s: 
changed", first->uuid, then->uuid); } else { crm_trace("restart: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first) { processed = TRUE; if (first->rsc) { changed |= first->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_implies_first); } else if (is_set(flags, pe_action_optional) == FALSE) { if (update_action_flags(first, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_first; } } if (changed) { pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_master) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags & pe_action_optional, pe_action_optional, pe_order_implies_first_master); } if (changed) { pe_rsc_trace(then->rsc, "implies left when right rsc is Master role: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_one_or_more) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_one_or_more); } else if (is_set(flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_runnable_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_runnable_left); } else if (is_set(flags, pe_action_runnable) == FALSE) { if (update_action_flags(then, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_migratable) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_implies_first_migratable); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_pseudo_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_optional, pe_order_pseudo_left); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_optional) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_optional); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_asymmetrical) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, flags, pe_action_runnable, pe_order_asymmetrical); } if (changed) { pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("asymmetrical: %s 
then %s", first->uuid, then->uuid); } } if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", first->uuid, then->uuid); update_action_flags(then, pe_action_print_always); /* dont care about changed */ } if ((type & pe_order_implies_first_printed) && (flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", then->uuid, first->uuid); update_action_flags(first, pe_action_print_always); /* dont care about changed */ } if ((type & pe_order_implies_then || type & pe_order_implies_first || type & pe_order_restart) && first->rsc && safe_str_eq(first->task, RSC_STOP) && is_not_set(first->rsc->flags, pe_rsc_managed) && is_set(first->rsc->flags, pe_rsc_block) && is_not_set(first->flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable | pe_action_clear)) { changed |= pe_graph_updated_then; } if (changed) { pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid); } } if (processed == FALSE) { crm_trace("Constraint 0x%.6x not applicable", type); } return changed; } gboolean update_action(action_t * then) { GListPtr lpc = NULL; enum pe_graph_flags changed = pe_graph_none; int last_flags = then->flags; crm_trace("Processing %s (%s %s %s)", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->uname : ""); if (is_set(then->flags, pe_action_requires_any)) { clear_bit(then->flags, pe_action_runnable); } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; action_t *first = other->action; node_t *then_node = then->node; node_t *first_node = first->node; enum pe_action_flags then_flags = 0; enum pe_action_flags first_flags = 0; if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node) { crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid); } } if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node) { crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid); } } clear_bit(changed, pe_graph_updated_first); if (first->rsc != then->rsc && first->rsc != NULL && then->rsc != NULL && first->rsc != then->rsc->parent) { first = rsc_expand_action(first); } if (first != other->action) { crm_trace("Ordering %s afer %s instead of %s", then->uuid, first->uuid, other->action->uuid); } first_flags = get_action_flags(first, then_node); then_flags = get_action_flags(then, first_node); crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x", then->uuid, is_set(then_flags, pe_action_optional) ? "optional" : "required", is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then_flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details-> uname : "", first->uuid, is_set(first_flags, pe_action_optional) ? "optional" : "required", is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first_flags, pe_action_pseudo) ? "pseudo" : first->node ? 
first->node->details-> uname : "", first_flags, other->type); if (first == other->action) { /* * 'first' was not expanded (ie. from 'start' to 'running'), which could mean it: * - has no associated resource, * - was a primitive, * - was pre-expanded (ie. 'running' instead of 'start') * * The third argument here to graph_update_action() is a node which is used under two conditions: * - Interleaving, in which case first->node and * then->node are equal (and NULL) * - If 'then' is a clone, to limit the scope of the * constraint to instances on the supplied node * */ int otype = other->type; node_t *node = then->node; if(is_set(otype, pe_order_implies_then_on_node)) { /* Normally we want the _whole_ 'then' clone to * restart if 'first' is restarted, so then->node is * needed. * * However for unfencing, we want to limit this to * instances on the same node as 'first' (the * unfencing operation), so first->node is supplied. * * Swap the node, from then on we can treat it * like any other 'pe_order_implies_then' */ clear_bit(otype, pe_order_implies_then_on_node); set_bit(otype, pe_order_implies_then); node = first->node; } clear_bit(first_flags, pe_action_pseudo); changed |= graph_update_action(first, then, node, first_flags, otype); /* 'first' was for a complex resource (clone, group, etc), * create a new dependency if necessary */ } else if (order_actions(first, then, other->type)) { /* This was the first time 'first' and 'then' were associated, * start again to get the new actions_before list */ changed |= (pe_graph_updated_then | pe_graph_disable); } if (changed & pe_graph_disable) { crm_trace("Disabled constraint %s -> %s", other->action->uuid, then->uuid); clear_bit(changed, pe_graph_disable); other->type = pe_order_none; } if (changed & pe_graph_updated_first) { GListPtr lpc2 = NULL; crm_trace("Updated %s (first %s %s %s), processing dependants ", first->uuid, is_set(first->flags, pe_action_optional) ? "optional" : "required", is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first->flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : ""); for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other = (action_wrapper_t *) lpc2->data; update_action(other->action); } update_action(first); } } if (is_set(then->flags, pe_action_requires_any)) { if (last_flags != then->flags) { changed |= pe_graph_updated_then; } else { clear_bit(changed, pe_graph_updated_then); } } if (changed & pe_graph_updated_then) { crm_trace("Updated %s (then %s %s %s), processing dependants ", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? 
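/*
 * Annotation (illustrative, not part of the original source):
 * update_action() iterates to a fixed point. When rsc_expand_action()
 * redirects an ordering to an expanded action, order_actions() creates a
 * fresh constraint and the superseded one is downgraded to pe_order_none
 * (via pe_graph_disable) so it is never evaluated again, while
 * pe_graph_updated_first forces every action ordered after 'first' to be
 * reprocessed.
 */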
then->node->details-> uname : ""); update_action(then); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; update_action(other->action); } } return FALSE; } gboolean shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set) { /* add the stop to the before lists so it counts as a pre-req * for the shutdown */ GListPtr lpc = NULL; for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) { action_t *action = (action_t *) lpc->data; if (action->rsc == NULL || action->node == NULL) { continue; } else if (action->node->details != node->details) { continue; } else if (is_set(action->rsc->flags, pe_rsc_maintenance)) { pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid); continue; } else if (node->details->maintenance) { pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode", action->uuid, node->details->uname); continue; } else if (safe_str_neq(action->task, RSC_STOP)) { continue; } else if (is_not_set(action->rsc->flags, pe_rsc_managed) && is_not_set(action->rsc->flags, pe_rsc_block)) { /* * If another action depends on this one, we may still end up blocking */ pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid); continue; } pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid, node->details->uname); pe_clear_action_bit(action, pe_action_optional); custom_action_order(action->rsc, NULL, action, NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op, pe_order_optional | pe_order_runnable_left, data_set); } return TRUE; } gboolean stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set) { CRM_CHECK(stonith_op != NULL, return FALSE); /* * Make sure the stonith OP occurs before we start any shared resources */ if (stonith_op != NULL) { GListPtr lpc = NULL; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc_stonith_ordering(rsc, stonith_op, data_set); } } /* add the stonith OP as a stop pre-req and mark the stop * as a pseudo op - since it's now redundant */ return TRUE; } static node_t * get_router_node(action_t *action) { node_t *began_on = NULL; node_t *ended_on = NULL; node_t *router_node = NULL; if (is_remote_node(action->node) == FALSE) { return NULL; } CRM_ASSERT(action->node->details->remote_rsc != NULL); if (action->node->details->remote_rsc->running_on) { began_on = action->node->details->remote_rsc->running_on->data; } ended_on = action->node->details->remote_rsc->allocated_to; /* if there is only one location to choose from, * this is easy. Check for those conditions first */ if (!began_on || !ended_on) { /* remote rsc is either shutting down or starting up */ return began_on ? began_on : ended_on; } else if (began_on->details == ended_on->details) { /* remote rsc didn't move nodes. */ return began_on; } /* If we get here, we know the remote resource * began on one node and is moving to another node. * * This means some actions will get routed through the cluster * node the connection rsc began on, and others are routed through * the cluster node the connection rsc ends up on. * * 1. stop, demote, migrate actions of resources living in the remote * node _MUST_ occur _BEFORE_ the connection can move (these actions * are all required before the remote rsc stop action can occur.) In * this case, we know these actions have to be routed through the initial * cluster node the connection resource lived on before the move takes place. 
static node_t *
get_router_node(action_t *action)
{
    node_t *began_on = NULL;
    node_t *ended_on = NULL;
    node_t *router_node = NULL;

    if (is_remote_node(action->node) == FALSE) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    if (action->node->details->remote_rsc->running_on) {
        began_on = action->node->details->remote_rsc->running_on->data;
    }
    ended_on = action->node->details->remote_rsc->allocated_to;

    /* if there is only one location to choose from,
     * this is easy. Check for those conditions first */
    if (!began_on || !ended_on) {
        /* remote rsc is either shutting down or starting up */
        return began_on ? began_on : ended_on;
    } else if (began_on->details == ended_on->details) {
        /* remote rsc didn't move nodes. */
        return began_on;
    }

    /* If we get here, we know the remote resource
     * began on one node and is moving to another node.
     *
     * This means some actions will get routed through the cluster
     * node the connection rsc began on, and others are routed through
     * the cluster node the connection rsc ends up on.
     *
     * 1. stop, demote, migrate actions of resources living in the remote
     *    node _MUST_ occur _BEFORE_ the connection can move (these actions
     *    are all required before the remote rsc stop action can occur.) In
     *    this case, we know these actions have to be routed through the initial
     *    cluster node the connection resource lived on before the move takes place.
     *
     * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount,
     *    delete, ...) must occur after the resource starts on the node it is
     *    moving to.
     */

    /* 1. before connection rsc moves. */
    if (safe_str_eq(action->task, "stop") ||
        safe_str_eq(action->task, "demote") ||
        safe_str_eq(action->task, "migrate_from") ||
        safe_str_eq(action->task, "migrate_to")) {

        router_node = began_on;

    /* 2. after connection rsc moves. */
    } else {
        router_node = ended_on;
    }
    return router_node;
}
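/* action2xml:
 * Serialize an action into the XML form used in the transition graph.
 * Fencing, shutdown, and other CRM ops become crm_event nodes, pseudo
 * actions become pseudo_event nodes, and everything else becomes an
 * rsc_op. When as_input is TRUE, only the identifying XML is produced,
 * for use inside a synapse's trigger inputs.
 */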
static xmlNode *
action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set)
{
    gboolean needs_node_info = TRUE;
    xmlNode *action_xml = NULL;
    xmlNode *args_xml = NULL;
    char *action_id_s = NULL;

    if (action == NULL) {
        return NULL;
    }

    if (safe_str_eq(action->task, CRM_OP_FENCE)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
        /* needs_node_info = FALSE; */

    } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);

    } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);

    } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);

/*     } else if(safe_str_eq(action->task, RSC_PROBED)) { */
/*         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */

    } else if (is_set(action->flags, pe_action_pseudo)) {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT);
        needs_node_info = FALSE;

    } else {
        action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
    }

    action_id_s = crm_itoa(action->id);
    crm_xml_add(action_xml, XML_ATTR_ID, action_id_s);
    free(action_id_s);

    crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task);
    if (action->rsc != NULL && action->rsc->clone_name != NULL) {
        char *clone_key = NULL;
        const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
        int interval = crm_parse_int(interval_s, "0");

        if (safe_str_eq(action->task, RSC_NOTIFY)) {
            const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
            const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");

            CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid));
            CRM_CHECK(n_task != NULL,
                      crm_err("No notify operation value found for %s", action->uuid));
            clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task);

        } else if(action->cancel_task) {
            clone_key = generate_op_key(action->rsc->clone_name, action->cancel_task, interval);
        } else {
            clone_key = generate_op_key(action->rsc->clone_name, action->task, interval);
        }

        CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid));
        crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
        crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
        free(clone_key);

    } else {
        crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
    }

    if (needs_node_info && action->node != NULL) {
        node_t *router_node = get_router_node(action);

        crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
        crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
        if (router_node) {
            crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname);
        }
    }

    if (is_set(action->flags, pe_action_failure_is_fatal) == FALSE) {
        add_hash_param(action->meta, XML_ATTR_TE_ALLOWFAIL, XML_BOOLEAN_TRUE);
    }

    if (as_input) {
        return action_xml;
    }

    if (action->rsc) {
        if (is_set(action->flags, pe_action_pseudo) == FALSE) {
            int lpc = 0;

            xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml));

            const char *attr_list[] = {
                XML_AGENT_ATTR_CLASS,
                XML_AGENT_ATTR_PROVIDER,
                XML_ATTR_TYPE
            };

            if (is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) {
                /* Do not use the 'instance free' name here as that
                 * might interfere with the instance we plan to keep.
                 * I.e. if there are more than two named /anonymous/
                 * instances on a given node, we need to make sure the
                 * command goes to the right one.
                 *
                 * Keep this block, even when everyone is using
                 * 'instance free' anonymous clone names - it means
                 * we'll do the right thing if anyone toggles the
                 * unique flag to 'off'
                 */
                crm_debug("Using orphan clone name %s instead of %s", action->rsc->id,
                          action->rsc->clone_name);
                crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
                crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);

            } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) {
                const char *xml_id = ID(action->rsc->xml);

                crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id,
                          action->rsc->clone_name);

                /* ID is what we'd like client to use
                 * ID_LONG is what they might know it as instead
                 *
                 * ID_LONG is only strictly needed /here/ during the
                 * transition period until all nodes in the cluster
                 * are running the new software /and/ have rebooted
                 * once (meaning that they've only ever spoken to a DC
                 * supporting this feature).
                 *
                 * If anyone toggles the unique flag to 'on', the
                 * 'instance free' name will correspond to an orphan
                 * and fall into the clause above instead
                 */
                crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id);
                if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) {
                    crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name);
                } else {
                    crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
                }

            } else {
                CRM_ASSERT(action->rsc->clone_name == NULL);
                crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id);
            }

            for (lpc = 0; lpc < DIMOF(attr_list); lpc++) {
                crm_xml_add(rsc_xml, attr_list[lpc],
                            g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
            }
        }
    }

    args_xml = create_xml_node(NULL, XML_TAG_ATTRS);
    crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);

    g_hash_table_foreach(action->extra, hash2field, args_xml);
    if (action->rsc != NULL && action->node) {
        GHashTable *p = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                              g_hash_destroy_str, g_hash_destroy_str);

        get_rsc_attributes(p, action->rsc, action->node, data_set);
        g_hash_table_foreach(p, hash2smartfield, args_xml);

        g_hash_table_destroy(p);

    } else if(action->rsc) {
        g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml);
    }

    g_hash_table_foreach(action->meta, hash2metafield, args_xml);
    if (action->rsc != NULL) {
        resource_t *parent = action->rsc;

        while (parent != NULL) {
            parent->cmds->append_meta(parent, args_xml);
            parent = parent->parent;
        }

    } else if (safe_str_eq(action->task, CRM_OP_FENCE) && action->node) {
        g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
    }

    sorted_xml(args_xml, action_xml, FALSE);
    crm_log_xml_trace(action_xml, "dumped action");
    free_xml(args_xml);

    return action_xml;
}
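/* should_dump_action:
 * Decide whether an action belongs in the transition graph at all.
 * Already-dumped, unrunnable, and optional actions are skipped, with
 * exceptions for probes, recurring monitors on unmanaged resources,
 * pseudo actions, fencing, and shutdown.
 */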
static gboolean
should_dump_action(action_t * action)
{
    CRM_CHECK(action != NULL, return FALSE);

    if (is_set(action->flags, pe_action_dumped)) {
        crm_trace("action %d (%s) was already dumped", action->id, action->uuid);
        return FALSE;

    } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) {
        GListPtr lpc = NULL;

        /* This is a horrible but convenient hack
         *
         * It minimizes the number of actions with unsatisfied inputs
         * (i.e. not included in the graph)
         *
         * This in turn, means we can be more concise when printing
         * aborted/incomplete graphs.
         *
         * It also makes it obvious which node is preventing
         * probe_complete from running (presumably because it is only
         * partially up)
         *
         * For these reasons we tolerate such perversions
         */

        for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) {
            action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data;

            if (is_not_set(wrapper->action->flags, pe_action_runnable)) {
                /* Only interested in runnable operations */
            } else if (safe_str_neq(wrapper->action->task, RSC_START)) {
                /* Only interested in start operations */
            } else if (is_set(wrapper->action->flags, pe_action_dumped)) {
                crm_trace("action %d (%s) dependency of %s",
                          action->id, action->uuid, wrapper->action->uuid);
                return TRUE;

            } else if (should_dump_action(wrapper->action)) {
                crm_trace("action %d (%s) dependency of %s",
                          action->id, action->uuid, wrapper->action->uuid);
                return TRUE;
            }
        }
    }

    if (is_set(action->flags, pe_action_runnable) == FALSE) {
        crm_trace("action %d (%s) was not runnable", action->id, action->uuid);
        return FALSE;

    } else if (is_set(action->flags, pe_action_optional)
               && is_set(action->flags, pe_action_print_always) == FALSE) {
        crm_trace("action %d (%s) was optional", action->id, action->uuid);
        return FALSE;

    } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) {
        const char *interval = NULL;

        interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);

        /* make sure probes and recurring monitors go through */
        if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) {
            crm_trace("action %d (%s) was for an unmanaged resource (%s)",
                      action->id, action->uuid, action->rsc->id);
            return FALSE;
        }
    }

    if (is_set(action->flags, pe_action_pseudo)
        || safe_str_eq(action->task, CRM_OP_FENCE)
        || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
        /* skip the next checks */
        return TRUE;
    }

    if (action->node == NULL) {
        pe_err("action %d (%s) was not allocated", action->id, action->uuid);
        log_action(LOG_DEBUG, "Unallocated action", action, FALSE);
        return FALSE;

    } else if (action->node->details->online == FALSE) {
        pe_err("action %d (%s) was scheduled for offline node", action->id, action->uuid);
        log_action(LOG_DEBUG, "Action for offline node", action, FALSE);
        return FALSE;

#if 0
        /* but this would also affect resources that can be safely
         * migrated before a fencing op
         */
    } else if (action->node->details->unclean == FALSE) {
        pe_err("action %d (%s) was scheduled for unclean node", action->id, action->uuid);
        log_action(LOG_DEBUG, "Action for unclean node", action, FALSE);
        return FALSE;
#endif
    }
    return TRUE;
}

/* lowest to highest */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
    const action_wrapper_t *action_wrapper2 = (const action_wrapper_t *)a;
    const action_wrapper_t *action_wrapper1 = (const action_wrapper_t *)b;

    if (a == NULL) {
        return 1;
    }
    if (b == NULL) {
        return -1;
    }
    if (action_wrapper1->action->id > action_wrapper2->action->id) {
        return -1;
    }
    if (action_wrapper1->action->id < action_wrapper2->action->id) {
        return 1;
    }
    return 0;
}
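/* should_dump_input:
 * Decide whether an ordering edge (wrapper) should be emitted as an
 * input for 'action' in the graph. Duplicate, optional, load-based,
 * and anti-colocation edges are filtered out where they add nothing,
 * and invalid edges involving remote-node resources are downgraded
 * to pe_order_none.
 */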
static gboolean
should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper)
{
    int type = wrapper->type;

    type &= ~pe_order_implies_first_printed;
    type &= ~pe_order_implies_then_printed;
    type &= ~pe_order_optional;

    if (wrapper->action->node
        && action->rsc && action->rsc->fillers
        && is_not_set(type, pe_order_preserve)
        && wrapper->action->node->details->remote_rsc
        && uber_parent(action->rsc) != uber_parent(wrapper->action->rsc)) {

        /* This prevents user-defined ordering constraints between
         * resources in remote nodes and the resources that
         * define/represent a remote node.
         *
         * There is no known valid reason to allow this sort of thing
         * but if one arises, we'd need to change the
         * action->rsc->fillers clause to be more specific, possibly
         * to check that it contained wrapper->action->rsc
         */
        crm_warn("Invalid ordering constraint between %s and %s",
                 wrapper->action->rsc->id, action->rsc->id);
        wrapper->type = pe_order_none;
        return FALSE;
    }

    wrapper->state = pe_link_not_dumped;
    if (last_action == wrapper->action->id) {
        crm_trace("Input (%d) %s duplicated for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        wrapper->state = pe_link_dup;
        return FALSE;

    } else if (wrapper->type == pe_order_none) {
        crm_trace("Input (%d) %s suppressed for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE
               && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) {
        crm_trace("Input (%d) %s optional (ordering) for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(action->flags, pe_action_pseudo)
               && (wrapper->type & pe_order_stonith_stop)) {
        crm_trace("Input (%d) %s suppressed for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if ((wrapper->type & pe_order_implies_first_migratable)
               && (is_set(wrapper->action->flags, pe_action_runnable) == FALSE)) {
        return FALSE;

    } else if ((wrapper->type & pe_order_apply_first_non_migratable)
               && (is_set(wrapper->action->flags, pe_action_migrate_runnable))) {
        return FALSE;

    } else if ((wrapper->type == pe_order_optional)
               && strstr(wrapper->action->uuid, "_stop_0")
               && is_set(wrapper->action->flags, pe_action_migrate_runnable)) {

        /* for optional only ordering, ordering is not preserved for
         * a stop action that is actually involved with a migration. */
        return FALSE;

    } else if (wrapper->type == pe_order_load) {
        crm_trace("check load filter %s.%s -> %s.%s",
                  wrapper->action->uuid,
                  wrapper->action->node ? wrapper->action->node->details->uname : "",
                  action->uuid, action->node ? action->node->details->uname : "");

        if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) {
            /* Remove the orders like:
             *     "load_stopped_node2" -> "rscA_migrate_to node1"
             * which were created from: pengine/native.c: MigrateRsc()
             *     order_actions(other, then, other_w->type);
             */
            wrapper->type = pe_order_none;
            return FALSE;

        } else if (wrapper->action->node == NULL || action->node == NULL
                   || wrapper->action->node->details != action->node->details) {

            /* Check if the actions are for the same node, ignore otherwise */
            crm_trace("load filter - node");
            wrapper->type = pe_order_none;
            return FALSE;

        } else if (is_set(wrapper->action->flags, pe_action_optional)) {
            /* Check if the pre-req is optional, ignore if so */
            crm_trace("load filter - optional");
            wrapper->type = pe_order_none;
            return FALSE;
        }
    } else if (wrapper->type == pe_order_anti_colocation) {
        crm_trace("check anti-colocation filter %s.%s -> %s.%s",
                  wrapper->action->uuid,
                  wrapper->action->node ? wrapper->action->node->details->uname : "",
                  action->uuid, action->node ? action->node->details->uname : "");

        if (wrapper->action->node && action->node
            && wrapper->action->node->details != action->node->details) {

            /* Check if the actions are for the same node, ignore otherwise */
            crm_trace("anti-colocation filter - node");
            wrapper->type = pe_order_none;
            return FALSE;

        } else if (is_set(wrapper->action->flags, pe_action_optional)) {
            /* Check if the pre-req is optional, ignore if so */
            crm_trace("anti-colocation filter - optional");
            wrapper->type = pe_order_none;
            return FALSE;
        }

    } else if (wrapper->action->rsc
               && wrapper->action->rsc != action->rsc
               && is_set(wrapper->action->rsc->flags, pe_rsc_failed)
               && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed)
               && strstr(wrapper->action->uuid, "_stop_0")
               && action->rsc && action->rsc->variant >= pe_clone) {
-        crm_warn("Ignoring requirement that %s comeplete before %s:"
+        crm_warn("Ignoring requirement that %s complete before %s:"
                 " unmanaged failed resources cannot prevent clone shutdown",
                 wrapper->action->uuid, action->uuid);
        return FALSE;

    } else if (is_set(wrapper->action->flags, pe_action_dumped)
               || should_dump_action(wrapper->action)) {
        crm_trace("Input (%d) %s should be dumped for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        goto dump;

#if 0
    } else if (is_set(wrapper->action->flags, pe_action_runnable)
               && is_set(wrapper->action->flags, pe_action_pseudo)
               && wrapper->action->rsc->variant != pe_native) {
        crm_crit("Input (%d) %s should be dumped for %s",
                 wrapper->action->id, wrapper->action->uuid, action->uuid);
        goto dump;
#endif
    } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE
               && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) {
        crm_trace("Input (%d) %s optional for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
        crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x",
                  wrapper->action->id, wrapper->action->uuid, wrapper->action->node,
                  is_set(wrapper->action->flags, pe_action_pseudo),
                  is_set(wrapper->action->flags, pe_action_runnable),
                  is_set(wrapper->action->flags, pe_action_optional),
                  is_set(wrapper->action->flags, pe_action_print_always), wrapper->type);
        return FALSE;
    }

  dump:
    crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s",
              wrapper->action->id, wrapper->action->uuid, wrapper->action->node,
              is_set(wrapper->action->flags, pe_action_pseudo),
              is_set(wrapper->action->flags, pe_action_runnable),
              is_set(wrapper->action->flags, pe_action_optional),
              is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid);
    return TRUE;
}
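/* graph_element_from_action:
 * Emit one synapse into the transition graph for the given action:
 * an action_set holding the action itself, plus an inputs list with
 * one trigger entry per ordering dependency that survives
 * should_dump_input().
 */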
void
graph_element_from_action(action_t * action, pe_working_set_t * data_set)
{
    GListPtr lpc = NULL;
    int last_action = -1;
    int synapse_priority = 0;
    xmlNode *syn = NULL;
    xmlNode *set = NULL;
    xmlNode *in = NULL;
    xmlNode *input = NULL;
    xmlNode *xml_action = NULL;

    if (should_dump_action(action) == FALSE) {
        return;
    }

    set_bit(action->flags, pe_action_dumped);

    syn = create_xml_node(data_set->graph, "synapse");
    set = create_xml_node(syn, "action_set");
    in = create_xml_node(syn, "inputs");

    crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
    data_set->num_synapse++;

    if (action->rsc != NULL) {
        synapse_priority = action->rsc->priority;
    }
    if (action->priority > synapse_priority) {
        synapse_priority = action->priority;
    }
    if (synapse_priority > 0) {
        crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority);
    }

    xml_action = action2xml(action, FALSE, data_set);
    add_node_nocopy(set, crm_element_name(xml_action), xml_action);

    action->actions_before = g_list_sort(action->actions_before, sort_action_id);

    for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
        action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data;

        if (should_dump_input(last_action, action, wrapper) == FALSE) {
            continue;
        }

        wrapper->state = pe_link_dumped;
        CRM_CHECK(last_action < wrapper->action->id,;);
        last_action = wrapper->action->id;
        input = create_xml_node(in, "trigger");

        xml_action = action2xml(wrapper->action, TRUE, data_set);
        add_node_nocopy(input, crm_element_name(xml_action), xml_action);
    }
}