diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am index cc657f5806..84517a38cd 100644 --- a/daemons/pacemakerd/Makefile.am +++ b/daemons/pacemakerd/Makefile.am @@ -1,34 +1,36 @@ # # Copyright 2004-2021 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/mk/common.mk sbin_PROGRAMS = pacemakerd if BUILD_SYSTEMD systemdsystemunit_DATA = pacemaker.service endif +EXTRA_DIST = pacemakerd.8.inc + ## SOURCES noinst_HEADERS = pacemakerd.h pacemakerd_CFLAGS = $(CFLAGS_HARDENED_EXE) pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la pacemakerd_LDADD += $(CLUSTERLIBS) pacemakerd_SOURCES = pacemakerd.c if BUILD_CS_SUPPORT pacemakerd_SOURCES += pcmkd_corosync.c endif pacemakerd_SOURCES += pcmkd_messages.c pacemakerd_SOURCES += pcmkd_subdaemons.c CLEANFILES = $(man8_MANS) diff --git a/daemons/pacemakerd/pacemaker.service.in b/daemons/pacemakerd/pacemaker.service.in index b128ddcb3d..0363a2259c 100644 --- a/daemons/pacemakerd/pacemaker.service.in +++ b/daemons/pacemakerd/pacemaker.service.in @@ -1,97 +1,97 @@ [Unit] Description=Pacemaker High Availability Cluster Manager Documentation=man:pacemakerd Documentation=https://clusterlabs.org/pacemaker/doc/ # DefaultDependencies takes care of sysinit.target, # basic.target, and shutdown.target # We need networking to bind to a network address. It is recommended not to # use Wants or Requires with network.target, and not to use # network-online.target for server daemons. After=network.target # Time syncs can make the clock jump backward, which messes with logging # and failure timestamps, so wait until it's done. After=time-sync.target # Managing systemd resources requires DBus. After=dbus.service Wants=dbus.service # Some OCF resources may have dependencies that aren't managed by the cluster; # these must be started before Pacemaker and stopped after it. The # resource-agents package provides this target, which lets system adminstrators # add drop-ins for those dependencies. After=resource-agents-deps.target Wants=resource-agents-deps.target After=syslog.service After=rsyslog.service After=corosync.service Requires=corosync.service [Install] WantedBy=multi-user.target [Service] Type=simple KillMode=process NotifyAccess=main EnvironmentFile=-@CONFIGDIR@/pacemaker EnvironmentFile=-@CONFIGDIR@/sbd SuccessExitStatus=100 -ExecStart=@sbindir@/pacemakerd -f +ExecStart=@sbindir@/pacemakerd # Systemd v227 and above can limit the number of processes spawned by a # service. That is a bad idea for an HA cluster resource manager, so disable it # by default. The administrator can create a local override if they really want # a limit. If your systemd version does not support TasksMax, and you want to # get rid of the resulting log warnings, comment out this option. TasksMax=infinity # If pacemakerd doesn't stop, it's probably waiting on a cluster # resource. Sending -KILL will just get the node fenced SendSIGKILL=no # If we ever hit the StartLimitInterval/StartLimitBurst limit, and the # admin wants to stop the cluster while pacemakerd is not running, it # might be a good idea to enable the ExecStopPost directive below. # # However, the node will likely end up being fenced as a result, so it's # not enabled by default. 
# # ExecStopPost=/usr/bin/killall -TERM pacemaker-attrd pacemaker-based \ # pacemaker-controld pacemaker-execd pacemaker-fenced \ # pacemaker-schedulerd # If you want Corosync to stop whenever Pacemaker is stopped, # uncomment the next line too: # # ExecStopPost=/bin/sh -c 'pidof pacemaker-controld || killall -TERM corosync' # Pacemaker will restart along with Corosync if Corosync is stopped while # Pacemaker is running. # In this case, if you want to be fenced always (if you do not want to restart) # uncomment ExecStopPost below. # # ExecStopPost=/bin/sh -c 'pidof corosync || \ # /usr/bin/systemctl --no-block stop pacemaker' # When the service functions properly, it will wait to exit until all resources # have been stopped on the local node, and potentially across all nodes that # are shutting down. The default of 30min should cover most typical cluster # configurations, but it may need an increase to adapt to local conditions # (e.g. a large, clustered database could conceivably take longer to stop). TimeoutStopSec=30min TimeoutStartSec=60s # Restart options include: no, on-success, on-failure, on-abort or always Restart=on-failure # crm_perror() writes directly to stderr, so ignore it here # to avoid double-logging with the wrong format StandardError=null diff --git a/daemons/pacemakerd/pacemakerd.8.inc b/daemons/pacemakerd/pacemakerd.8.inc new file mode 100644 index 0000000000..902af4e678 --- /dev/null +++ b/daemons/pacemakerd/pacemakerd.8.inc @@ -0,0 +1,5 @@ +[synopsis] +pacemakerd [options] + +/subsidiary Pacemaker daemons/ +.SH OPTIONS diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c index 8ec9708634..ce194bfc87 100644 --- a/daemons/pacemakerd/pacemakerd.c +++ b/daemons/pacemakerd/pacemakerd.c @@ -1,378 +1,370 @@ /* * Copyright 2010-2021 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include "pacemakerd.h" #include #include #include #include #include #include #include #include #include #include /* indirectly: CRM_EX_* */ #include #include +#include #include #include #include -static const char *pid_file = PCMK_RUN_DIR "/pacemaker.pid"; +#define SUMMARY "pacemakerd - primary Pacemaker daemon that launches and monitors all subsidiary Pacemaker daemons" + +struct { + gboolean features; + gboolean foreground; + gboolean shutdown; + gboolean standby; +} options; + +static gboolean +pid_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { + return TRUE; +} + +static gboolean +standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) { + options.standby = TRUE; + pcmk__set_env_option("node_start_state", "standby"); + return TRUE; +} + +static GOptionEntry entries[] = { + { "features", 'F', 0, G_OPTION_ARG_NONE, &options.features, + "Display full version and list of features Pacemaker was built with", + NULL }, + { "foreground", 'f', 0, G_OPTION_ARG_NONE, &options.foreground, + "(Ignored) Pacemaker always runs in the foreground", + NULL }, + { "pid-file", 'p', 0, G_OPTION_ARG_CALLBACK, pid_cb, + "(Ignored) Daemon pid file location", + "FILE" }, + { "shutdown", 'S', 0, G_OPTION_ARG_NONE, &options.shutdown, + "Instruct Pacemaker to shutdown on this machine", + NULL }, + { "standby", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, standby_cb, + "Start node in standby state", + NULL }, + + { NULL } +}; static void pcmk_ignore(int nsig) { crm_info("Ignoring signal %s (%d)", strsignal(nsig), nsig); } static void pcmk_sigquit(int nsig) { pcmk__panic(__func__); } -static pcmk__cli_option_t long_options[] = { - // long option, argument type, storage, short option, description, flags - { - "help", no_argument, NULL, '?', - "\tThis text", pcmk__option_default - }, - { - "version", no_argument, NULL, '$', - "\tVersion information", pcmk__option_default - }, - { - "verbose", no_argument, NULL, 'V', - "\tIncrease debug output", pcmk__option_default - }, - { - "shutdown", no_argument, NULL, 'S', - "\tInstruct Pacemaker to shutdown on this machine", pcmk__option_default - }, - { - "features", no_argument, NULL, 'F', - "\tDisplay full version and list of features Pacemaker was built with", - pcmk__option_default - }, - { - "-spacer-", no_argument, NULL, '-', - "\nAdditional Options:", pcmk__option_default - }, - { - "foreground", no_argument, NULL, 'f', - "\t(Ignored) Pacemaker always runs in the foreground", - pcmk__option_default - }, - { - "pid-file", required_argument, NULL, 'p', - "\t(Ignored) Daemon pid file location", pcmk__option_default - }, - { - "standby", no_argument, NULL, 's', - "\tStart node in standby state", pcmk__option_default - }, - { 0, 0, 0, 0 } -}; - static void mcp_chown(const char *path, uid_t uid, gid_t gid) { int rc = chown(path, uid, gid); if (rc < 0) { crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s", path, CRM_DAEMON_USER, gid, pcmk_strerror(errno)); } } static void create_pcmk_dirs(void) { uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; const char *dirs[] = { CRM_PACEMAKER_DIR, // core/blackbox/scheduler/CIB files CRM_CORE_DIR, // core files CRM_BLACKBOX_DIR, // blackbox dumps PE_STATE_DIR, // scheduler inputs CRM_CONFIG_DIR, // the Cluster Information Base (CIB) // Don't build CRM_RSCTMP_DIR, pacemaker-execd will do it NULL }; if (pcmk_daemon_user(&pcmk_uid, &pcmk_gid) < 0) { crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER); 
crm_exit(CRM_EX_NOUSER); } // Used by some resource agents if ((mkdir(CRM_STATE_DIR, 0750) < 0) && (errno != EEXIST)) { crm_warn("Could not create directory " CRM_STATE_DIR ": %s", pcmk_rc_str(errno)); } else { mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); } for (int i = 0; dirs[i] != NULL; ++i) { int rc = pcmk__build_path(dirs[i], 0750); if (rc != pcmk_rc_ok) { crm_warn("Could not create directory %s: %s", dirs[i], pcmk_rc_str(rc)); } else { mcp_chown(dirs[i], pcmk_uid, pcmk_gid); } } } static void remove_core_file_limit(void) { struct rlimit cores; int rc = getrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_warn("Cannot determine current maximum core file size: %s", strerror(errno)); return; } if ((cores.rlim_max == 0) && (geteuid() == 0)) { cores.rlim_max = RLIM_INFINITY; } else { crm_info("Maximum core file size is %llu bytes", (unsigned long long) cores.rlim_max); } cores.rlim_cur = cores.rlim_max; rc = setrlimit(RLIMIT_CORE, &cores); if (rc < 0) { crm_warn("Cannot raise system limit on core file size " "(consider doing so manually)"); } } static void pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { pcmk_pacemakerd_api_reply_t *reply = event_data; switch (event_type) { case pcmk_ipc_event_reply: break; default: return; } if (status != CRM_EX_OK) { fprintf(stderr, "Bad reply from pacemakerd: %s", crm_exit_str(status)); return; } if (reply->reply_type != pcmk_pacemakerd_reply_shutdown) { fprintf(stderr, "Unknown reply type %d from pacemakerd", reply->reply_type); } } +static GOptionContext * +build_arg_context(pcmk__common_args_t *args) { + GOptionContext *context = NULL; + + context = pcmk__build_arg_context(args, NULL, NULL, NULL); + pcmk__add_main_args(context, entries); + return context; +} + int main(int argc, char **argv) { int rc = pcmk_rc_ok; - int flag; - int argerr = 0; + crm_exit_t exit_code = CRM_EX_OK; + + GError *error = NULL; + + pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); + gchar **processed_args = pcmk__cmdline_preproc(argv, "p"); + GOptionContext *context = build_arg_context(args); - int option_index = 0; bool old_instance_connected = false; - gboolean shutdown = FALSE; pcmk_ipc_api_t *old_instance = NULL; qb_ipcs_service_t *ipcs = NULL; crm_log_preinit(NULL, argc, argv); - pcmk__set_cli_options(NULL, "[options]", long_options, - "primary Pacemaker daemon that launches and " - "monitors all subsidiary Pacemaker daemons"); mainloop_add_signal(SIGHUP, pcmk_ignore); mainloop_add_signal(SIGQUIT, pcmk_sigquit); - while (1) { - flag = pcmk__next_cli_option(argc, argv, &option_index, NULL); - if (flag == -1) - break; - - switch (flag) { - case 'V': - crm_bump_log_level(argc, argv); - break; - case 'f': - /* Legacy */ - break; - case 'p': - pid_file = optarg; - break; - case 's': - pcmk__set_env_option("node_start_state", "standby"); - break; - case '$': - case '?': - pcmk__cli_help(flag, CRM_EX_OK); - break; - case 'S': - shutdown = TRUE; - break; - case 'F': - printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, - CRM_FEATURE_SET, CRM_FEATURES); - crm_exit(CRM_EX_OK); - default: - printf("Argument code 0%o (%c) is not (?yet?) 
supported\n", flag, flag); - ++argerr; - break; - } + if (!g_option_context_parse_strv(context, &processed_args, &error)) { + exit_code = CRM_EX_USAGE; + goto done; } - if (optind < argc) { - printf("non-option ARGV-elements: "); - while (optind < argc) - printf("%s ", argv[optind++]); - printf("\n"); - } - if (argerr) { - pcmk__cli_help('?', CRM_EX_USAGE); + if (options.features) { + printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, + CRM_FEATURE_SET, CRM_FEATURES); + exit_code = CRM_EX_OK; + goto done; } + if (args->version) { + g_strfreev(processed_args); + pcmk__free_arg_context(context); + /* FIXME: When pacemakerd is converted to use formatted output, this can go. */ + pcmk__cli_help('v', CRM_EX_USAGE); + } setenv("LC_ALL", "C", 1); pcmk__set_env_option("mcp", "true"); + pcmk__cli_init_logging("pacemakerd", args->verbosity); crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_debug("Checking for existing Pacemaker instance"); rc = pcmk_new_ipc_api(&old_instance, pcmk_ipc_pacemakerd); if (old_instance == NULL) { fprintf(stderr, "Could not connect to pacemakerd: %s", pcmk_rc_str(rc)); - crm_exit(pcmk_rc2exitc(rc)); + exit_code = pcmk_rc2exitc(rc); + goto done; } pcmk_register_ipc_callback(old_instance, pacemakerd_event_cb, NULL); rc = pcmk_connect_ipc(old_instance, pcmk_ipc_dispatch_sync); old_instance_connected = pcmk_ipc_is_connected(old_instance); - if (shutdown) { + if (options.shutdown) { if (old_instance_connected) { rc = pcmk_pacemakerd_api_shutdown(old_instance, crm_system_name); pcmk_dispatch_ipc(old_instance); pcmk_free_ipc_api(old_instance); - crm_exit(pcmk_rc2exitc(rc)); + exit_code = pcmk_rc2exitc(rc); + goto done; } else { crm_err("Could not request shutdown of existing " "Pacemaker instance: %s", strerror(errno)); pcmk_free_ipc_api(old_instance); - crm_exit(CRM_EX_DISCONNECT); + exit_code = CRM_EX_DISCONNECT; + goto done; } } else if (old_instance_connected) { pcmk_free_ipc_api(old_instance); crm_err("Aborting start-up because active Pacemaker instance found"); - crm_exit(CRM_EX_FATAL); + exit_code = CRM_EX_FATAL; + goto done; } pcmk_free_ipc_api(old_instance); #ifdef SUPPORT_COROSYNC if (mcp_read_config() == FALSE) { crm_exit(CRM_EX_UNAVAILABLE); } #endif // OCF shell functions and cluster-glue need facility under different name { const char *facility = pcmk__env_option("logfacility"); if (facility && !pcmk__str_eq(facility, "none", pcmk__str_casei)) { setenv("HA_LOGFACILITY", facility, 1); } } crm_notice("Starting Pacemaker %s "CRM_XS" build=%s features:%s", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); mainloop = g_main_loop_new(NULL, FALSE); remove_core_file_limit(); create_pcmk_dirs(); pcmk__serve_pacemakerd_ipc(&ipcs, &mcp_ipc_callbacks); #ifdef SUPPORT_COROSYNC /* Allows us to block shutdown */ if (!cluster_connect_cfg()) { - crm_exit(CRM_EX_PROTOCOL); + exit_code = CRM_EX_PROTOCOL; + goto done; } #endif if (pcmk__locate_sbd() > 0) { setenv("PCMK_watchdog", "true", 1); running_with_sbd = TRUE; } else { setenv("PCMK_watchdog", "false", 1); } switch (find_and_track_existing_processes()) { case pcmk_rc_ok: break; case pcmk_rc_ipc_unauthorized: - crm_exit(CRM_EX_CANTCREAT); + exit_code = CRM_EX_CANTCREAT; + goto done; default: - crm_exit(CRM_EX_FATAL); + exit_code = CRM_EX_FATAL; + goto done; }; mainloop_add_signal(SIGTERM, pcmk_shutdown); mainloop_add_signal(SIGINT, pcmk_shutdown); if ((running_with_sbd) && pcmk__get_sbd_sync_resource_startup()) { crm_notice("Waiting for startup-trigger from SBD."); 
pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_WAITPING; startup_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, init_children_processes, NULL); } else { if (running_with_sbd) { crm_warn("Enabling SBD_SYNC_RESOURCE_STARTUP would (if supported " "by your SBD version) improve reliability of " "interworking between SBD & pacemaker."); } pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS; init_children_processes(NULL); } crm_notice("Pacemaker daemon successfully started and accepting connections"); g_main_loop_run(mainloop); if (ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } g_main_loop_unref(mainloop); #ifdef SUPPORT_COROSYNC cluster_disconnect_cfg(); #endif - crm_exit(CRM_EX_OK); + +done: + g_strfreev(processed_args); + pcmk__free_arg_context(context); + + pcmk__output_and_clear_error(error, NULL); + crm_exit(exit_code); } diff --git a/doc/sphinx/Clusters_from_Scratch/verification.rst b/doc/sphinx/Clusters_from_Scratch/verification.rst index 9d647f81a0..b7fa20ea7e 100644 --- a/doc/sphinx/Clusters_from_Scratch/verification.rst +++ b/doc/sphinx/Clusters_from_Scratch/verification.rst @@ -1,215 +1,215 @@ Start and Verify Cluster ------------------------ Start the Cluster ################# Now that corosync is configured, it is time to start the cluster. The command below will start corosync and pacemaker on both nodes in the cluster. If you are issuing the start command from a different node than the one you ran the ``pcs host auth`` command on earlier, you must authenticate on the current node you are logged into before you will be allowed to start the cluster. .. code-block:: none [root@pcmk-1 ~]# pcs cluster start --all pcmk-1: Starting Cluster... pcmk-2: Starting Cluster... .. NOTE:: An alternative to using the ``pcs cluster start --all`` command is to issue either of the below command sequences on each node in the cluster separately: .. code-block:: none # pcs cluster start Starting Cluster... or .. code-block:: none # systemctl start corosync.service # systemctl start pacemaker.service .. IMPORTANT:: In this example, we are not enabling the corosync and pacemaker services to start at boot. If a cluster node fails or is rebooted, you will need to run ``pcs cluster start `` (or ``--all``) to start the cluster on it. While you could enable the services to start at boot, requiring a manual start of cluster services gives you the opportunity to do a post-mortem investigation of a node failure before returning it to the cluster. Verify Corosync Installation ############################ First, use ``corosync-cfgtool`` to check whether cluster communication is happy: .. code-block:: none [root@pcmk-1 ~]# corosync-cfgtool -s Printing link status. Local node ID 1 LINK ID 0 addr = 192.168.122.101 status: nodeid 1: localhost nodeid 2: connected We can see here that everything appears normal with our fixed IP address (not a 127.0.0.x loopback address) listed as the **addr**, and **localhost** and **connected** for the statuses of nodeid 1 and nodeid 2, respectively. If you see something different, you might want to start by checking the node's network, firewall and SELinux configurations. Next, check the membership and quorum APIs: .. 
code-block:: none [root@pcmk-1 ~]# corosync-cmapctl | grep members runtime.members.1.config_version (u64) = 0 runtime.members.1.ip (str) = r(0) ip(192.168.122.101) runtime.members.1.join_count (u32) = 1 runtime.members.1.status (str) = joined runtime.members.2.config_version (u64) = 0 runtime.members.2.ip (str) = r(0) ip(192.168.122.102) runtime.members.2.join_count (u32) = 1 runtime.members.2.status (str) = joined [root@pcmk-1 ~]# pcs status corosync Membership information ---------------------- Nodeid Votes Name 1 1 pcmk-1 (local) 2 1 pcmk-2 You should see both nodes have joined the cluster. Verify Pacemaker Installation ############################# Now that we have confirmed that Corosync is functional, we can check the rest of the stack. Pacemaker has already been started, so verify the necessary processes are running: .. code-block:: none [root@pcmk-1 ~]# ps axf PID TTY STAT TIME COMMAND 2 ? S 0:00 [kthreadd] ...lots of processes... 17121 ? SLsl 0:01 /usr/sbin/corosync -f - 17133 ? Ss 0:00 /usr/sbin/pacemakerd -f + 17133 ? Ss 0:00 /usr/sbin/pacemakerd 17134 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-based 17135 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-fenced 17136 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-execd 17137 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-attrd 17138 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-schedulerd 17139 ? Ss 0:00 \_ /usr/libexec/pacemaker/pacemaker-controld If that looks OK, check the ``pcs status`` output: .. code-block:: none [root@pcmk-1 ~]# pcs status Cluster name: mycluster WARNINGS: No stonith devices and stonith-enabled is not false Cluster Summary: * Stack: corosync * Current DC: pcmk-2 (version 2.0.5-4.el8-ba59be7122) - partition with quorum * Last updated: Wed Jan 20 07:54:02 2021 * Last change: Wed Jan 20 07:48:25 2021 by hacluster via crmd on pcmk-2 * 2 nodes configured * 0 resource instances configured Node List: * Online: [ pcmk-1 pcmk-2 ] Full List of Resources: * No resources Daemon Status: corosync: active/disabled pacemaker: active/disabled pcsd: active/enabled Finally, ensure there are no start-up errors from corosync or pacemaker (aside from messages relating to not having STONITH configured, which are OK at this point): .. code-block:: none [root@pcmk-1 ~]# journalctl -b | grep -i error .. NOTE:: Other operating systems may report startup errors in other locations, for example ``/var/log/messages``. Repeat these checks on the other node. The results should be the same. Explore the Existing Configuration ################################## For those who are not of afraid of XML, you can see the raw cluster configuration and status by using the ``pcs cluster cib`` command. .. topic:: The last XML you'll see in this document .. code-block:: none [root@pcmk-1 ~]# pcs cluster cib .. code-block:: xml Before we make any changes, it's a good idea to check the validity of the configuration. .. code-block:: none [root@pcmk-1 ~]# crm_verify -L -V error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity Errors found during check: config not valid As you can see, the tool has found some errors. The cluster will not start any resources until we configure STONITH. 
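Reviewer note: the pacemakerd.c hunk earlier in this patch drops the pcmk__cli_option_t table and hand-rolled option switch in favor of GLib's GOption parser. For anyone not familiar with that API, the following is a minimal, self-contained sketch (not Pacemaker code) of the same pattern: a GOptionEntry table, a no-argument callback option in the spirit of standby_cb(), and g_option_context_parse() doing the work. The option names, the EXAMPLE_NODE_START_STATE variable, and the exit code are illustrative assumptions only.

/*
 * Minimal GOption sketch, using only public GLib APIs.
 * Build: gcc example.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <stdio.h>
#include <glib.h>

static gboolean standby = FALSE;
static gboolean shutdown_req = FALSE;

/* Invoked when --standby/-s is seen; like standby_cb() in the patch, it
 * reacts to a flag at parse time (here by setting a hypothetical variable). */
static gboolean
standby_cb(const gchar *option_name, const gchar *value, gpointer data,
           GError **error)
{
    standby = TRUE;
    g_setenv("EXAMPLE_NODE_START_STATE", "standby", TRUE);
    return TRUE;
}

static GOptionEntry entries[] = {
    { "shutdown", 'S', 0, G_OPTION_ARG_NONE, &shutdown_req,
      "Request shutdown of a running instance", NULL },
    { "standby", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, standby_cb,
      "Start node in standby state", NULL },
    { NULL }
};

int
main(int argc, char **argv)
{
    GError *error = NULL;
    GOptionContext *context = g_option_context_new(NULL);

    g_option_context_set_summary(context, "example daemon front end");
    g_option_context_add_main_entries(context, entries, NULL);

    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        fprintf(stderr, "%s\n", error->message);
        g_error_free(error);
        g_option_context_free(context);
        return 64;  /* EX_USAGE, analogous to CRM_EX_USAGE */
    }

    printf("shutdown=%d standby=%d\n", shutdown_req, standby);
    g_option_context_free(context);
    return 0;
}

The real daemon additionally runs the command line through pcmk__cmdline_preproc() and builds its context with pcmk__build_arg_context()/pcmk__add_main_args() before calling g_option_context_parse_strv(), as shown in the hunk above.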
diff --git a/mk/common.mk b/mk/common.mk
index b2476700ee..aa59feb200 100644
--- a/mk/common.mk
+++ b/mk/common.mk
@@ -1,100 +1,100 @@
#
-# Copyright 2014-2020 the Pacemaker project contributors
+# Copyright 2014-2021 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#

#
# Some variables to help with silent rules
# https://www.gnu.org/software/automake/manual/html_node/Automake-silent_002drules-Option.html

V ?= $(AM_DEFAULT_VERBOSITY)

# When a make command is prefixed with one of the AM_V_* macros, it may also be
# desirable to suffix the command with this, to silence stdout.
PCMK_quiet = $(pcmk_quiet_$(V))
pcmk_quiet_0 = >/dev/null
pcmk_quiet_1 =

# AM_V_GEN is intended to be used in custom pattern rules, and replaces echoing
# the command used with a more concise line with "GEN" and the name of the file
# being generated. Our AM_V_* macros are similar but more descriptive.
AM_V_MAN = $(am__v_MAN_$(V))
am__v_MAN_0 = @echo " MAN $@";
am__v_MAN_1 =

AM_V_SCHEMA = $(am__v_SCHEMA_$(V))
am__v_SCHEMA_0 = @echo " SCHEMA $@";
am__v_SCHEMA_1 =

AM_V_BOOK = $(am__v_BOOK_$(V))
am__v_BOOK_0 = @echo " BOOK $(@:%/_build=%): $(BOOK_FORMATS)";
am__v_BOOK_1 =

MAINTAINERCLEANFILES = Makefile.in

AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
	      -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl

#
# Man page builders
#
# We have three types of man pages:
# - man pages for the tools
# - man pages for OCF agents
# - man pages for cluster properties used by daemons
#
# "BUILD_HELP" actually means "help2man is available", so it only controls the
# tool man pages, which are generated by help2man. The other man pages are
# generated via XSL transforms.
#

if BUILD_HELP
man8_MANS = $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8)

HELP2MAN_ARGS = -N --section 8 --name "Part of the Pacemaker cluster resource manager"

# Some of our tools' help are just shell script invocations of another tool's
# help. Putting the real tool in MAN8DEPS helps detect when the wrapped help
# needs updating.
#
# If a ".inc" file exists, the tool has been converted to use glib for
# argument parsing, otherwise it still uses the libcrmcommon functions.
#
# @TODO Drop MAN8DEPS once we've converted all tools to libpacemaker API calls
# and all wrappers to C code.
%.8: % $(MAN8DEPS)
	$(AM_V_at)chmod a+x $(abs_builddir)/$<
-	$(AM_V_MAN)if [ -f $(top_srcdir)/tools/$@.inc ]; then \
+	$(AM_V_MAN)if [ -f $(abs_srcdir)/$@.inc ]; then \
		PATH=$(abs_builddir):$$PATH $(HELP2MAN) $(HELP2MAN_ARGS) \
			-h --help-all \
			--no-discard-stderr \
-			-i $(top_srcdir)/tools/$@.inc $(abs_builddir)/$< \
+			-i $(abs_srcdir)/$@.inc $(abs_builddir)/$< \
			| sed -f $(top_srcdir)/tools/fix-manpages > $@ ; \
	else \
		PATH=$(abs_builddir):$$PATH $(HELP2MAN) $(HELP2MAN_ARGS) \
			--no-discard-stderr \
			$(abs_builddir)/$< --output $@ ; \
	fi
endif

# Save raw XML meta-data from daemon executables, for later conversion into man
# pages. (Note that more specific rules may override this for creating other
# types of XML files.)
%.xml: %
	$(AM_V_at)$(abs_builddir)/$< metadata > $@

# Process the raw daemon and OCF agent meta-data output using our
# meta-data-to-docbook-XML transform.
%.dbook: %.xml
	$(AM_V_at)$(XSLTPROC) --nonet --novalid --stringparam man.name $* \
		$(DBOOK_OPTS) $(top_srcdir)/xml/ocf-meta2man.xsl \
		$(abs_builddir)/$< > $(abs_builddir)/$@

# Generate the actual man page for an OCF resource agent from the intermediate
# docbook XML.
%.7: %.dbook
	$(AM_V_MAN)$(XSLTPROC) $(MANPAGE_XSLT) $(abs_builddir)/$< $(PCMK_quiet)
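One more background note on the mk/common.mk hunk, reflecting my reading rather than anything stated in the patch: the %.8 rule passes "-h --help-all" to help2man only when a .inc file exists because a tool converted to GOption gets --help and --help-all generated by GLib for free, so help2man can scrape that output; the .inc file then only needs to supply the [synopsis] line and any extra section text, as the new pacemakerd.8.inc does. A tiny sketch of where that help text comes from:

/*
 * Sketch only: a GOptionContext can render the same text that "--help-all"
 * prints (and that help2man scrapes).  Passing FALSE as the second argument
 * asks GLib for the help covering all option groups.
 */
#include <stdio.h>
#include <glib.h>

int
main(void)
{
    GOptionContext *context = g_option_context_new("[options]");
    gchar *help = g_option_context_get_help(context, FALSE, NULL);

    fputs(help, stdout);    /* roughly what "mytool --help-all" would emit */

    g_free(help);
    g_option_context_free(context);
    return 0;
}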