diff --git a/daemons/execd/pacemaker_remote.service.in b/daemons/execd/pacemaker_remote.service.in index 825f8edc9d..3189a59865 100644 --- a/daemons/execd/pacemaker_remote.service.in +++ b/daemons/execd/pacemaker_remote.service.in @@ -1,46 +1,49 @@ [Unit] Description=Pacemaker Remote executor daemon Documentation=man:pacemaker-remoted Documentation=https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html-single/Pacemaker_Remote/index.html # See main pacemaker unit file for descriptions of why these are needed After=network.target After=time-sync.target After=dbus.service Wants=dbus.service After=resource-agents-deps.target Wants=resource-agents-deps.target After=syslog.service After=rsyslog.service [Install] Alias=pacemaker-remote.service WantedBy=multi-user.target [Service] Type=simple KillMode=process NotifyAccess=none EnvironmentFile=-@CONFIGDIR@/pacemaker EnvironmentFile=-@CONFIGDIR@/sbd ExecStart=@sbindir@/pacemaker-remoted # Systemd v227 and above can limit the number of processes spawned by a # service. That is a bad idea for an HA cluster resource manager, so disable it # by default. The administrator can create a local override if they really want # a limit. If your systemd version does not support TasksMax, and you want to # get rid of the resulting log warnings, comment out this option. TasksMax=infinity -# Pacemaker Remote can exit only after all managed services have shut down; -# an HA database could conceivably take even longer than this +# When connected to the cluster and functioning properly, the service will +# wait to exit until the cluster notifies it that all resources on the +# remote node have been stopped. The default of 30min should cover most +# typical cluster configurations, but it may need an increase to adapt to +# local conditions (e.g. a large, clustered database could conceivably take longer to stop). 
TimeoutStopSec=30min TimeoutStartSec=30s # Restart options include: no, on-success, on-failure, on-abort or always Restart=on-failure # crm_perror() writes directly to stderr, so ignore it here # to avoid double-logging with the wrong format StandardError=null diff --git a/daemons/pacemakerd/pacemaker.service.in b/daemons/pacemakerd/pacemaker.service.in index 6a7df8cd33..5fdb8c5b3e 100644 --- a/daemons/pacemakerd/pacemaker.service.in +++ b/daemons/pacemakerd/pacemaker.service.in @@ -1,90 +1,89 @@ [Unit] Description=Pacemaker High Availability Cluster Manager Documentation=man:pacemakerd Documentation=https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html-single/Pacemaker_Explained/index.html # DefaultDependencies takes care of sysinit.target, # basic.target, and shutdown.target # We need networking to bind to a network address. It is recommended not to # use Wants or Requires with network.target, and not to use # network-online.target for server daemons. After=network.target # Time syncs can make the clock jump backward, which messes with logging # and failure timestamps, so wait until it's done. After=time-sync.target # Managing systemd resources requires DBus. After=dbus.service Wants=dbus.service # Some OCF resources may have dependencies that aren't managed by the cluster; # these must be started before Pacemaker and stopped after it. The # resource-agents package provides this target, which lets system adminstrators # add drop-ins for those dependencies. After=resource-agents-deps.target Wants=resource-agents-deps.target After=syslog.service After=rsyslog.service After=corosync.service Requires=corosync.service [Install] WantedBy=multi-user.target [Service] Type=simple KillMode=process NotifyAccess=main EnvironmentFile=-@CONFIGDIR@/pacemaker EnvironmentFile=-@CONFIGDIR@/sbd SuccessExitStatus=100 ExecStart=@sbindir@/pacemakerd -f # Systemd v227 and above can limit the number of processes spawned by a # service. 
That is a bad idea for an HA cluster resource manager, so disable it # by default. The administrator can create a local override if they really want # a limit. If your systemd version does not support TasksMax, and you want to # get rid of the resulting log warnings, comment out this option. TasksMax=infinity # If pacemakerd doesn't stop, it's probably waiting on a cluster # resource. Sending -KILL will just get the node fenced SendSIGKILL=no # If we ever hit the StartLimitInterval/StartLimitBurst limit, and the # admin wants to stop the cluster while pacemakerd is not running, it # might be a good idea to enable the ExecStopPost directive below. # # However, the node will likely end up being fenced as a result, so it's # not enabled by default. # # ExecStopPost=/usr/bin/killall -TERM pacemaker-attrd pacemaker-based \ # pacemaker-controld pacemaker-execd pacemaker-fenced \ # pacemaker-schedulerd # If you want Corosync to stop whenever Pacemaker is stopped, # uncomment the next line too: # # ExecStopPost=/bin/sh -c 'pidof pacemaker-controld || killall -TERM corosync' -# Uncomment this for older versions of systemd that didn't support -# TimeoutStopSec -# TimeoutSec=30min - -# Pacemaker can only exit after all managed services have shut down -# A HA database could conceivably take even longer than this +# When the service functions properly, it will wait to exit until all resources +# have been stopped on the local node, and potentially across all nodes that +# are shutting down. The default of 30min should cover most typical cluster +# configurations, but it may need an increase to adapt to local conditions +# (e.g. a large, clustered database could conceivably take longer to stop). TimeoutStopSec=30min TimeoutStartSec=60s # Restart options include: no, on-success, on-failure, on-abort or always Restart=on-failure # crm_perror() writes directly to stderr, so ignore it here # to avoid double-logging with the wrong format StandardError=null