diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 5045aa668..d8c79b961 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -1,172 +1,173 @@
 # Makefile.am for OCF RAs
 #
 # Author: Sun Jing Dong
 # Copyright (C) 2004 IBM
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 #
 MAINTAINERCLEANFILES = Makefile.in

 EXTRA_DIST = $(ocf_SCRIPTS) $(ocfcommon_DATA) \
              $(common_DATA) $(hb_DATA) $(dtd_DATA) \
              README

 AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/linux-ha

 halibdir = $(libexecdir)/heartbeat

 ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat

 dtddir = $(datadir)/$(PACKAGE_NAME)
 dtd_DATA = ra-api-1.dtd metadata.rng

 if USE_IPV6ADDR_AGENT
 ocf_PROGRAMS = IPv6addr
 else
 ocf_PROGRAMS =
 endif

 if IPV6ADDR_COMPATIBLE
 halib_PROGRAMS = send_ua
 else
 halib_PROGRAMS =
 endif

 IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c
 send_ua_SOURCES = send_ua.c IPv6addr_utils.c

 IPv6addr_LDADD = -lplumb $(LIBNETLIBS)
 send_ua_LDADD = $(LIBNETLIBS)

 ocf_SCRIPTS = AoEtarget \
               AudibleAlarm \
               ClusterMon \
               CTDB \
               Delay \
               Dummy \
               EvmsSCC \
               Evmsd \
               Filesystem \
               ICP \
               IPaddr \
               IPaddr2 \
               IPsrcaddr \
               LVM \
               LinuxSCSI \
               MailTo \
               ManageRAID \
               ManageVE \
               NodeUtilization \
               Pure-FTPd \
               Raid1 \
               Route \
               SAPDatabase \
               SAPInstance \
               SendArp \
               ServeRAID \
               SphinxSearchDaemon \
               Squid \
               Stateful \
               SysInfo \
               VIPArip \
               VirtualDomain \
               WAS \
               WAS6 \
               WinPopup \
               Xen \
               Xinetd \
+              ZFS \
               anything \
               apache \
               asterisk \
               awseip \
               awsvip \
               clvm \
               conntrackd \
               db2 \
               dhcpd \
               dnsupdate \
               docker \
               eDir88 \
               ethmonitor \
               exportfs \
               fio \
               galera \
               garbd \
               iSCSILogicalUnit \
               iSCSITarget \
               ids \
               iface-bridge \
               iface-vlan \
               iscsi \
               jboss \
               kamailio \
               lxc \
               mysql \
               mysql-proxy \
               nagios \
               named \
               nfsnotify \
               nfsserver \
               nginx \
               oraasm \
               oracle \
               oralsnr \
               pgagent \
               pgsql \
               pingd \
               portblock \
               postfix \
               pound \
               proftpd \
               rabbitmq-cluster \
               redis \
               rsyncd \
               rsyslog \
               scsi2reservation \
               sfex \
               sg_persist \
               slapd \
               symlink \
               syslog-ng \
               tomcat \
               varnish \
               vmware \
               vsftpd \
               zabbixserver

 ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat
 ocfcommon_DATA = ocf-shellfuncs \
                  ocf-binaries \
                  ocf-directories \
                  ocf-returncodes \
                  ocf-rarun \
                  ocf-distro \
                  apache-conf.sh \
                  http-mon.sh \
                  sapdb-nosha.sh \
                  sapdb.sh \
                  ora-common.sh \
                  mysql-common.sh \
                  nfsserver-redhat.sh \
                  findif.sh

 # Legacy locations
 hbdir = $(sysconfdir)/ha.d
 hb_DATA = shellfuncs

 check: $(ocf_SCRIPTS:=.check)

 %.check: %
 	OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng
diff --git a/heartbeat/ZFS b/heartbeat/ZFS
index a629c69c0..3854f9145 100755
--- a/heartbeat/ZFS
+++ b/heartbeat/ZFS
@@ -1,211 +1,198 @@
 #!/bin/sh
 #
 # License: GNU General Public License (GPL)
 # Support: zfs@lists.illumos.org
 # Written by: Saso Kiselkov
 #
 # This script manages ZFS pools
 # It can import a ZFS pool or export it
 #
 # usage: $0 {start|stop|status|monitor|validate-all|meta-data}
 #
 # The "start" arg imports a ZFS pool.
 # The "stop" arg exports it.
 #
 # OCF parameters are as follows
 # OCF_RESKEY_pool - the pool to import/export
 #
 #######################################################################
 # Initialization:

 : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
-: ${HELPERS_DIR=${OCF_ROOT}/lib/heartbeat/helpers}

 # Set the variable below to '1' to enable zpool import/export logging.
 # This is useful to determine how long these operations take on your system.
 #
 DEBUG=0
 DEBUGLOG="/var/log/ZFS_cluster_debug.log"

 USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}";

 #######################################################################

 debug_log () {
     if [ "$DEBUG" -eq 1 ]; then
-        echo "`date`:${OCF_RESKEY_pool}:" $@ >> "$DEBUGLOG"
+        echo "$(date):${OCF_RESKEY_pool}:" "$@" >> "$DEBUGLOG"
     fi
 }

 meta_data() {
     cat <<END
 <?xml version="1.0"?>
 <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
 <resource-agent name="ZFS">
 <version>1.0</version>

 <longdesc lang="en">
 This script manages ZFS pools
 It can import a ZFS pool or export it
 </longdesc>
 <shortdesc lang="en">Manages ZFS pools</shortdesc>

 <parameters>

 <parameter name="pool" unique="1" required="1">
 <longdesc lang="en">
 The name of the ZFS pool to manage, e.g. "tank".
 </longdesc>
 <shortdesc lang="en">ZFS pool name</shortdesc>
 <content type="string" default="" />
 </parameter>

 <parameter name="importargs" unique="0" required="0">
 <longdesc lang="en">
 Arguments to zpool import, e.g. "-d /dev/disk/by-id".
 </longdesc>
 <shortdesc lang="en">Import arguments</shortdesc>
 <content type="string" default="" />
 </parameter>

+<parameter name="importforce" unique="0" required="0">
+<longdesc lang="en">
+zpool import is given the -f option.
+</longdesc>
+<shortdesc lang="en">Import is forced</shortdesc>
+<content type="boolean" default="0" />
+</parameter>

 </parameters>

 <actions>
 <action name="start" timeout="60" />
 <action name="stop" timeout="60" />
 <action name="monitor" depth="0" timeout="30" interval="5" />
 <action name="validate-all" timeout="30" />
 <action name="meta-data" timeout="5" />
 </actions>

 </resource-agent>
 END

     exit $OCF_SUCCESS
 }

 zpool_is_imported () {
     zpool list -H "$OCF_RESKEY_pool" > /dev/null
 }

 # Forcibly imports a ZFS pool, mounting all of its auto-mounted filesystems
-# (as configured in the `mountpoint' and `canmount' properties)
+# (as configured in the 'mountpoint' and 'canmount' properties)
 # If the pool is already imported, no operation is taken.
-# You can use the zfs-helper script to assist pool setup before and/or
-# after import.
 zpool_import () {
     if ! zpool_is_imported; then
         debug_log "starting import"
-        if [ -x "${HELPERS_DIR}/zfs-helper" ]; then
-            debug_log "pre-import: start"
-            "${HELPERS_DIR}/zfs-helper" pre-import "$OCF_RESKEY_pool"
-            debug_log "pre-import: done"
-        fi
         # The meanings of the options to import are as follows:
         #   -f : import even if the pool is marked as imported to another
         #     system - the system may have failed and not exported it
         #     cleanly.
         #   -o cachefile=none : the import should be temporary, so do not
         #     cache it persistently (across machine reboots). We want
         #     the CRM to explicitly control imports of this pool.
-        if zpool import -f $OCF_RESKEY_importargs -o cachefile=none "$OCF_RESKEY_pool" ; then
+        if [ ${OCF_RESKEY_importforce} = 1 ]; then
+            FORCE=-f
+        else
+            FORCE=""
+        fi
+        if zpool import $FORCE $OCF_RESKEY_importargs -o cachefile=none "$OCF_RESKEY_pool" ; then
             debug_log "import successful"
-            if [ -x "${HELPERS_DIR}/zfs-helper" ]; then
-                debug_log "post-import: starting"
-                "${HELPERS_DIR}/zfs-helper" post-import "$OCF_RESKEY_pool"
-                debug_log "post-import: done"
-            fi
             return $OCF_SUCCESS
         else
             debug_log "import failed"
             return $OCF_ERR_GENERIC
         fi
     fi
 }

 # Forcibly exports a ZFS pool, unmounting all of its filesystems in the process
 # If the pool is not imported, no operation is taken.
-# You can use the zfs-helper script to assist pool setup before and/or
-# after export.
 zpool_export () {
     if zpool_is_imported; then
         debug_log "starting export"
-        if [ -x "${HELPERS_DIR}/zfs-helper" ]; then
-            debug_log "pre-export: start"
-            "${HELPERS_DIR}/zfs-helper" pre-export "$OCF_RESKEY_pool"
-            debug_log "pre-export: done"
-        fi
         # -f : force the export, even if we have mounted filesystems
         # Please note that this may fail with a "busy" error if there are
         # other kernel subsystems accessing the pool (e.g. SCSI targets).
         # Always make sure the pool export is last in your failover logic.
         if zpool export -f "$OCF_RESKEY_pool" ; then
             debug_log "export successful"
-            if [ -x "${HELPERS_DIR}/zfs-helper" ]; then
-                debug_log "post-export: starting"
-                "${HELPERS_DIR}/zfs-helper" post-export "$OCF_RESKEY_pool"
-                debug_log "post-export: done"
-            fi
             return $OCF_SUCCESS
         else
             debug_log "export failed"
             return $OCF_ERR_GENERIC
         fi
     fi
 }

 # Monitors the health of a ZFS pool resource. Please note that this only
 # checks whether the pool is imported and functional, not whether it has
 # any degraded devices (use monitoring systems such as Zabbix for that).
 zpool_monitor () {
     # If the pool is not imported, then we can't monitor its health
     if ! zpool_is_imported; then
         return $OCF_NOT_RUNNING
     fi

     # Check the pool status
-    HEALTH=`zpool list -H -o health "$OCF_RESKEY_pool"`
+    HEALTH=$(zpool list -H -o health "$OCF_RESKEY_pool")
     case "$HEALTH" in
         ONLINE|DEGRADED) return $OCF_SUCCESS;;
         FAULTED) return $OCF_NOT_RUNNING;;
         *) return $OCF_ERR_GENERIC;;
     esac
 }

 # Validates whether we can import a given ZFS pool
 zpool_validate () {
-    # Check that the `zpool' command is known
+    # Check that the 'zpool' command is known
     if ! which zpool > /dev/null; then
         return $OCF_ERR_INSTALLED
     fi

     # If the pool is imported, then it is obviously valid
     if zpool_is_imported; then
         return $OCF_SUCCESS
     fi

     # Check that the pool can be imported
     if zpool import $OCF_RESKEY_importargs | grep 'pool:' | grep "\\<$OCF_RESKEY_pool\\>" > /dev/null; then
         return $OCF_SUCCESS
     else
         return $OCF_ERR_CONFIGURED
     fi
 }

 usage () {
     echo $USAGE >&2
     return $1
 }

 if [ $# -ne 1 ]; then
     usage $OCF_ERR_ARGS
 fi

 case $1 in
     meta-data)       meta_data;;
     start)           zpool_import;;
     stop)            zpool_export;;
     status|monitor)  zpool_monitor;;
     validate-all)    zpool_validate;;
     usage)           usage $OCF_SUCCESS;;
     *)               usage $OCF_ERR_UNIMPLEMENTED;;
 esac

 exit $?
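
A minimal configuration sketch for the agent once this patch is installed, assuming a Pacemaker cluster administered with pcs; the resource ID "tank-pool", the pool name "tank", the importargs value, and the operation timings are illustrative placeholders, not part of this patch:

    # Illustrative only: resource ID, pool name, and timings are assumptions.
    pcs resource create tank-pool ocf:heartbeat:ZFS \
        pool=tank importargs="-d /dev/disk/by-id" importforce=1 \
        op monitor interval=30s timeout=60s

As the comment in zpool_export notes, the export must come last in the failover logic, so any consumers of the pool (e.g. SCSI target or NFS resources) should be ordered to stop before this resource.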