diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 5045aa668..d8c79b961 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -1,172 +1,173 @@
 # Makefile.am for OCF RAs
 #
 # Author: Sun Jing Dong
 # Copyright (C) 2004 IBM
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 #
 MAINTAINERCLEANFILES = Makefile.in
 
 EXTRA_DIST = $(ocf_SCRIPTS) $(ocfcommon_DATA) \
 	$(common_DATA) $(hb_DATA) $(dtd_DATA) \
 	README
 
 AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/linux-ha
 
 halibdir = $(libexecdir)/heartbeat
 
 ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat
 
 dtddir = $(datadir)/$(PACKAGE_NAME)
 dtd_DATA = ra-api-1.dtd metadata.rng
 
 if USE_IPV6ADDR_AGENT
 ocf_PROGRAMS = IPv6addr
 else
 ocf_PROGRAMS =
 endif
 
 if IPV6ADDR_COMPATIBLE
 halib_PROGRAMS = send_ua
 else
 halib_PROGRAMS =
 endif
 
 IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c
 send_ua_SOURCES = send_ua.c IPv6addr_utils.c
 
 IPv6addr_LDADD = -lplumb $(LIBNETLIBS)
 send_ua_LDADD = $(LIBNETLIBS)
 
 ocf_SCRIPTS = AoEtarget \
 	AudibleAlarm \
 	ClusterMon \
 	CTDB \
 	Delay \
 	Dummy \
 	EvmsSCC \
 	Evmsd \
 	Filesystem \
 	ICP \
 	IPaddr \
 	IPaddr2 \
 	IPsrcaddr \
 	LVM \
 	LinuxSCSI \
 	MailTo \
 	ManageRAID \
 	ManageVE \
 	NodeUtilization \
 	Pure-FTPd \
 	Raid1 \
 	Route \
 	SAPDatabase \
 	SAPInstance \
 	SendArp \
 	ServeRAID \
 	SphinxSearchDaemon \
 	Squid \
 	Stateful \
 	SysInfo \
 	VIPArip \
 	VirtualDomain \
 	WAS \
 	WAS6 \
 	WinPopup \
 	Xen \
 	Xinetd \
+	ZFS \
 	anything \
 	apache \
 	asterisk \
 	awseip \
 	awsvip \
 	clvm \
 	conntrackd \
 	db2 \
 	dhcpd \
 	dnsupdate \
 	docker \
 	eDir88 \
 	ethmonitor \
 	exportfs \
 	fio \
 	galera \
 	garbd \
 	iSCSILogicalUnit \
 	iSCSITarget \
 	ids \
 	iface-bridge \
 	iface-vlan \
 	iscsi \
 	jboss \
 	kamailio \
 	lxc \
 	mysql \
 	mysql-proxy \
 	nagios \
 	named \
 	nfsnotify \
 	nfsserver \
 	nginx \
 	oraasm \
 	oracle \
 	oralsnr \
 	pgagent \
 	pgsql \
 	pingd \
 	portblock \
 	postfix \
 	pound \
 	proftpd \
 	rabbitmq-cluster \
 	redis \
 	rsyncd \
 	rsyslog \
 	scsi2reservation \
 	sfex \
 	sg_persist \
 	slapd \
 	symlink \
 	syslog-ng \
 	tomcat \
 	varnish \
 	vmware \
 	vsftpd \
 	zabbixserver
 
 ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat
 ocfcommon_DATA = ocf-shellfuncs \
 	ocf-binaries \
 	ocf-directories \
 	ocf-returncodes \
 	ocf-rarun \
 	ocf-distro \
 	apache-conf.sh \
 	http-mon.sh \
 	sapdb-nosha.sh \
 	sapdb.sh \
 	ora-common.sh \
 	mysql-common.sh \
 	nfsserver-redhat.sh \
 	findif.sh
 
 # Legacy locations
 hbdir = $(sysconfdir)/ha.d
 hb_DATA = shellfuncs
 
 check: $(ocf_SCRIPTS:=.check)
 
 %.check: %
 	OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng -
diff --git a/heartbeat/ZFS b/heartbeat/ZFS
new file mode 100755
index 000000000..16cb13824
--- /dev/null
+++ b/heartbeat/ZFS
@@ -0,0 +1,192 @@
+#!/bin/sh
+#
+# License: GNU General Public License (GPL)
+# Support: zfs@lists.illumos.org
+# Written by: Saso Kiselkov
+#
+# This script manages ZFS pools
+# It can import a ZFS pool or export it
+#
+# usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+#
+# The "start" arg imports a ZFS pool.
+# The "stop" arg exports it.
+#
+# OCF parameters are as follows
+# OCF_RESKEY_pool - the pool to import/export
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_importforce_default=true
+
+: ${OCF_RESKEY_importforce=${OCF_RESKEY_importforce_default}}
+
+USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}";
+
+#######################################################################
+
+meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ZFS">
+<version>1.0</version>
+<longdesc lang="en">
+This script manages ZFS pools
+It can import a ZFS pool or export it
+</longdesc>
+<shortdesc lang="en">Manages ZFS pools</shortdesc>
+
+<parameters>
+<parameter name="pool" unique="1" required="1">
+<longdesc lang="en">
+The name of the ZFS pool to manage, e.g. "tank".
+</longdesc>
+<shortdesc lang="en">ZFS pool name</shortdesc>
+<content type="string"/>
+</parameter>
+<parameter name="importargs" unique="0" required="0">
+<longdesc lang="en">
+Arguments to zpool import, e.g. "-d /dev/disk/by-id".
+</longdesc>
+<shortdesc lang="en">Import arguments</shortdesc>
+<content type="string"/>
+</parameter>
+<parameter name="importforce" unique="0" required="0">
+<longdesc lang="en">
+zpool import is given the -f option.
+</longdesc>
+<shortdesc lang="en">Import is forced</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_importforce_default}"/>
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="60s"/>
+<action name="stop" timeout="60s"/>
+<action name="monitor" depth="0" timeout="30s" interval="5s"/>
+<action name="validate-all" timeout="30s"/>
+<action name="meta-data" timeout="5s"/>
+</actions>
+</resource-agent>
+END
+	exit $OCF_SUCCESS
+}
+
+zpool_is_imported () {
+	zpool list -H "$OCF_RESKEY_pool" > /dev/null
+}
+
+# Forcibly imports a ZFS pool, mounting all of its auto-mounted filesystems
+# (as configured in the 'mountpoint' and 'canmount' properties)
+# If the pool is already imported, no operation is taken.
+zpool_import () {
+	if ! zpool_is_imported; then
+		ocf_log debug "${OCF_RESKEY_pool}:starting import"
+
+		# The meanings of the options to import are as follows:
+		#   -f : import even if the pool is marked as imported to another
+		#        system - the system may have failed and not exported it
+		#        cleanly.
+		#   -o cachefile=none : the import should be temporary, so do not
+		#        cache it persistently (across machine reboots). We want
+		#        the CRM to explicitly control imports of this pool.
+		if ocf_is_true "${OCF_RESKEY_importforce}"; then
+			FORCE=-f
+		else
+			FORCE=""
+		fi
+		if zpool import $FORCE $OCF_RESKEY_importargs -o cachefile=none "$OCF_RESKEY_pool" ; then
+			ocf_log debug "${OCF_RESKEY_pool}:import successful"
+			return $OCF_SUCCESS
+		else
+			ocf_log debug "${OCF_RESKEY_pool}:import failed"
+			return $OCF_ERR_GENERIC
+		fi
+	fi
+}
+
+# Forcibly exports a ZFS pool, unmounting all of its filesystems in the process
+# If the pool is not imported, no operation is taken.
+zpool_export () {
+	if zpool_is_imported; then
+		ocf_log debug "${OCF_RESKEY_pool}:starting export"
+
+		# -f : force the export, even if we have mounted filesystems
+		# Please note that this may fail with a "busy" error if there are
+		# other kernel subsystems accessing the pool (e.g. SCSI targets).
+		# Always make sure the pool export is last in your failover logic.
+		if zpool export -f "$OCF_RESKEY_pool" ; then
+			ocf_log debug "${OCF_RESKEY_pool}:export successful"
+			return $OCF_SUCCESS
+		else
+			ocf_log debug "${OCF_RESKEY_pool}:export failed"
+			return $OCF_ERR_GENERIC
+		fi
+	fi
+}
+
+# Monitors the health of a ZFS pool resource. Please note that this only
+# checks whether the pool is imported and functional, not whether it has
+# any degraded devices (use monitoring systems such as Zabbix for that).
+zpool_monitor () {
+	# If the pool is not imported, then we can't monitor its health
+	if ! zpool_is_imported; then
+		return $OCF_NOT_RUNNING
+	fi
+
+	# Check the pool status
+	HEALTH=$(zpool list -H -o health "$OCF_RESKEY_pool")
+	case "$HEALTH" in
+		ONLINE|DEGRADED) return $OCF_SUCCESS;;
+		FAULTED) return $OCF_NOT_RUNNING;;
+		*) return $OCF_ERR_GENERIC;;
+	esac
+}
+
+# Validates whether we can import a given ZFS pool
+zpool_validate () {
+	# Check that the 'zpool' command is known
+	if ! which zpool > /dev/null; then
+		return $OCF_ERR_INSTALLED
+	fi
+
+	# If the pool is imported, then it is obviously valid
+	if zpool_is_imported; then
+		return $OCF_SUCCESS
+	fi
+
+	# Check that the pool can be imported
+	if zpool import $OCF_RESKEY_importargs | grep 'pool:' | grep "\\<$OCF_RESKEY_pool\\>" > /dev/null;
+	then
+		return $OCF_SUCCESS
+	else
+		return $OCF_ERR_CONFIGURED
+	fi
+}
+
+usage () {
+	echo "$USAGE" >&2
+	return $1
+}
+
+if [ $# -ne 1 ]; then
+	usage $OCF_ERR_ARGS
+fi
+
+case $1 in
+	meta-data)	meta_data;;
+	start)		zpool_import;;
+	stop)		zpool_export;;
+	status|monitor)	zpool_monitor;;
+	validate-all)	zpool_validate;;
+	usage)		usage $OCF_SUCCESS;;
+	*)		usage $OCF_ERR_UNIMPLEMENTED;;
+esac
+
+exit $?
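
For reviewers who want to exercise the new agent, a minimal usage sketch follows. It assumes a Pacemaker cluster administered with pcs; the resource names zfs-data and nfs-server, the pool name tank, the import arguments, and the operation timeouts are illustrative placeholders, not values taken from this patch.

# Hypothetical configuration sketch: create a resource from the new agent and
# order it before a consumer of the pool, so that the pool export runs last on
# failover, as the comment in zpool_export() recommends.
pcs resource create zfs-data ocf:heartbeat:ZFS \
    pool=tank importargs="-d /dev/disk/by-id" importforce=true \
    op monitor interval=30s timeout=60s
pcs constraint order start zfs-data then start nfs-server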