Page Menu
Home
ClusterLabs Projects
Search
Configure Global Search
Log In
Files
F4639205
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
12 KB
Referenced Files
None
Subscribers
None
View Options
diff --git a/heartbeat/Raid1.in b/heartbeat/Raid1.in
index db478e797..83d6fe83c 100644
--- a/heartbeat/Raid1.in
+++ b/heartbeat/Raid1.in
@@ -1,331 +1,420 @@
#!/bin/sh
#
-# $Id: Raid1.in,v 1.2 2005/07/29 07:36:15 sunjd Exp $
+# $Id: Raid1.in,v 1.3 2005/08/03 05:53:38 sunjd Exp $
#
# License: GNU General Public License (GPL)
# Support: linux-ha@lists.linux-ha.org
#
# Raid1
# Description: Manages a software Raid1 device on a shared storage medium.
# Original Author: Eric Z. Ayers (eric.ayers@compgen.com)
# Original Release: 25 Oct 2000
-# Support: linux-ha-dev@lists.tummy.com
# RAID patches: http://people.redhat.com/mingo/raid-patches/
# Word to the Wise: http://lwn.net/2000/0810/a/raid-faq.php3
# Sympathetic Ear: mailto:linux-raid@vger.kernel.org
#
-# usage: $0 {start|stop|status|monitor|meta-data}
+# usage: $0 {start|stop|status|monitor|validate-all|usage|meta-data}
#
# OCF parameters are as below:
# OCF_RESKEY_raidconf
# (name of MD configuration file. e.g. /etc/raidtab)
# OCF_RESKEY_raiddev
# (of the form /dev/md?? the block device to use)
#
# in /etc/ha.d/haresources, use a line such as:
# nodea 10.0.0.170 Raid1::/etc/raidtab.md0::/dev/md0 Filesystem::/dev/md0::/data1::ext2
#
-# This script assumes you are running the so-called RAID v.90 patches vs.
-# the Linux 2.2 kernel (distributed with RedHat 6.2). I have not used
-# kernel version 2.4.
-#
# The "start" arg starts up the raid device
# The "stop" arg stops it. NOTE: all filesystems must be unmounted
# and no processes should be accessing the device.
# The "status" arg just prints out whether the device is running or not
#
#
# DISCLAIMER: Use at your own risk!
#
# Besides all of the usual legalese that accompanies free software,
# I will warn you that I do not yet use this kind of setup (software RAID
# over shared storage) in production, and I have reservations about doing so.
#
# The linux md driver/scsi drivers under Raid 0.90 and kernel version 2.2
# do not behave well when a drive is in the process of going bad.
# The kernel slows down, but doesn't completely crash. This is about the
# worst possible thing that could happen in an un-attended HA type
# environment. (Once the system is rebooted, the software raid stuff works
# like a champ.)
# My other reservation has to do with the interaction of RAID recovery with
# journaling filesystems and other parts of the kernel. Subscribe to
# linux-raid@vger.kernel.org for other opinions and possible solutions.
#
# -EZA 25 Oct 2000
#
# SETUP:
#
# You might need to pass the command line parameter: raid=noautodetect
# in an HA environment so that the kernel doesn't automatically start
# up your raid partitions when you boot the node. This means that it isn't
# going to work to use RAID for the system disks and the shared disks.
#
# 0) partition the disks to use for RAID. Use normal Linux partition
# types, not the RAID autodetect type for your partitions.
# 1) Create /etc/raidtab.md? on both systems (see example file below)
# 2) Initialize your raid partition with
# /sbin/mkraid --configfile /etc/raidtab.md? /dev/md?
# 3) Format your filesystem
# mke2fs /dev/md0 # for ext2fs... a journaling filesystem would be nice
# 4) Create the mount point on both systems.
# DO NOT add your raid filesystem to /etc/fstab
# 5) copy this script (to /etc/rc.d/init.d if you wish) and edit it to
# reflect your desired settings.
# 6) Modify the heartbeat 'haresources' setup file
# 7) unmount the filesystem and stop the raid device with 'raidstop'
# 8) fire up heartbeat!
#
#
# EXAMPLE config file /etc/raidtab.md0
# This file must exist on both machines!
#
# raiddev /dev/md0
# raid-level 1
# nr-raid-disks 2
# chunk-size 64k
# persistent-superblock 1
# #nr-spare-disks 0
# device /dev/sda1
# raid-disk 0
# device /dev/sdb1
# raid-disk 1
#
#######################################################################
# Initialization:
. @hb_libdir@/ocf-shellfuncs
#######################################################################
prefix=@prefix@
exec_prefix=@exec_prefix@
# Utilities used by this script
MODPROBE=@MODPROBE@
FSCK=@FSCK@
FUSER=@FUSER@
RAIDSTART=@RAIDSTART@
MOUNT=@MOUNT@
UMOUNT=@UMOUNT@
RAIDSTOP=@RAIDSTOP@
+MDADM=@MDADM@
check_util () {
if [ ! -x "$1" ] ; then
- ocf_log "err" "setup problem: Couldn't find utility $1"
- exit 1
+ ocf_log err "setup problem: Couldn't find utility $1"
+ exit $OCF_ERR_GENERIC
fi
}
usage() {
cat <<-EOT;
- usage: $0 {start|stop|status|monitor|meta-data}
- $Id: Raid1.in,v 1.2 2005/07/29 07:36:15 sunjd Exp $
+ usage: $0 {start|stop|status|monitor|validate-all|usage|meta-data}
+ $Id: Raid1.in,v 1.3 2005/08/03 05:53:38 sunjd Exp $
EOT
}
meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
-<resource-agent name="RAID1" version="0.9">
+<resource-agent name="Raid1">
<version>1.0</version>
<longdesc lang="en">
Resource script for RAID1. It manages a software Raid1 device on a shared
storage medium.
</longdesc>
<shortdesc lang="en">RAID1 resource agent</shortdesc>
<parameters>
<parameter name="raidconf" unique="0">
<longdesc lang="en">
The name of RAID configuration file. e.g. /etc/raidtab.
</longdesc>
<shortdesc lang="en">RAID tab config file</shortdesc>
<content type="string" default="" />
</parameter>
<parameter name="raiddev" unique="0">
<longdesc lang="en">
The block device to use.
</longdesc>
<shortdesc lang="en">block device</shortdesc>
<content type="string" default="" />
</parameter>
</parameters>
<actions>
<action name="start" timeout="10" />
<action name="stop" timeout="10" />
<action name="status" depth="0" timeout="10" interval="10" start-delay="10" />
<action name="monitor" depth="0" timeout="10" interval="10" start-delay="10" />
+<action name="validate-all" timeout="5" />
<action name="meta-data" timeout="5" />
</actions>
</resource-agent>
END
}
#
# START: Start up the RAID device
#
raid1_start() {
# See if the md device is already mounted.
- # NOTE: this will not work right if you have more than 10 md devices!
- $MOUNT | grep -e "^$MDDEV" >/dev/null
+ $MOUNT | grep -e "^$MDDEV\>" >/dev/null
if [ $? -ne 1 ] ; then
- ocf_log "err" "Device $MDDEV is already mounted!"
- return 1;
+ ocf_log err "Device $MDDEV is already mounted!"
+ return $OCF_ERR_GENERIC
fi
+ if [ "running" = `raid1_status` ]; then
+ # We are already online, do not bother
+ return $OCF_SUCCESS
+ fi
+
# Insert SCSI module
$MODPROBE scsi_hostadapter
if [ $? -ne 0 ] ; then
- ocf_log "warn" "Couldn't insert SCSI module."
+ ocf_log warn "Couldn't insert SCSI module."
fi
# Insert raid personality module
$MODPROBE raid1
if [ $? -ne 0 ] ; then
- ocf_log "err" "Couldn't insert RAID1 module"
- return 1
+ # It is not fatal, chance is that we have raid1 builtin...
+ ocf_log warn "Couldn't insert RAID1 module"
+# return 1
fi
-
- # Run raidstart to start up the RAID array
- $RAIDSTART --configfile $RAIDTAB_CONFIG $MDDEV
+ grep -q "^Personalities.*\[raid1\]" /proc/mdstat 2>/dev/null
if [ $? -ne 0 ] ; then
- ocf_log "err" "Couldn't start RAID for $MDDEV"
- return 1
+ ocf_log err "We don't have RAID1 support! Exiting"
+ return $OCF_ERR_GENERIC
fi
- return 0
+ if [ $HAVE_RAIDTOOLS = "true" ]; then
+ # Run raidstart to start up the RAID array
+ $RAIDSTART --configfile $RAIDTAB_CONFIG $MDDEV
+ else
+ # Run mdadm
+ $MDADM --run $MDDEV --config=$RAIDTAB_CONFIG
+ fi
+
+
+ # We can't rely on the exit status above; $MDADM will return 0 on some failures, see
+ #
+ # # mdadm --run /dev/nosuchdevice
+ # mdadm: error opening /dev/nosuchdevice: No such file or directory
+ # # echo $?
+ # 0
+
+ if [ "running" = `raid1_status` ]; then
+ return $OCF_SUCCESS
+ else
+ ocf_log err "Couldn't start RAID for $MDDEV"
+ return $OCF_ERR_GENERIC
+ fi
}
#
# STOP: stop the RAID device
#
raid1_stop() {
# See if the MD device is online
- grep -e "^$MD" /proc/mdstat >/dev/null
+ grep -e "^$MD[ \t:]" /proc/mdstat >/dev/null
if [ $? -ne 0 ] ; then
- ocf_log "warn" "device $MD is not online according to kernel"
- return 0
+ ocf_log warn "device $MD is not online according to kernel"
+ return $OCF_SUCCESS
fi
# See if the MD device is mounted
# NOTE: this will not work right if you have more than 10 md devices!
- $MOUNT | grep -e "^$MDDEV" >/dev/null
+ $MOUNT | grep -e "^$MDDEV\>" >/dev/null
if [ $? -ne 1 ] ; then
# Kill all processes open on filesystem
$FUSER -mk $MOUNTPOINT
# the return from fuser doesn't tell us much
#if [ $? -ne 0 ] ; then
# ocf_log "err" "Couldn't kill processes on $MOUNTPOINT"
# return 1;
#fi
# Unmount the filesystem
$UMOUNT $MDDEV
if [ $? -ne 0 ] ; then
- ocf_log "err" "Couldn't unmount filesystem for $MDDEV"
- return 1
+ ocf_log err "Couldn't unmount filesystem for $MDDEV"
+ return $OCF_ERR_GENERIC
fi
- $MOUNT | grep -e "^$MDDEV" >/dev/null
+ $MOUNT | grep -e "^$MDDEV\>" >/dev/null
if [ $? -ne 1 ] ; then
- ocf_log "err" "filesystem for $MDDEV still mounted"
- return 1
+ ocf_log err "filesystem for $MDDEV still mounted"
+ return $OCF_ERR_GENERIC
fi
fi
# Turn off raid
- $RAIDSTOP --configfile /etc/raidtab.$MD $MDDEV
+ if [ $HAVE_RAIDTOOLS = "true" ]; then
+ $RAIDSTOP --configfile /etc/raidtab.$MD $MDDEV
+ else
+ $MDADM --stop $MDDEV --config=$RAIDTAB_CONFIG
+ fi
+
if [ $? -ne 0 ] ; then
- ocf_log "err" "Couldn't stop RAID for $MDDEV"
- return 1
+ ocf_log err "Couldn't stop RAID for $MDDEV"
+ return $OCF_ERR_GENERIC
fi
- return 0
+ return $OCF_SUCCESS
}
#
# STATUS: is the raid device online or offline?
#
raid1_status() {
# See if the MD device is online
- grep -e "^$MD" /proc/mdstat >/dev/null
+ grep -e "^$MD[ \t:]" /proc/mdstat >/dev/null
if [ $? -ne 0 ] ; then
echo "stopped"
+ return $OCF_NOT_RUNNING
else
echo "running"
+ return $OCF_SUCCESS
fi
- return 0
}
+raid1_validate_all() {
+ if [ $HAVE_RAIDTOOLS = "true" ]; then
+
+ # $MDDEV should be an md device
+ lsraid -a $MDDEV 2>&1 | grep -q -i "is not an md device"
+ if [ $? -eq 0 ]; then
+ ocf_log err "$MDDEV is not an md device!"
+ exit $OCF_ERR_ARGS
+ fi
+
+ COMMENT="\(#.*\)"
+ grep -q "^[[:space:]]*raiddev[[:space:]]\+$MDDEV[[:space:]]*$COMMENT\?$" $RAIDTAB_CONFIG 2>/dev/null
+ if [ $? -ne 0 ]; then
+ ocf_log err "Raid device $MDDEV does not appear in $RAIDTAB_CONFIG"
+ exit $OCF_ERR_GENERIC
+ fi
+ else
+ error=`$MDADM --query $MDDEV 2>&1`
+ if [ $? -ne 0 ]; then
+ ocf_log err "$error"
+ exit $OCF_ERR_GENERIC
+ fi
+ echo $error | grep -q -i "^$MDDEV[ \t:].*is not an md array"
+ if [ $? -eq 0 ]; then
+ ocf_log err "$MDDEV is not an md array!"
+ exit $OCF_ERR_ARGS
+ fi
+ fi
+
+ return $OCF_SUCCESS
+}
+
if
- ( [ $# -eq 0 ] || [ $# -gt 1 ] )
+ ( [ $# -ne 1 ] )
then
usage
- exit 1
+ exit $OCF_ERR_ARGS
fi
+case "$1" in
+ meta-data)
+ meta_data
+ exit $OCF_SUCCESS
+ ;;
+ usage)
+ usage
+ exit $OCF_SUCCESS
+ ;;
+ *)
+ ;;
+esac
+
#
# Check the necessary environment variable settings
#
+
RAIDTAB_CONFIG=$OCF_RESKEY_raidconf
MDDEV=$OCF_RESKEY_raiddev
-if [ ! -f "$RAIDTAB_CONFIG" ] ; then
- ocf_log "err" "Couldn't open file $RAIDTAB_CONFIG"
- usage
- exit 1
+if [ -z "$RAIDTAB_CONFIG" ] ; then
+ ocf_log err "Please set OCF_RESKEY_raidconf!"
+ exit $OCF_ERR_ARGS
+fi
+
+if [ ! -r "$RAIDTAB_CONFIG" ] ; then
+ ocf_log err "Configuration file [$RAIDTAB_CONFIG] does not exist, or can not be opend!"
+ exit $OCF_ERR_ARGS
+fi
+
+if [ -z "$MDDEV" ] ; then
+ ocf_log err "Please set OCF_RESKEY_raiddev to the Raid device you want to control!"
+ exit $OCF_ERR_ARGS
fi
if [ ! -b "$MDDEV" ] ; then
- ocf_log "err" "Couldn't find MD device $MDDEV. Expected /dev/md* to exist"
- usage
- exit 1
+ ocf_log err "Couldn't find MD device $MDDEV. Expected /dev/md* to exist"
+ exit $OCF_ERR_ARGS
fi
# strip off the /dev/ prefix to get the name of the MD device
MD=`echo $MDDEV | sed -e 's/\/dev\///'`
# Check to make sure the utilities are found
check_util $MODPROBE
check_util $FUSER
-check_util $RAIDSTART
+#check_util $RAIDSTART
check_util $MOUNT
check_util $UMOUNT
-check_util $RAIDSTOP
+HAVE_RAIDTOOLS=false
+
+if [ -z "$RAIDSTART" -o ! -x "$RAIDSTART" ]; then
+ ocf_log info "Raidstart not found, trying mdadm..."
+ check_util $MDADM
+else
+ check_util $RAIDSTOP
+
+ HAVE_RAIDTOOLS=true
+
+fi
+
+# At this stage,
+# [ $HAVE_RAIDTOOLS = false ] <=> we have $MDADM,
+# otherwise we have raidtools (raidstart and raidstop)
# Look for how we are called
case "$1" in
- meta-data)
- meta_data
- exit $OCF_SUCCESS
- ;;
start)
raid1_start
;;
stop)
raid1_stop
;;
- status)
+ status|monitor)
raid1_status
;;
-
- usage)
- usage
- exit $OCF_SUCCESS
+ validate-all)
+ raid1_validate_all
;;
*)
usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
exit $?
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Thu, Jul 10, 2:13 AM (1 d, 12 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
2009711
Default Alt Text
(12 KB)
Attached To
Mode
rR Resource Agents
Attached
Detach File
Event Timeline
Log In to Comment