
diff --git a/heartbeat/NodeUtilization b/heartbeat/NodeUtilization
index 1eee5f91f..c4708ab43 100755
--- a/heartbeat/NodeUtilization
+++ b/heartbeat/NodeUtilization
@@ -1,226 +1,226 @@
#!/bin/sh
#
#
# NodeUtilization OCF Resource Agent
#
# Copyright (c) 2011 SUSE LINUX, John Shi
# Copyright (c) 2016 SUSE LINUX, Kristoffer Gronlund
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
#######################################################################
NodeUtilization_meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="NodeUtilization">
<version>1.0</version>
<longdesc lang="en">
The Node Utilization agent detects system parameters like available CPU, host
memory and hypervisor memory availability, and adds them into the CIB for each
node using crm_attribute. Run the agent as a clone resource to have it populate
these parameters on each node.
Note: Setting hv_memory only works with Xen at the moment, using the xl or xm
command line tools.
</longdesc>
<shortdesc lang="en">Node Utilization</shortdesc>
<parameters>
<parameter name="dynamic" unique="0" required="0">
<longdesc lang="en">
If set, parameters will be updated if there are differences between the HA
parameters and the system values when running the monitor action.
If not set, the parameters will be set once when the resource instance starts.
</longdesc>
<shortdesc lang="en">Dynamically update parameters in monitor</shortdesc>
<content type="boolean" default="true" />
</parameter>
<parameter name="utilization_cpu" unique="0" required="0">
<longdesc lang="en">Enable setting node CPU utilization limit.</longdesc>
<shortdesc lang="en">Set node CPU utilization limit.</shortdesc>
<content type="boolean" default="true" />
</parameter>
<parameter name="utilization_cpu_reservation" unique="0" required="0">
<longdesc lang="en">Subtract this value when setting the CPU utilization parameter.</longdesc>
<shortdesc lang="en">CPU reservation.</shortdesc>
<content type="integer" default="1" />
</parameter>
<parameter name="utilization_host_memory" unique="0" required="0">
<longdesc lang="en">Enable setting available host memory.</longdesc>
<shortdesc lang="en">Set available host memory.</shortdesc>
<content type="boolean" default="true" />
</parameter>
<parameter name="utilization_host_memory_reservation" unique="0" required="0">
<longdesc lang="en">Subtract this value when setting host memory utilization, in MB.</longdesc>
<shortdesc lang="en">Host memory reservation, in MB.</shortdesc>
<content type="integer" default="512" />
</parameter>
<parameter name="utilization_hv_memory" unique="0" required="0">
<longdesc lang="en">Enable setting available hypervisor memory.</longdesc>
<shortdesc lang="en">Set available hypervisor memory.</shortdesc>
<content type="boolean" default="true" />
</parameter>
<parameter name="utilization_hv_memory_reservation" unique="0" required="0">
<longdesc lang="en">Subtract this value when setting hypervisor memory utilization, in MB.</longdesc>
<shortdesc lang="en">Hypervisor memory reservation, in MB.</shortdesc>
<content type="integer" default="512" />
</parameter>
</parameters>
<actions>
<action name="start" timeout="90" />
<action name="stop" timeout="100" />
<action name="monitor" timeout="20s" interval="60s"/>
<action name="meta-data" timeout="5" />
<action name="validate-all" timeout="30" />
</actions>
</resource-agent>
END
}
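# Illustrative only: the longdesc above suggests running this agent as a
# clone so every node publishes its own utilization. A minimal sketch in
# crm shell syntax (resource names are hypothetical):
#   crm configure primitive node-util ocf:heartbeat:NodeUtilization \
#       op monitor interval=60s timeout=20s
#   crm configure clone cl-node-util node-util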
Host_Total_Memory() {
local xentool
xentool=$(which xl 2> /dev/null || which xm 2> /dev/null)
if [ -x "$xentool" ]; then
$xentool info | awk '/total_memory/{printf("%d\n",$3);exit(0)}'
else
ocf_log warn "Can only set hv_memory for Xen hypervisor"
echo "0"
fi
}
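# Illustrative only: the awk above expects "xl info" (or "xm info") output
# containing a line like the following, with the value in MB (hypothetical):
#   total_memory           : 16384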
set_utilization() {
host_name="$(ocf_local_nodename)"
if ocf_is_true "$OCF_RESKEY_utilization_cpu"; then
sys_cpu=$(( $(grep -c processor /proc/cpuinfo) - $OCF_RESKEY_utilization_cpu_reservation ))
- uti_cpu=$(crm_attribute -Q -t nodes --node "$host_name" -z -n cpu 2>/dev/null)
+ uti_cpu=$(crm_attribute --quiet -t nodes --node "$host_name" -z -n cpu 2>/dev/null)
if [ "$sys_cpu" != "$uti_cpu" ]; then
if ! crm_attribute -t nodes --node "$host_name" -z -n cpu -v $sys_cpu; then
ocf_log err "Failed to set the cpu utilization attribute for $host_name using crm_attribute."
return 1
fi
fi
fi
if ocf_is_true "$OCF_RESKEY_utilization_host_memory"; then
sys_mem=$(( $(awk '/MemTotal/{printf("%d\n",$2/1024);exit(0)}' /proc/meminfo) - $OCF_RESKEY_utilization_host_memory_reservation ))
- uti_mem=$(crm_attribute -Q -t nodes --node "$host_name" -z -n host_memory 2>/dev/null)
+ uti_mem=$(crm_attribute --quiet -t nodes --node "$host_name" -z -n host_memory 2>/dev/null)
if [ "$sys_mem" != "$uti_mem" ]; then
if ! crm_attribute -t nodes --node "$host_name" -z -n host_memory -v $sys_mem; then
ocf_log err "Failed to set the host_memory utilization attribute for $host_name using crm_attribute."
return 1
fi
fi
fi
if ocf_is_true "$OCF_RESKEY_utilization_hv_memory"; then
hv_mem=$(( $(Host_Total_Memory) - OCF_RESKEY_utilization_hv_memory_reservation ))
- uti_mem=$(crm_attribute -Q -t nodes --node "$host_name" -z -n hv_memory 2>/dev/null)
+ uti_mem=$(crm_attribute --quiet -t nodes --node "$host_name" -z -n hv_memory 2>/dev/null)
[ $hv_mem -lt 0 ] && hv_mem=0
if [ "$hv_mem" != "$uti_mem" ]; then
if ! crm_attribute -t nodes --node "$host_name" -z -n hv_memory -v $hv_mem; then
ocf_log err "Failed to set the hv_memory utilization attribute for $host_name using crm_attribute."
return 1
fi
fi
fi
}
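# Illustrative only: the attributes set above land in the CIB's nodes
# section and can be inspected manually (node name is hypothetical):
#   crm_attribute --quiet -t nodes --node node1 -z -n cpu
#   crm_attribute --quiet -t nodes --node node1 -z -n host_memory
# --quiet prints just the value, which the string comparisons above rely on.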
NodeUtilization_usage() {
cat <<END
usage: $0 {start|stop|monitor|validate-all|meta-data}
Expects to have a fully populated OCF RA-compliant environment set.
END
}
NodeUtilization_start() {
ha_pseudo_resource $statefile start
if ! ocf_is_true "$OCF_RESKEY_dynamic"; then
if ! set_utilization; then
exit $OCF_ERR_GENERIC
fi
fi
exit $OCF_SUCCESS
}
NodeUtilization_stop() {
ha_pseudo_resource $statefile stop
exit $OCF_SUCCESS
}
NodeUtilization_monitor() {
local rc
ha_pseudo_resource $statefile monitor
rc=$?
case $rc in
$OCF_SUCCESS)
if ocf_is_true "$OCF_RESKEY_dynamic"; then
if ! set_utilization; then
exit $OCF_ERR_GENERIC
fi
fi
;;
*) exit $rc;;
esac
}
NodeUtilization_validate() {
exit $OCF_SUCCESS
}
statefile=$OCF_RESOURCE_TYPE.$(echo $OCF_RESOURCE_INSTANCE | sed -e 's/^.*://')
: ${OCF_RESKEY_pidfile:="$HA_VARRUN/NodeUtilization-${OCF_RESOURCE_INSTANCE}"}
: ${OCF_RESKEY_dynamic:="true"}
: ${OCF_RESKEY_utilization_cpu:="true"}
: ${OCF_RESKEY_utilization_cpu_reservation="1"}
: ${OCF_RESKEY_utilization_hv_memory:="true"}
: ${OCF_RESKEY_utilization_hv_memory_reservation="512"}
: ${OCF_RESKEY_utilization_host_memory:="true"}
: ${OCF_RESKEY_utilization_host_memory_reservation="512"}
OCF_REQUIRED_PARAMS=""
OCF_REQUIRED_BINARIES=""
ocf_rarun $*
diff --git a/heartbeat/db2 b/heartbeat/db2
index f56ec3e71..69d0119aa 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -1,902 +1,902 @@
#!/bin/sh
#
# db2
#
# Resource agent that manages a DB2 LUW database in Standard role
# or in an HADR configuration as a master/slave resource.
# Multiple partitions are supported as well.
#
# Copyright (c) 2011 Holger Teutsch <holger.teutsch@web.de>
#
# This agent incorporates code of a previous release created by
# Alan Robertson and the community.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
#######################################################################
db2_usage() {
echo "db2 start|stop|monitor|promote|demote|notify|validate-all|meta-data"
}
db2_meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="db2">
<version>1.0</version>
<longdesc lang="en">
Resource Agent that manages IBM DB2 LUW databases in Standard role as a primitive or in HADR roles in a master/slave configuration. Multiple partitions are supported.
Standard mode:
An instance including all or selected databases is made highly available.
Configure each partition as a separate primitive resource.
HADR mode:
A single database in HADR configuration is made highly available by automating takeover operations.
Configure a master / slave resource with notifications enabled and an
additional monitoring operation with role "Master".
In case of HADR be very deliberate in specifying intervals/timeouts. The detection of a failure including promote must complete within HADR_PEER_WINDOW.
In addition to honoring requirements for crash recovery etc. for your specific database use the following relations as guidance:
"monitor interval" &lt; HADR_PEER_WINDOW - (appr 30 sec)
"promote timeout" &lt; HADR_PEER_WINDOW + (appr 20 sec)
For further information and examples consult http://www.linux-ha.org/wiki/db2_(resource_agent)
</longdesc>
<shortdesc lang="en">Resource Agent that manages an IBM DB2 LUW databases in Standard role as primitive or in HADR roles as master/slave configuration. Multiple partitions are supported.</shortdesc>
<parameters>
<parameter name="instance" unique="1" required="1">
<longdesc lang="en">
The instance of the database(s).
</longdesc>
<shortdesc lang="en">instance</shortdesc>
<content type="string" default="" />
</parameter>
<parameter name="dblist" unique="0" required="0">
<longdesc lang="en">
List of databases to be managed, e.g. "db1 db2".
Defaults to all databases in the instance. Specify one db for HADR mode.
</longdesc>
<shortdesc lang="en">List of databases to be managed</shortdesc>
<content type="string"/>
</parameter>
<parameter name="admin" unique="0" required="0">
<longdesc lang="en">
DEPRECATED: The admin user of the instance.
</longdesc>
<shortdesc lang="en">DEPRECATED: admin</shortdesc>
<content type="string" default="" />
</parameter>
<parameter name="dbpartitionnum" unique="0" required="0">
<longdesc lang="en">
The number of the partition (DBPARTITIONNUM) to be managed.
</longdesc>
<shortdesc lang="en">database partition number (DBPARTITIONNUM)</shortdesc>
<content type="string" default="0" />
</parameter>
</parameters>
<actions>
<action name="start" timeout="120"/>
<action name="stop" timeout="120"/>
<action name="promote" timeout="120"/>
<action name="demote" timeout="120"/>
<action name="notify" timeout="10"/>
<action name="monitor" depth="0" timeout="60" interval="20"/>
<action name="monitor" depth="0" timeout="60" role="Master" interval="22"/>
<action name="validate-all" timeout="5"/>
<action name="meta-data" timeout="5"/>
</actions>
</resource-agent>
END
}
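# Illustrative only: a minimal HADR master/slave configuration sketch in
# crm shell syntax (instance and database names are hypothetical; the
# intervals/timeouts mirror the action defaults above):
#   crm configure primitive db2_sample ocf:heartbeat:db2 \
#       params instance=db2inst1 dblist=SAMPLE \
#       op monitor interval=20 timeout=60 \
#       op monitor interval=22 timeout=60 role=Master
#   crm configure ms ms_db2_sample db2_sample meta notify=true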
#
# validate
# .. and set global variables
#
# exit on error
#
db2_validate() {
local db2home db2sql db2instance
# db2 uses korn shell
check_binary "ksh"
# check required instance vars
if [ -z "$OCF_RESKEY_instance" ]
then
ocf_log err "DB2 required parameter instance is not set!"
return $OCF_ERR_CONFIGURED
fi
instance=$OCF_RESKEY_instance
if [ -n "$OCF_RESKEY_admin" ]
then
ocf_log warn "DB2 deprecated parameter admin is set, using $OCF_RESKEY_admin as instance."
instance=$OCF_RESKEY_admin
fi
db2node=${OCF_RESKEY_dbpartitionnum:-0}
db2home=$(sh -c "echo ~$instance")
db2sql=$db2home/sqllib
db2profile=$db2sql/db2profile
db2bin=$db2sql/bin
STATE_FILE=${HA_RSCTMP}/db2-${OCF_RESOURCE_INSTANCE}.state
# Let's make sure a few important things are there...
if ! [ -d "$db2sql" -a -d "$db2bin" -a -f "$db2profile" -a \
-x "$db2profile" -a -x "$db2bin/db2" ]
then
ocf_is_probe && exit $OCF_NOT_RUNNING
ocf_log err "DB2 required directories and/or files not found"
exit $OCF_ERR_INSTALLED
fi
db2instance=$(runasdb2 'echo $DB2INSTANCE')
if [ "$db2instance" != "$instance" ]
then
ocf_is_probe && exit $OCF_NOT_RUNNING
ocf_log err "DB2 parameter instance \"$instance\" != DB2INSTANCE \"$db2instance\""
exit $OCF_ERR_CONFIGURED
fi
# enough checking for stop to succeed
[ $__OCF_ACTION = stop ] && return $OCF_SUCCESS
dblist=$OCF_RESKEY_dblist
if [ -n "$dblist" ]
then
# support , as separator as well
dblist=$(echo "$dblist" | sed -e 's/[,]/ /g')
else
if ! dblist=$(db2_dblist)
then
ocf_log err "DB2 $instance($db2node): cannot retrieve db directory"
exit $OCF_ERR_INSTALLED
fi
fi
# check requirements for the HADR case
if ocf_is_ms
then
set -- $dblist
if [ $# != 1 ]
then
ocf_log err "DB2 resource $OCF_RESOURCE_INSTANCE must have exactly one name in dblist"
exit $OCF_ERR_CONFIGURED
fi
if [ $db2node != 0 ]
then
ocf_log err "DB2 resource $OCF_RESOURCE_INSTANCE must have dbpartitionnum=0"
exit $OCF_ERR_CONFIGURED
fi
fi
return $OCF_SUCCESS
}
master_score()
{
if ! have_binary "crm_master"; then
return
fi
crm_master $*
}
#
# Run the given command as db2 instance user
#
runasdb2() {
su $instance -c ". $db2profile; $*"
}
#
# Run a command as the DB2 admin, and log the output
#
logasdb2() {
local output rc
output=$(runasdb2 $*)
rc=$?
if [ $rc -eq 0 ]
then
ocf_log info "$output"
else
ocf_log err "$output"
fi
return $rc
}
#
# maintain the fal (first active log) attribute
# db2_fal_attrib DB {set val|get}
#
db2_fal_attrib() {
local db=$1
local attr val rc id node member me
attr=db2hadr_${instance}_${db}_fal
case "$2" in
set)
me=$(uname -n)
# loop over all member nodes and set attribute
crm_node -l |
while read id node member
do
[ "$member" = member -a "$node" != "$me" ] || continue
crm_attribute -t nodes -l reboot --node=$node -n $attr -v "$3"
rc=$?
ocf_log info "DB2 instance $instance($db2node/$db: setting attrib for FAL to $FIRST_ACTIVE_LOG @ $node"
[ $rc != 0 ] && break
done
;;
get)
- crm_attribute -t nodes -l reboot -n $attr -G -Q 2>&1
+ crm_attribute -t nodes -l reboot -n $attr -G --quiet 2>&1
rc=$?
if [ $rc != 0 ]
then
ocf_log warn "DB2 instance $instance($db2node/$db: can't retrieve attribute $attr, are you sure notifications are enabled ?"
fi
;;
*)
exit $OCF_ERR_CONFIGURED
esac
return $rc
}
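# Illustrative only: usage of db2_fal_attrib (log file name is hypothetical):
#   db2_fal_attrib SAMPLE set S0000042.LOG   # record our FAL on peer nodes
#   db2_fal_attrib SAMPLE get                # read the peers' FAL locally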
#
# unfortunately a first connect after a crash may need several minutes
# for some internal cleanup stuff in DB2.
# We run a connect in background so other connects (i.e. monitoring!) may proceed.
#
db2_run_connect() {
local db=$1
logasdb2 "db2 connect to $db; db2 terminate"
}
#
# get some data from the database config
# sets HADR_ROLE HADR_TIMEOUT HADR_PEER_WINDOW FIRST_ACTIVE_LOG
#
db2_get_cfg() {
local db=$1
local output hadr_vars
output=$(runasdb2 db2 get db cfg for $db)
[ $? != 0 ] && return $OCF_ERR_GENERIC
hadr_vars=$(echo "$output" |
awk '/HADR database role/ {printf "HADR_ROLE='%s'; ", $NF;}
/HADR_TIMEOUT/ {printf "HADR_TIMEOUT='%s'; ", $NF;}
/First active log file/ {printf "FIRST_ACTIVE_LOG='%s'\n", $NF;}
/HADR_PEER_WINDOW/ {printf "HADR_PEER_WINDOW='%s'\n", $NF;}')
# sets HADR_ROLE HADR_TIMEOUT HADR_PEER_WINDOW FIRST_ACTIVE_LOG
eval $hadr_vars
# HADR_PEER_WINDOW comes with V9 and is checked later
if [ -z "$HADR_ROLE" -o -z "$HADR_TIMEOUT" ]
then
ocf_log err "DB2 cfg values invalid for $instance($db2node)/$db: $hadr_vars"
return $OCF_ERR_GENERIC
fi
return $OCF_SUCCESS
}
#
# return the list of databases in the instance
#
db2_dblist() {
local output
output=$(runasdb2 db2 list database directory) || return $OCF_ERR_GENERIC
echo "$output" | grep -i 'Database name.*=' | sed 's%.*= *%%'
}
#
# Delayed check of the compatibility of DB2 instance and pacemaker
# config.
# Logically this belongs to validate but certain parameters can only
# be retrieved once the instance is started.
#
db2_check_config_compatibility() {
local db=$1
local is_ms
ocf_is_ms
is_ms=$?
case "$HADR_ROLE/$is_ms" in
STANDARD/0)
ocf_log err "DB2 database $instance/$db is not in a HADR configuration but I am a M/S resource"
exit $OCF_ERR_INSTALLED
;;
STANDARD/1)
# OK
;;
*/0)
if [ -z "$HADR_PEER_WINDOW" ]
then
ocf_log err "DB2 database $instance: release to old, need HADR_PEER_WINDOW (>=V9)"
exit $OCF_ERR_INSTALLED
fi
;;
*/1)
ocf_log err "DB2 database $instance/$db is in a HADR configuration but I must be a M/S resource"
esac
}
#
# Start instance and DB.
# Standard mode starts through "db2 activate" in order to resume the
# previous mode (Standby/Primary).
# If the database is a primary AND we can determine that the running master
# has a higher "first active log", we conclude that we came up after a crash
# and the previous Standby is now Primary.
# The db is then started as Standby.
#
# Other cases: danger of split brain, log error and do nothing.
#
db2_start() {
local output start_cmd db
local start_opts="dbpartitionnum $db2node"
# If we detect that db partitions are not in use, and no
# partition is explicitly specified, activate without
# partition information. This allows db2 instances without
# partition support to be managed.
if [ -z "$OCF_RESKEY_dbpartitionnum" ] && ! [ -a "$db2sql/db2nodes.cfg" ]; then
start_opts=""
fi
if output=$(runasdb2 db2start $start_opts)
then
ocf_log info "DB2 instance $instance($db2node) started: $output"
else
case $output in
*SQL1026N*)
ocf_log info "DB2 instance $instance($db2node) already running: $output"
;;
*)
ocf_log err "$output"
return $OCF_ERR_GENERIC
esac
fi
if ! db2_instance_status
then
ocf_log err "DB2 instance $instance($db2node) is not active!"
return $OCF_ERR_GENERIC
fi
[ $db2node = 0 ] || return $OCF_SUCCESS
# activate DB only on node 0
for db in $dblist
do
# sets HADR_ROLE HADR_TIMEOUT HADR_PEER_WINDOW FIRST_ACTIVE_LOG
db2_get_cfg $db || return $?
# Better late than never: can only check this when the instance is already up
db2_check_config_compatibility $db
start_cmd="db2 activate db $db"
if [ $HADR_ROLE = PRIMARY ]
then
local master_fal
# communicate our FAL to other nodes that might start concurrently
db2_fal_attrib $db set $FIRST_ACTIVE_LOG
# ignore false positive:
# error: Can't use > in [ ]. Escape it or use [[..]]. [SC2073]
# see https://github.com/koalaman/shellcheck/issues/691
# shellcheck disable=SC2073
if master_fal=$(db2_fal_attrib $db get) && [ "$master_fal" '>' $FIRST_ACTIVE_LOG ]
then
ocf_log info "DB2 database $instance($db2node)/$db is Primary and outdated, starting as secondary"
start_cmd="db2 start hadr on db $db as standby"
HADR_ROLE=STANDBY
fi
fi
if output=$(runasdb2 $start_cmd)
then
ocf_log info "DB2 database $instance($db2node)/$db started/activated"
[ $HADR_ROLE != STANDBY ] && db2_run_connect $db &
else
case $output in
SQL1490W*|SQL1494W*|SQL1497W*|SQL1777N*)
ocf_log info "DB2 database $instance($db2node)/$db already activated: $output"
;;
SQL1768N*"Reason code = \"7\""*)
ocf_log err "DB2 database $instance($db2node)/$db is a Primary and the Standby is down"
ocf_log err "Possible split brain ! Manual intervention required."
ocf_log err "If this DB is outdated use \"db2 start hadr on db $db as standby\""
ocf_log err "If this DB is the surviving primary use \"db2 start hadr on db $db as primary by force\""
# might be the Standby is not yet there
# might be a timing problem because "First active log" is delayed
# on the next start attempt we might succeed when FAL was advanced
# might be manual intervention is required
# ... so let pacemaker give it another try and we will succeed then
return $OCF_ERR_GENERIC
;;
*)
ocf_log err "DB2 database $instance($db2node)/$db didn't start: $output"
return $OCF_ERR_GENERIC
esac
fi
done
# come here with success
# Even if we are a db2 Primary, pacemaker requires start to end up in slave mode
echo SLAVE > $STATE_FILE
return $OCF_SUCCESS
}
#
# helper function to be spawned
# so we can detect a hang of the db2stop command
#
db2_stop_bg() {
local rc output
local stop_opts="dbpartitionnum $db2node"
rc=$OCF_SUCCESS
if [ -z "$OCF_RESKEY_dbpartitionnum" ] && ! [ -a "$db2sql/db2nodes.cfg" ]; then
stop_opts=""
fi
if output=$(runasdb2 db2stop force $stop_opts)
then
ocf_log info "DB2 instance $instance($db2node) stopped: $output"
else
case $output in
*SQL1032N*)
#SQL1032N No start database manager command was issued
ocf_log info "$output"
;;
*)
ocf_log err "DB2 instance $instance($db2node) stop failed: $output"
rc=$OCF_ERR_GENERIC
esac
fi
return $rc
}
#
# Stop the given db2 database instance
#
db2_stop() {
local stop_timeout grace_timeout stop_bg_pid i must_kill
# remove master score
master_score -D -l reboot
# be very early here in order to avoid stale data
rm -f $STATE_FILE
db2_instance_status
if [ $? -eq $OCF_NOT_RUNNING ]; then
ocf_log info "DB2 instance $instance already stopped"
return $OCF_SUCCESS
fi
stop_timeout=${OCF_RESKEY_CRM_meta_timeout:-20000}
# grace_timeout is 4/5 of the stop timeout, converted from ms to seconds
# (x/1250 = 0.8 * x/1000); e.g. a 20000 ms timeout yields 16 s
grace_timeout=$((stop_timeout/1250))
# start db2stop in background as this may hang
db2_stop_bg &
stop_bg_pid=$!
# wait for grace_timeout
i=0
while [ $i -lt $grace_timeout ]
do
kill -0 $stop_bg_pid 2>/dev/null || break;
sleep 1
i=$((i+1))
done
# collect exit status but don't hang
if kill -0 $stop_bg_pid 2>/dev/null
then
stoprc=1
kill -9 $stop_bg_pid 2>/dev/null
else
wait $stop_bg_pid
stoprc=$?
fi
must_kill=0
if [ $stoprc -ne 0 ]
then
ocf_log warn "DB2 instance $instance($db2node): db2stop failed, using db2nkill"
must_kill=1
elif ! db2_instance_dead
then
ocf_log warn "DB2 instance $instance($db2node): db2stop indicated success but there a still processes, using db2nkill"
must_kill=1
fi
if [ $must_kill -eq 1 ]
then
# db2nkill kills *all* partitions on the node
if [ -x $db2bin/db2nkill ]
then
logasdb2 $db2bin/db2nkill $db2node
elif [ -x $db2bin/db2_kill ]
then
logasdb2 $db2bin/db2_kill
fi
# loop until the instance is dead (or until lrmd kills us due to timeout)
while ! db2_instance_dead
do
ocf_log info "DB2 instance $instance($db2node): waiting for processes to exit"
sleep 1
done
ocf_log info "DB2 instance $instance($db2node) is now dead"
fi
return $OCF_SUCCESS
}
#
# check whether "enough" processes for a healthy instance are up
#
db2_instance_status() {
local pscount
pscount=$(runasdb2 $db2bin/db2nps $db2node | cut -c9- | grep ' db2[^ ]' | wc -l)
if [ $pscount -ge 4 ]; then
return $OCF_SUCCESS;
elif [ $pscount -ge 1 ]; then
return $OCF_ERR_GENERIC
fi
return $OCF_NOT_RUNNING
}
#
# is the given db2 instance dead?
#
db2_instance_dead() {
local pscount
pscount=$(runasdb2 $db2bin/db2nps $db2node | cut -c9- | grep ' db2[^ ]' | wc -l)
test $pscount -eq 0
}
#
# return the status of the db as "Role/Status"
# e.g. Primary/Peer, Standby/RemoteCatchupPending
#
# If not in HADR configuration return "Standard/Standalone"
#
db2_hadr_status() {
local db=$1
local output
output=$(runasdb2 db2pd -hadr -db $db)
if [ $? != 0 ]
then
echo "Down/Off"
return 1
fi
echo "$output" |
awk '/^\s+HADR_(ROLE|STATE) =/ {printf $3"/"}
/^\s+HADR_CONNECT_STATUS =/ {print $3; exit; }
/^HADR is not active/ {print "Standard/Standalone"; exit; }
/^Role *State */ {getline; printf "%s/%s\n", $1, $2; exit; }'
}
#
# Monitor the db
# And as side effect set crm_master / FAL attribute
#
db2_monitor() {
local CMD output hadr db
local rc
db2_instance_status
rc=$?
if [ $rc -ne $OCF_SUCCESS ]; then
# instance is dead, remove master score
master_score -D -l reboot
exit $rc
fi
[ $db2node = 0 ] || return 0
# monitoring only for partition 0
for db in $dblist
do
hadr=$(db2_hadr_status $db) || return $OCF_ERR_GENERIC
ocf_log debug "Monitor: DB2 database $instance($db2node)/$db has HADR status $hadr"
# set master preference accordingly
case "$hadr" in
PRIMARY/*|Primary/*|Standard/*)
# perform a basic health check
CMD="if db2 connect to $db;
then
db2 select \* from sysibm.sysversions ; rc=\$?;
db2 terminate;
else
rc=\$?;
fi;
exit \$rc"
if ! output=$(runasdb2 $CMD)
then
case "$output" in
SQL1776N*)
# can't connect/select on standby, may be spurious during takeover
;;
*)
ocf_log err "DB2 database $instance($db2node)/$db is not working"
ocf_log err "DB2 message: $output"
# dead primary, remove master score
master_score -D -l reboot
return $OCF_ERR_GENERIC
esac
fi
ocf_log debug "DB2 database $instance($db2node)/$db appears to be working"
ocf_is_ms && master_score -v 10000 -l reboot
;;
STANDBY/*PEER/*|Standby/*Peer)
master_score -v 8000 -l reboot
;;
STANDBY/*|Standby/*)
ocf_log warn "DB2 database $instance($db2node)/$db in status $hadr can never be promoted"
master_score -D -l reboot
;;
*)
return $OCF_ERR_GENERIC
esac
done
# everything OK, return if running as slave
grep MASTER $STATE_FILE >/dev/null 2>&1 || return $OCF_SUCCESS
return $OCF_RUNNING_MASTER
}
#
# Promote db to Primary
#
db2_promote() {
# validate ensured that dblist contains only one entry
local db=$dblist
local i hadr output force
# we run this twice because after a crash of the other node
# the status may still be reported as Peer within HADR_TIMEOUT
# although a connection no longer exists
for i in 1 2
do
hadr=$(db2_hadr_status $db) || return $OCF_ERR_GENERIC
ocf_log info "DB2 database $instance($db2node)/$db has HADR status $hadr and will be promoted"
case "$hadr" in
Standard/Standalone)
# this case only to keep ocf-tester happy
return $OCF_SUCCESS
;;
PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|Primary/Peer)
# nothing to do, only update pacemaker's view
echo MASTER > $STATE_FILE
return $OCF_SUCCESS
;;
STANDBY/PEER/CONNECTED|Standby/Peer)
# must take over
;;
STANDBY/*PEER/DISCONNECTED|Standby/DisconnectedPeer)
# must take over forced
force="by force peer window only"
;;
*)
return $OCF_ERR_GENERIC
esac
if output=$(runasdb2 db2 takeover hadr on db $db $force)
then
# update pacemaker's view
echo MASTER > $STATE_FILE
# turn the log so we rapidly get a new FAL
logasdb2 "db2 archive log for db $db"
return $OCF_SUCCESS
fi
case "$output" in
SQL1770N*"Reason code = \"7\""*)
# expected, HADR_TIMEOUT is now expired
# go for the second try
continue
;;
*)
ocf_log err "DB2 database $instance($db2node)/$db promote failed: $output"
return $OCF_ERR_GENERIC
esac
done
return $OCF_ERR_GENERIC
}
#
# Demote db to standby
#
db2_demote() {
# validate ensured that dblist contains only one entry
local db=$dblist
local hadr
# housekeeping: set pacemaker's view to slave
echo SLAVE > $STATE_FILE
hadr=$(db2_hadr_status $dblist) || return $OCF_ERR_GENERIC
ocf_log info "DB2 database $instance($db2node)/$db has HADR status $hadr and will be demoted"
db2_monitor
return $?
}
#
# handle pre start notification
# We record our first active log on the other nodes.
# If two primaries come up after a crash they can safely determine who is
# the outdated one.
#
db2_notify() {
local node
# only interested in pre-start
[ $OCF_RESKEY_CRM_meta_notify_type = pre \
-a $OCF_RESKEY_CRM_meta_notify_operation = start ] || return $OCF_SUCCESS
# gets FIRST_ACTIVE_LOG
db2_get_cfg $dblist || return $?
db2_fal_attrib $dblist set $FIRST_ACTIVE_LOG || return $OCF_ERR_GENERIC
exit $OCF_SUCCESS
}
########
# Main #
########
case "$__OCF_ACTION" in
meta-data)
db2_meta_data
exit $OCF_SUCCESS
;;
usage)
db2_usage
exit $OCF_SUCCESS
;;
start)
db2_validate
db2_start || exit $?
db2_monitor
exit $?
;;
stop)
db2_validate
db2_stop
exit $?
;;
promote)
db2_validate
db2_promote
exit $?
;;
demote)
db2_validate
db2_demote
exit $?
;;
notify)
db2_validate
db2_notify
exit $?
;;
monitor)
db2_validate
db2_monitor
exit $?
;;
validate-all)
db2_validate
exit $?
;;
*)
db2_usage
exit $OCF_ERR_UNIMPLEMENTED
esac
diff --git a/heartbeat/galera b/heartbeat/galera
index ee8451427..5c3c80d9d 100755
--- a/heartbeat/galera
+++ b/heartbeat/galera
@@ -1,975 +1,975 @@
#!/bin/sh
#
# Copyright (c) 2014 David Vossel <davidvossel@gmail.com>
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
##
# README.
#
# This agent only supports being configured as a multistate Master
# resource.
#
# Slave vs Master role:
#
# During the 'Slave' role, galera instances are in read-only mode and
# will not attempt to connect to the cluster. This role exists only as
# a means to determine which galera instance is the most up-to-date. The
# most up-to-date node will be used to bootstrap a galera cluster that
# has no current members.
#
# The galera instances will only begin to be promoted to the Master role
# once all the nodes in the 'wsrep_cluster_address' connection address
# have entered read-only mode. At that point the node containing the
# database that is most current will be promoted to Master. Once the first
# Master instance bootstraps the galera cluster, the other nodes will be
# promoted to Master as well.
#
# Example: Create a galera cluster using nodes rhel7-node1 rhel7-node2 rhel7-node3
#
# pcs resource create db galera enable_creation=true \
# wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta master-max=3 --master
#
# By setting the 'enable_creation' option, the database will be automatically
# generated at startup. The meta attribute 'master-max=3' means that all 3
# nodes listed in the wsrep_cluster_address list will be allowed to connect
# to the galera cluster and perform replication.
#
# NOTE: If you have more nodes in the pacemaker cluster than you wish
# to have in the galera cluster, make sure to use location constraints to prevent
# pacemaker from attempting to place a galera instance on a node that is
# not in the 'wsrep_cluster_address' list.
#
##
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
. ${OCF_FUNCTIONS_DIR}/mysql-common.sh
NODENAME=$(ocf_attribute_target)
# Some galera installations store the credentials of a check user
# (used to query cluster status) in one of these files
if [ -f "/etc/sysconfig/clustercheck" ]; then
. /etc/sysconfig/clustercheck
elif [ -f "/etc/default/clustercheck" ]; then
. /etc/default/clustercheck
fi
#######################################################################
usage() {
cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|monitor|promote|demote)
$0 manages a galera Database as an HA resource.
The 'start' operation starts the database.
The 'stop' operation stops the database.
The 'status' operation reports whether the database is running.
The 'monitor' operation reports whether the database seems to be working.
The 'promote' operation makes this mysql server run as master.
The 'demote' operation makes this mysql server run as slave.
The 'validate-all' operation reports whether the parameters are valid.
UEND
}
meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="galera">
<version>1.0</version>
<longdesc lang="en">
Resource script for managing a galera database.
</longdesc>
<shortdesc lang="en">Manages a galera instance</shortdesc>
<parameters>
<parameter name="binary" unique="0" required="0">
<longdesc lang="en">
Location of the MySQL server binary
</longdesc>
<shortdesc lang="en">MySQL server binary</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter>
<parameter name="client_binary" unique="0" required="0">
<longdesc lang="en">
Location of the MySQL client binary
</longdesc>
<shortdesc lang="en">MySQL client binary</shortdesc>
<content type="string" default="${OCF_RESKEY_client_binary_default}" />
</parameter>
<parameter name="config" unique="0" required="0">
<longdesc lang="en">
Configuration file
</longdesc>
<shortdesc lang="en">MySQL config</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" />
</parameter>
<parameter name="datadir" unique="0" required="0">
<longdesc lang="en">
Directory containing databases
</longdesc>
<shortdesc lang="en">MySQL datadir</shortdesc>
<content type="string" default="${OCF_RESKEY_datadir_default}" />
</parameter>
<parameter name="user" unique="0" required="0">
<longdesc lang="en">
User running MySQL daemon
</longdesc>
<shortdesc lang="en">MySQL user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" />
</parameter>
<parameter name="group" unique="0" required="0">
<longdesc lang="en">
Group running MySQL daemon (for logfile and directory permissions)
</longdesc>
<shortdesc lang="en">MySQL group</shortdesc>
<content type="string" default="${OCF_RESKEY_group_default}"/>
</parameter>
<parameter name="log" unique="0" required="0">
<longdesc lang="en">
The logfile to be used for mysqld.
</longdesc>
<shortdesc lang="en">MySQL log file</shortdesc>
<content type="string" default="${OCF_RESKEY_log_default}"/>
</parameter>
<parameter name="pid" unique="0" required="0">
<longdesc lang="en">
The pidfile to be used for mysqld.
</longdesc>
<shortdesc lang="en">MySQL pid file</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}"/>
</parameter>
<parameter name="socket" unique="0" required="0">
<longdesc lang="en">
The socket to be used for mysqld.
</longdesc>
<shortdesc lang="en">MySQL socket</shortdesc>
<content type="string" default="${OCF_RESKEY_socket_default}"/>
</parameter>
<parameter name="enable_creation" unique="0" required="0">
<longdesc lang="en">
If the MySQL database does not exist, it will be created
</longdesc>
<shortdesc lang="en">Create the database if it does not exist</shortdesc>
<content type="boolean" default="${OCF_RESKEY_enable_creation_default}"/>
</parameter>
<parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en">
Additional parameters which are passed to the mysqld on startup.
(e.g. --skip-external-locking or --skip-grant-tables)
</longdesc>
<shortdesc lang="en">Additional parameters to pass to mysqld</shortdesc>
<content type="string" default="${OCF_RESKEY_additional_parameters_default}"/>
</parameter>
<parameter name="wsrep_cluster_address" unique="0" required="1">
<longdesc lang="en">
The galera cluster address. This takes the form of:
gcomm://node,node,node
Only nodes present in this node list will be allowed to start a galera instance.
The galera node names listed in this address are expected to match valid
pacemaker node names. If the names differ, you must provide a
mapping via the cluster_host_map option.
</longdesc>
<shortdesc lang="en">Galera cluster address</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="cluster_host_map" unique="0" required="0">
<longdesc lang="en">
A mapping of pacemaker node names to galera node names.
To be used when the pacemaker and galera names differ
(e.g. when galera names map to IPs from a specific network interface).
This takes the form of:
pcmk1:node.1.galera;pcmk2:node.2.galera;pcmk3:node.3.galera
where the galera resource started on node pcmk1 would be named
node.1.galera in the wsrep_cluster_address
</longdesc>
<shortdesc lang="en">Pacemaker to Galera name mapping</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="check_user" unique="0" required="0">
<longdesc lang="en">
Cluster check user.
</longdesc>
<shortdesc lang="en">MySQL test user</shortdesc>
<content type="string" default="root" />
</parameter>
<parameter name="check_passwd" unique="0" required="0">
<longdesc lang="en">
Cluster check user password
</longdesc>
<shortdesc lang="en">check password</shortdesc>
<content type="string" default="" />
</parameter>
</parameters>
<actions>
<action name="start" timeout="120" />
<action name="stop" timeout="120" />
<action name="status" timeout="60" />
<action name="monitor" depth="0" timeout="30" interval="20" />
<action name="monitor" role="Master" depth="0" timeout="30" interval="10" />
<action name="monitor" role="Slave" depth="0" timeout="30" interval="30" />
<action name="promote" timeout="300" />
<action name="demote" timeout="120" />
<action name="validate-all" timeout="5" />
<action name="meta-data" timeout="5" />
</actions>
</resource-agent>
END
}
get_option_variable()
{
local key=$1
$MYSQL $MYSQL_OPTIONS_CHECK -e "SHOW VARIABLES like '$key';" | tail -1
}
get_status_variable()
{
local key=$1
$MYSQL $MYSQL_OPTIONS_CHECK -e "show status like '$key';" | tail -1
}
set_bootstrap_node()
{
local node=$(ocf_attribute_target $1)
${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -v "true"
}
clear_bootstrap_node()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -D
}
is_bootstrap()
{
- ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -Q 2>/dev/null
+ ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" --quiet 2>/dev/null
}
set_no_grastate()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -v "true"
}
clear_no_grastate()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -D
}
is_no_grastate()
{
local node=$(ocf_attribute_target $1)
- ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -Q 2>/dev/null
+ ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" --quiet 2>/dev/null
}
clear_last_commit()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -D
}
set_last_commit()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -v $1
}
get_last_commit()
{
local node=$(ocf_attribute_target $1)
if [ -z "$node" ]; then
- ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null
+ ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" --quiet 2>/dev/null
else
- ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null
+ ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" --quiet 2>/dev/null
fi
}
clear_safe_to_bootstrap()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -D
}
set_safe_to_bootstrap()
{
${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -v $1
}
get_safe_to_bootstrap()
{
local node=$(ocf_attribute_target $1)
if [ -z "$node" ]; then
- ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -Q 2>/dev/null
+ ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" --quiet 2>/dev/null
else
- ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -Q 2>/dev/null
+ ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" --quiet 2>/dev/null
fi
}
wait_for_sync()
{
local state=$(get_status_variable "wsrep_local_state")
ocf_log info "Waiting for database to sync with the cluster. "
while [ "$state" != "4" ]; do
sleep 1
state=$(get_status_variable "wsrep_local_state")
done
ocf_log info "Database synced."
}
is_primary()
{
cluster_status=$(get_status_variable "wsrep_cluster_status")
if [ "$cluster_status" = "Primary" ]; then
return 0
fi
if [ -z "$cluster_status" ]; then
ocf_exit_reason "Unable to retrieve wsrep_cluster_status, verify check_user '$OCF_RESKEY_check_user' has permissions to view status"
else
ocf_log info "Galera instance wsrep_cluster_status=${cluster_status}"
fi
return 1
}
is_readonly()
{
local res=$(get_option_variable "read_only")
if ! ocf_is_true "$res"; then
return 1
fi
cluster_status=$(get_status_variable "wsrep_cluster_status")
if ! [ "$cluster_status" = "Disconnected" ]; then
return 1
fi
return 0
}
master_exists()
{
if [ "$__OCF_ACTION" = "demote" ]; then
# We don't want to detect master instances during demote.
# 1. we could be detecting ourselves as being master, which is no longer the case.
# 2. we could be detecting other master instances that are in the process of shutting down.
# by not detecting other master instances in "demote" we are deferring this check
# to the next recurring monitor operation which will be much more accurate
return 1
fi
# determine if a master instance is already up and is healthy
crm_mon --as-xml | grep "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1
return $?
}
clear_master_score()
{
local node=$(ocf_attribute_target $1)
if [ -z "$node" ]; then
$CRM_MASTER -D
else
$CRM_MASTER -D -N $node
fi
}
set_master_score()
{
local node=$(ocf_attribute_target $1)
if [ -z "$node" ]; then
$CRM_MASTER -v 100
else
$CRM_MASTER -N $node -v 100
fi
}
promote_everyone()
{
for node in $(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' '); do
local pcmk_node=$(galera_to_pcmk_name $node)
if [ -z "$pcmk_node" ]; then
ocf_log err "Could not determine pacemaker node from galera name <${node}>."
return
else
node=$pcmk_node
fi
set_master_score $node
done
}
greater_than_equal_long()
{
# there are values we need to compare in this script
# that are too large for shell -gt to process
echo | awk -v n1="$1" -v n2="$2" '{if (n1>=n2) printf ("true"); else printf ("false");}' | grep -q "true"
}
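# Illustrative only: why awk is used above. A sequence number such as
# 18446744073709551615 (the "no commit yet" marker handled below) can
# exceed the shell's integer range, making a plain [ "$a" -gt "$b" ] test
# unreliable, while awk's numeric comparison copes:
#   greater_than_equal_long 18446744073709551615 0   # succeeds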
galera_to_pcmk_name()
{
local galera=$1
if [ -z "$OCF_RESKEY_cluster_host_map" ]; then
echo $galera
else
echo "$OCF_RESKEY_cluster_host_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$2=="'"$galera"'" {print $1;exit}'
fi
}
pcmk_to_galera_name()
{
local pcmk=$1
if [ -z "$OCF_RESKEY_cluster_host_map" ]; then
echo $pcmk
else
echo "$OCF_RESKEY_cluster_host_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$1=="'"$pcmk"'" {print $2;exit}'
fi
}
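# Illustrative only: with cluster_host_map="pcmk1:node.1.galera;pcmk2:node.2.galera"
# (hypothetical names), pcmk_to_galera_name pcmk1 prints node.1.galera and
# galera_to_pcmk_name node.2.galera prints pcmk2.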
detect_first_master()
{
local best_commit=0
local last_commit=0
local missing_nodes=0
local nodes=""
local nodes_recovered=""
local all_nodes
local best_node_gcomm
local best_node
local safe_to_bootstrap
all_nodes=$(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' ')
best_node_gcomm=$(echo "$all_nodes" | sed 's/^.* \(.*\)$/\1/')
best_node=$(galera_to_pcmk_name $best_node_gcomm)
if [ -z "$best_node" ]; then
ocf_log err "Could not determine initial best node from galera name <${best_node_gcomm}>."
return
fi
# avoid selecting a recovered node as bootstrap if possible
for node in $all_nodes; do
local pcmk_node=$(galera_to_pcmk_name $node)
if [ -z "$pcmk_node" ]; then
ocf_log err "Could not determine pacemaker node from galera name <${node}>."
return
else
node=$pcmk_node
fi
if is_no_grastate $node; then
nodes_recovered="$nodes_recovered $node"
else
nodes="$nodes $node"
fi
done
for node in $nodes_recovered $nodes; do
safe_to_bootstrap=$(get_safe_to_bootstrap $node)
if [ "$safe_to_bootstrap" = "1" ]; then
# Galera marked the node as safe to bootstrap during shutdown. Let's just
# pick it as our bootstrap node.
ocf_log info "Node <${node}> is marked as safe to bootstrap."
best_node=$node
# We don't need to wait for the other nodes to report state in this case
missing_nodes=0
break
fi
last_commit=$(get_last_commit $node)
if [ -z "$last_commit" ]; then
ocf_log info "Waiting on node <${node}> to report database status before Master instances can start."
missing_nodes=1
continue
fi
# this means -1, or that no commit has occurred yet.
if [ "$last_commit" = "18446744073709551615" ]; then
last_commit="0"
fi
greater_than_equal_long "$last_commit" "$best_commit"
if [ $? -eq 0 ]; then
best_node=$(ocf_attribute_target $node)
best_commit=$last_commit
fi
done
if [ $missing_nodes -eq 1 ]; then
return
fi
ocf_log info "Promoting $best_node to be our bootstrap node"
set_master_score $best_node
set_bootstrap_node $best_node
}
detect_safe_to_bootstrap()
{
local safe_to_bootstrap=""
if [ -f ${OCF_RESKEY_datadir}/grastate.dat ]; then
ocf_log info "attempting to read safe_to_bootstrap flag from ${OCF_RESKEY_datadir}/grastate.dat"
safe_to_bootstrap=$(sed -n 's/^safe_to_bootstrap:\s*\(.*\)$/\1/p' < ${OCF_RESKEY_datadir}/grastate.dat)
fi
if [ "$safe_to_bootstrap" = "1" ] || [ "$safe_to_bootstrap" = "0" ]; then
set_safe_to_bootstrap $safe_to_bootstrap
else
clear_safe_to_bootstrap
fi
}
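# Illustrative only: a typical grastate.dat, as read by the helpers above
# (values are hypothetical):
#   # GALERA saved state
#   version: 2.1
#   uuid:    5ee99582-bb8d-11e2-b8e3-23de375c1d30
#   seqno:   8204503945773
#   safe_to_bootstrap: 1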
detect_last_commit()
{
local last_commit
local recover_args="--defaults-file=$OCF_RESKEY_config \
--pid-file=$OCF_RESKEY_pid \
--socket=$OCF_RESKEY_socket \
--datadir=$OCF_RESKEY_datadir \
--user=$OCF_RESKEY_user"
local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p'
local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p'
# codership/galera#354
# Some ungraceful shutdowns can leave an empty gvwstate.dat on
# disk. This will prevent galera to join the cluster if it is
# configured to attempt PC recovery. Removing that file makes the
# node fall back to the normal, unoptimized joining process.
if [ -f ${OCF_RESKEY_datadir}/gvwstate.dat ] && \
[ ! -s ${OCF_RESKEY_datadir}/gvwstate.dat ]; then
ocf_log warn "empty ${OCF_RESKEY_datadir}/gvwstate.dat detected, removing it to prevent PC recovery failure at next restart"
rm -f ${OCF_RESKEY_datadir}/gvwstate.dat
fi
ocf_log info "attempting to detect last commit version by reading ${OCF_RESKEY_datadir}/grastate.dat"
last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')"
if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then
local tmp=$(mktemp)
chown $OCF_RESKEY_user:$OCF_RESKEY_group $tmp
# if we pass here because grastate.dat doesn't exist,
# try not to bootstrap from this node if possible
if [ ! -f ${OCF_RESKEY_datadir}/grastate.dat ]; then
set_no_grastate
fi
ocf_log info "now attempting to detect last commit version using 'mysqld_safe --wsrep-recover'"
${OCF_RESKEY_binary} $recover_args --wsrep-recover --log-error=$tmp 2>/dev/null
last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)"
if [ -z "$last_commit" ]; then
# Galera uses InnoDB's 2pc transactions internally. If
# server was stopped in the middle of a replication, the
# recovery may find a "prepared" XA transaction in the
# redo log, and mysql won't recover automatically
local recovery_file="$(cat $tmp | sed -n $recovery_file_regex)"
if [ -e $recovery_file ]; then
cat $recovery_file | grep -q -E '\[ERROR\]\s+Found\s+[0-9]+\s+prepared\s+transactions!' 2>/dev/null
if [ $? -eq 0 ]; then
# we can only rollback the transaction, but that's OK
# since the DB will get resynchronized anyway
ocf_log warn "local node <${NODENAME}> was not shutdown properly. Rollback stuck transaction with --tc-heuristic-recover"
${OCF_RESKEY_binary} $recover_args --wsrep-recover \
--tc-heuristic-recover=rollback --log-error=$tmp 2>/dev/null
last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)"
if [ ! -z "$last_commit" ]; then
ocf_log warn "State recovered. force SST at next restart for full resynchronization"
rm -f ${OCF_RESKEY_datadir}/grastate.dat
# try not to bootstrap from this node if possible
set_no_grastate
fi
fi
fi
fi
rm -f $tmp
fi
if [ ! -z "$last_commit" ]; then
ocf_log info "Last commit version found: $last_commit"
set_last_commit $last_commit
return $OCF_SUCCESS
else
ocf_exit_reason "Unable to detect last known write sequence number"
clear_last_commit
return $OCF_ERR_GENERIC
fi
}
# For galera, promote is really start
galera_promote()
{
local rc
local extra_opts
local bootstrap
local safe_to_bootstrap
master_exists
if [ $? -eq 0 ]; then
# join without bootstrapping
extra_opts="--wsrep-cluster-address=${OCF_RESKEY_wsrep_cluster_address}"
else
bootstrap=$(is_bootstrap)
if ocf_is_true $bootstrap; then
# The best node for bootstrapping wasn't cleanly shut down. Allow
# bootstrapping anyway
if [ "$(get_safe_to_bootstrap)" = "0" ]; then
sed -ie 's/^\(safe_to_bootstrap:\) 0/\1 1/' ${OCF_RESKEY_datadir}/grastate.dat
fi
ocf_log info "Node <${NODENAME}> is bootstrapping the cluster"
extra_opts="--wsrep-cluster-address=gcomm://"
else
ocf_exit_reason "Failure, Attempted to promote Master instance of $OCF_RESOURCE_INSTANCE before bootstrap node has been detected."
clear_last_commit
return $OCF_ERR_GENERIC
fi
fi
galera_monitor
if [ $? -eq $OCF_RUNNING_MASTER ]; then
if ocf_is_true $bootstrap; then
promote_everyone
clear_bootstrap_node
ocf_log info "boostrap node already up, promoting the rest of the galera instances."
fi
clear_safe_to_bootstrap
clear_last_commit
return $OCF_SUCCESS
fi
# last commit/safe_to_bootstrap flag are no longer relevant once promoted
clear_last_commit
clear_safe_to_bootstrap
mysql_common_prepare_dirs
mysql_common_start "$extra_opts"
rc=$?
if [ $rc != $OCF_SUCCESS ]; then
return $rc
fi
galera_monitor
rc=$?
if [ $rc != $OCF_SUCCESS -a $rc != $OCF_RUNNING_MASTER ]; then
ocf_exit_reason "Failed initial monitor action"
return $rc
fi
is_readonly
if [ $? -eq 0 ]; then
ocf_exit_reason "Failure. Master instance started in read-only mode, check configuration."
return $OCF_ERR_GENERIC
fi
is_primary
if [ $? -ne 0 ]; then
ocf_exit_reason "Failure. Master instance started, but is not in Primary mode."
return $OCF_ERR_GENERIC
fi
if ocf_is_true $bootstrap; then
promote_everyone
clear_bootstrap_node
# clear attribute no-grastate. if last shutdown was
# not clean, we cannot be extra-cautious by requesting a SST
# since this is the bootstrap node
clear_no_grastate
ocf_log info "Bootstrap complete, promoting the rest of the galera instances."
else
# if this is not the bootstrap node, make sure this instance
# syncs with the rest of the cluster before promotion returns.
wait_for_sync
# sync is done, clear info about last startup
clear_no_grastate
fi
ocf_log info "Galera started"
return $OCF_SUCCESS
}
galera_demote()
{
mysql_common_stop
rc=$?
if [ $rc -ne $OCF_SUCCESS ] && [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_exit_reason "Failed to stop Master galera instance during demotion to Master"
return $rc
fi
# if this node was previously a bootstrap node, that is no longer the case.
clear_bootstrap_node
clear_last_commit
clear_no_grastate
clear_safe_to_bootstrap
# Clear master score here rather than letting pacemaker do so once
# demote finishes. This way a promote cannot take place right
# after this demote even if pacemaker is requested to do so. It
# will first have to run a start/monitor op, to reprobe the state
# of the other galera nodes and act accordingly.
clear_master_score
# record last commit for next promotion
detect_safe_to_bootstrap
detect_last_commit
rc=$?
return $rc
}
galera_start()
{
local rc
local galera_node
galera_node=$(pcmk_to_galera_name $NODENAME)
if [ -z "$galera_node" ]; then
ocf_exit_reason "Could not determine galera name from pacemaker node <${NODENAME}>."
return $OCF_ERR_CONFIGURED
fi
echo $OCF_RESKEY_wsrep_cluster_address | grep -q -F $galera_node
if [ $? -ne 0 ]; then
ocf_exit_reason "local node <${NODENAME}> (galera node <${galera_node}>) must be a member of the wsrep_cluster_address <${OCF_RESKEY_wsrep_cluster_address}> to start this galera instance"
return $OCF_ERR_CONFIGURED
fi
galera_monitor
if [ $? -eq $OCF_RUNNING_MASTER ]; then
ocf_exit_reason "master galera instance started outside of the cluster's control"
return $OCF_ERR_GENERIC
fi
mysql_common_prepare_dirs
detect_safe_to_bootstrap
detect_last_commit
rc=$?
if [ $rc -ne $OCF_SUCCESS ]; then
return $rc
fi
master_exists
if [ $? -eq 0 ]; then
ocf_log info "Master instances are already up, setting master score so this instance will join galera cluster."
set_master_score $NODENAME
else
clear_master_score
detect_first_master
fi
return $OCF_SUCCESS
}
galera_monitor()
{
local rc
local galera_node
local status_loglevel="err"
# Set loglevel to info during probe
if ocf_is_probe; then
status_loglevel="info"
fi
mysql_common_status $status_loglevel
rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then
last_commit=$(get_last_commit $node)
if [ -n "$last_commit" ]; then
# if last commit is set, this instance is considered started in slave mode
rc=$OCF_SUCCESS
master_exists
if [ $? -ne 0 ]; then
detect_first_master
else
# a master instance exists and is healthy, promote this
# local read only instance
# so it can join the master galera cluster.
set_master_score
fi
fi
return $rc
elif [ $rc -ne $OCF_SUCCESS ]; then
return $rc
fi
# if we make it here, mysql is running. Check cluster status now.
galera_node=$(pcmk_to_galera_name $NODENAME)
if [ -z "$galera_node" ]; then
ocf_exit_reason "Could not determine galera name from pacemaker node <${NODENAME}>."
return $OCF_ERR_CONFIGURED
fi
echo $OCF_RESKEY_wsrep_cluster_address | grep -q -F $galera_node
if [ $? -ne 0 ]; then
ocf_exit_reason "local node <${NODENAME}> (galera node <${galera_node}>) is started, but is not a member of the wsrep_cluster_address <${OCF_RESKEY_wsrep_cluster_address}>"
return $OCF_ERR_GENERIC
fi
is_primary
if [ $? -eq 0 ]; then
if ocf_is_probe; then
# restore master score during probe
# if we detect this is a master instance
set_master_score
fi
rc=$OCF_RUNNING_MASTER
else
ocf_exit_reason "local node <${NODENAME}> is started, but not in primary mode. Unknown state."
rc=$OCF_ERR_GENERIC
fi
return $rc
}
galera_stop()
{
local rc
# make sure the process is stopped
mysql_common_stop
rc=$?
clear_safe_to_bootstrap
clear_last_commit
clear_master_score
clear_bootstrap_node
clear_no_grastate
return $rc
}
galera_validate()
{
if ! ocf_is_ms; then
ocf_exit_reason "Galera must be configured as a multistate Master/Slave resource."
return $OCF_ERR_CONFIGURED
fi
if [ -z "$OCF_RESKEY_wsrep_cluster_address" ]; then
ocf_exit_reason "Galera must be configured with a wsrep_cluster_address value."
return $OCF_ERR_CONFIGURED
fi
mysql_common_validate
}
case "$1" in
meta-data) meta_data
exit $OCF_SUCCESS;;
usage|help) usage
exit $OCF_SUCCESS;;
esac
galera_validate
rc=$?
LSB_STATUS_STOPPED=3
if [ $rc -ne 0 ]; then
case "$1" in
stop) exit $OCF_SUCCESS;;
monitor) exit $OCF_NOT_RUNNING;;
status) exit $LSB_STATUS_STOPPED;;
*) exit $rc;;
esac
fi
if [ -z "${OCF_RESKEY_check_passwd}" ]; then
# This value is automatically sourced from /etc/sysconfig/clustercheck if available
OCF_RESKEY_check_passwd=${MYSQL_PASSWORD}
fi
if [ -z "${OCF_RESKEY_check_user}" ]; then
# This value is automatically sourced from /etc/sysconfig/clustercheck if available
OCF_RESKEY_check_user=${MYSQL_USERNAME}
fi
: ${OCF_RESKEY_check_user="root"}
MYSQL_OPTIONS_CHECK="-nNE --user=${OCF_RESKEY_check_user}"
if [ -n "${OCF_RESKEY_check_passwd}" ]; then
MYSQL_OPTIONS_CHECK="$MYSQL_OPTIONS_CHECK --password=${OCF_RESKEY_check_passwd}"
fi
# This value is automatically sourced from /etc/sysconfig/clustercheck if available
if [ -n "${MYSQL_HOST}" ]; then
MYSQL_OPTIONS_CHECK="$MYSQL_OPTIONS_CHECK -h ${MYSQL_HOST}"
fi
# This value is automatically sourced from /etc/sysconfig/clustercheck if available
if [ -n "${MYSQL_PORT}" ]; then
MYSQL_OPTIONS_CHECK="$MYSQL_OPTIONS_CHECK -P ${MYSQL_PORT}"
fi
# What kind of method was invoked?
case "$1" in
start) galera_start;;
stop) galera_stop;;
status) mysql_common_status err;;
monitor) galera_monitor;;
promote) galera_promote;;
demote) galera_demote;;
validate-all) exit $OCF_SUCCESS;;
*) usage
exit $OCF_ERR_UNIMPLEMENTED;;
esac
# vi:sw=4:ts=4:et:
