diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance index ca320de1f..bd20c1fc8 100755 --- a/heartbeat/SAPInstance +++ b/heartbeat/SAPInstance @@ -1,980 +1,980 @@ #!/bin/sh # # SAPInstance # # Description: Manages a single SAP Instance as a High-Availability # resource. One SAP Instance is defined by one # SAP Instance-Profile. start/stop handles all services # of the START-Profile, status and monitor care only # about essential services. # # Author: Alexander Krauth, June 2006 # Support: linux@sap.com # License: GNU General Public License (GPL) # Copyright: (c) 2006-2008 Alexander Krauth # # An example usage: # See usage() function below for more details... # # OCF instance parameters: # OCF_RESKEY_InstanceName # OCF_RESKEY_DIR_EXECUTABLE (optional, well known directories will be searched by default) # OCF_RESKEY_DIR_PROFILE (optional, well known directories will be searched by default) # OCF_RESKEY_START_PROFILE (optional, well known directories will be searched by default) # OCF_RESKEY_START_WAITTIME (optional, to solve timing problems during J2EE-Addin start) # OCF_RESKEY_AUTOMATIC_RECOVER (optional, automatic startup recovery using cleanipc, default is false) # OCF_RESKEY_MONITOR_SERVICES (optional, default is to monitor critical services only) # OCF_RESKEY_SHUTDOWN_METHOD (optional, defaults to NORMAL, KILL: terminate the SAP instance with OS commands - faster, at your own risk) # OCF_RESKEY_ERS_InstanceName (optional, InstanceName of the ERS instance in a Master/Slave configuration) # OCF_RESKEY_ERS_START_PROFILE (optional, START_PROFILE of the ERS instance in a Master/Slave configuration) # OCF_RESKEY_PRE_START_USEREXIT (optional, lists a script which can be executed before the resource is started) # OCF_RESKEY_POST_START_USEREXIT (optional, lists a script which can be executed after the resource is started) # OCF_RESKEY_PRE_STOP_USEREXIT (optional, lists a script which can be executed before the resource is stopped) # OCF_RESKEY_POST_STOP_USEREXIT (optional, lists a script which can be executed after the resource is stopped) # OCF_RESKEY_IS_ERS (needed for ENQ/REPL NW 740) # # TODO: - Option to shutdown sapstartsrv for non-active instances -> that means: do probes only with OS tools (sapinstance_status) # - Option for better standalone enqueue server monitoring, using ensmon (test enque-deque) # - Option for cleanup abandoned enqueue replication tables # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### SH=/bin/sh sapinstance_usage() { methods=`sapinstance_methods` methods=`echo $methods | tr ' ' '|'` cat <<-EOF usage: $0 ($methods) $0 manages a SAP Instance as an HA resource. 
The 'start' operation starts the instance or the ERS instance in a Master/Slave configuration The 'stop' operation stops the instance The 'status' operation reports whether the instance is running The 'monitor' operation reports whether the instance seems to be working The 'promote' operation starts the primary instance in a Master/Slave configuration The 'demote' operation stops the primary instance and starts the ERS instance The 'reload' operation allows changed parameters (non-unique only) without restarting the service The 'notify' operation always returns SUCCESS The 'validate-all' operation reports whether the parameters are valid The 'methods' operation reports on the methods $0 supports EOF } sapinstance_meta_data() { cat < 2.14 Usually a SAP system consists of one database and at least one or more SAP instances (sometimes called application servers). One SAP Instance is defined by having exactly one instance profile. The instance profiles can usually be found in the directory /sapmnt/SID/profile. Each instance must be configured as its own resource in the cluster configuration. The resource agent supports the following SAP versions: - SAP WebAS ABAP Release 6.20 - 7.40 - SAP WebAS Java Release 6.40 - 7.40 - SAP WebAS ABAP + Java Add-In Release 6.20 - 7.40 (Java is not monitored by the cluster in that case) When using a SAP Kernel 6.40 please check and implement the actions from the section "Manual postprocessing" from SAP note 995116 (http://sdn.sap.com). Other versions may also work with this agent, but have not been verified. All operations of the SAPInstance resource agent are done by using the startup framework called SAP Management Console or sapstartsrv that was introduced with SAP kernel release 6.40. Find more information about the SAP Management Console in SAP note 1014480. Using this framework defines a clear interface for the Heartbeat cluster as to how it sees the SAP system. The options for monitoring the SAP system are also much better than with other methods such as just watching the ps command for running processes or doing some pings to the application. sapstartsrv uses SOAP messages to request the status of running SAP processes. Therefore it can actually ask a process itself what its status is, independent of other problems that might exist at the same time. sapstartsrv knows 4 status colours: - GREEN = everything is fine - YELLOW = something is wrong, but the service is still working - RED = the service does not work - GRAY = the service has not been started The SAPInstance resource agent will interpret GREEN and YELLOW as OK. That means that minor problems will not be reported to the Heartbeat cluster. This prevents the cluster from doing an unwanted failover. The statuses RED and GRAY are reported as NOT_RUNNING to the cluster. Depending on the status the cluster expects from the resource, it will do a restart, a failover or nothing at all. (A small standalone illustration of this mapping follows below.) Manages a SAP instance as an HA resource. The fully qualified SAP instance name, e.g. P01_DVEBMGS00_sapp01ci. Usually this is the name of the SAP instance profile. Instance name: SID_INSTANCE_VIR-HOSTNAME The fully qualified path where to find sapstartsrv and sapcontrol. Specify this parameter, if you have changed the SAP kernel directory location after the default SAP installation. Path of sapstartsrv and sapcontrol The fully qualified path where to find the SAP START profile. Specify this parameter, if you have changed the SAP profile directory location after the default SAP installation.
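A small standalone illustration of the colour mapping described above (not part of the agent; the instance number 00 and a sapcontrol binary in PATH are assumptions of this sketch): it queries sapstartsrv the same way the resource agent does and applies the GREEN/YELLOW = OK, RED/GRAY = NOT_RUNNING rule per process.

#!/bin/sh
# Hypothetical helper, for illustration only.
sapcontrol -nr 00 -function GetProcessList -format script |
grep '^[0-9]* dispstatus: ' |
while read num field colour; do
    case "$colour" in
        GREEN|YELLOW) echo "process $num: $colour -> OK";;
        *)            echo "process $num: $colour -> NOT_RUNNING";;
    esac
done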
Path of start profile The name of the SAP START profile. Specify this parameter, if you have changed the name of the SAP START profile after the default SAP installation. As SAP release 7.10 does not have a START profile anymore, you need to specify the Instance Profile instead. Start profile name After that time in seconds a monitor operation is executed by the resource agent. If the monitor returns SUCCESS, the start is handled as SUCCESS. This is useful to resolve timing problems with e.g. the J2EE-Addin instance. Usually the resource agent waits until all services are started and the SAP Management Console reports a GREEN status. A double stack installation (ABAP + Java AddIn) consists of an ABAP dispatcher and a JAVA instance. Normally the start of the JAVA instance takes much longer than the start of the ABAP instance. For a JAVA Instance you may need to configure a much higher timeout for the start operation of the resource in Heartbeat. The disadvantage here is that the discovery of a failed start by the cluster takes longer. Somebody might say: For me it is important that the ABAP instance is up and running. A failure of the JAVA instance shall not cause a failover of the SAP instance. Actually the SAP MC reports a YELLOW status if the JAVA instance of a double stack system fails. From the resource agent point of view YELLOW means: everything is OK. Setting START_WAITTIME to a lower value makes the resource agent check the status of the instance during a start operation after that time. Instead of waiting for a GREEN status, it then already reports SUCCESS to the cluster after the specified time in case of a YELLOW status. That is only useful for double stack systems. Check the successful start after that time (do not wait for J2EE-Addin) The SAPInstance resource agent tries to recover a failed start attempt automatically one time. This is done by killing running instance processes, removing the kill.sap file and executing cleanipc. Sometimes a crashed SAP instance leaves some processes and/or shared memory segments behind. Setting this option to true will try to remove those leftovers during a start operation. That is to reduce manual work for the administrator. Enable or disable automatic startup recovery Within a SAP instance there can be several services. Usually you will find the defined services in the START profile of the related instance (Attention: with SAP Release 7.10 the START profile content was moved to the instance profile). Not all of those services are worth monitoring by the cluster. For example, you probably do not want to fail over your SAP instance if the central syslog collector daemon fails. These services are monitored within the SAPInstance resource agent: - disp+work - msg_server - enserver (ENSA1) - enq_server (ENSA2) - enrepserver (ENSA1) - enq_replicator (ENSA2) - jcontrol - jstart Some other services could be monitored as well. They have to be given with the parameter MONITOR_SERVICES, e.g.: - sapwebdisp - TREXDaemon.x These names match the strings used in the output of the command 'sapcontrol -nr [Instance-Nr] -function GetProcessList'. The default should fit most cases where you want to manage a SAP Instance from the cluster. You may change this with this parameter, if you want to monitor more, fewer or other services that sapstartsrv supports.
You may specify multiple services separated by a | (pipe) sign in this parameter: disp+work|msg_server|enserver Services to monitor - Usual a SAP Instance is stopped by the command 'sapcontrol -nr InstanceNr -function Stop'. SHUTDOWN_METHOD=KILL means to kill the SAP Instance using OS commands. SAP processes of the instance are terminated with 'kill -9', shared memory is deleted with 'cleanipc' and the 'kill.sap' file will be deleted. That method is much faster than the gracefull stop, but the instance does not have the chance to say goodbye to other SAPinstances in the same system. USE AT YOUR OWN RISK !! + Usually a SAP Instance is stopped by the command 'sapcontrol -nr InstanceNr -function Stop'. SHUTDOWN_METHOD=KILL means to kill the SAP Instance using OS commands. SAP processes of the instance are terminated with 'kill -9', shared memory is deleted with 'cleanipc' and the 'kill.sap' file will be deleted. That method is much faster than the graceful stop, but the instance does not have the chance to say goodbye to other SAPinstances in the same system. USE AT YOUR OWN RISK !! Shut down gracefully or kill a SAP instance by terminating the processes. (normal|KILL) Only used in a Master/Slave resource configuration: The fully qualified SAP enqueue replication instance name, e.g. P01_ERS02_sapp01ers. Usually this is the name of the SAP instance profile. -The enqueue replication instance must be installed, before you want to configure a master-slave cluster recource. +The enqueue replication instance must be installed, before you want to configure a master-slave cluster resource. The master-slave configuration in the cluster must use these properties: clone_max = 2 clone_node_max = 1 master_node_max = 1 master_max = 1 (a matching example cluster configuration is sketched below, after this meta-data section) Enqueue replication instance name: SID_INSTANCE_VIR-HOSTNAME Only used in a Master/Slave resource configuration: The parameter ERS_InstanceName must also be set in this configuration. The name of the SAP START profile. Specify this parameter, if you have changed the name of the SAP START profile after the default SAP installation. As SAP release 7.10 does not have a START profile anymore, you need to specify the Instance Profile instead. Enqueue replication start profile name The fully qualified path where to find a script or program which should be executed before this resource gets started. Path to a pre-start script The fully qualified path where to find a script or program which should be executed after this resource got started. Path to a post-start script The fully qualified path where to find a script or program which should be executed before this resource gets stopped. Path to a pre-stop script The fully qualified path where to find a script or program which should be executed after this resource got stopped. Path to a post-stop script Only used for ASCS/ERS SAP Netweaver installations without implementing a master/slave resource to allow the ASCS to 'find' the ERS running on another cluster node after a resource failure. This parameter should be set to true 'only' for the ERS instance for implementations following the SAP NetWeaver 7.40 HA certification (NW-HA-CLU-740). This includes also - systems for NetWeaver less than 7.40, if you like to impelemnt the NW-HA-CLU-740 scenario. + systems for NetWeaver less than 7.40, if you like to implement the NW-HA-CLU-740 scenario. Mark SAPInstance as ERS instance END } # # methods: What methods/operations do we support?
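# Note (editorial example, not part of the agent): a master/slave setup that
# satisfies the clone properties listed for ERS_InstanceName above
# (clone_max=2, clone_node_max=1, master_node_max=1, master_max=1) can be
# expressed in crm shell roughly as follows; resource names, instance names
# and timeouts are placeholders:
#
#   primitive rsc_SAP_ASCS ocf:heartbeat:SAPInstance \
#     params InstanceName="P01_ASCS00_sapp01as" \
#            ERS_InstanceName="P01_ERS02_sapp01ers" \
#     op monitor interval="30" role="Slave" timeout="60" \
#     op monitor interval="31" role="Master" timeout="60"
#   ms ms_SAP_ASCS rsc_SAP_ASCS \
#     meta clone-max="2" clone-node-max="1" master-node-max="1" \
#          master-max="1" notify="true"
#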
# sapinstance_methods() { cat <<-EOF start stop status monitor promote demote reload notify validate-all methods meta-data usage EOF } # # is_clone : find out if we are configured to run in a Master/Slave configuration # is_clone() { if [ -n "$OCF_RESKEY_CRM_meta_clone_max" ] \ && [ "$OCF_RESKEY_CRM_meta_clone_max" -gt 0 ] then if [ "$OCF_RESKEY_CRM_meta_clone_max" -ne 2 ] || \ [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ] || \ [ "$OCF_RESKEY_CRM_meta_master_node_max" -ne 1 ] || \ [ "$OCF_RESKEY_CRM_meta_master_max" -ne 1 ] then ocf_log err "Clone options misconfigured. (expect: clone_max=2,clone_node_max=1,master_node_max=1,master_max=1)" exit $OCF_ERR_CONFIGURED fi if [ -z "$OCF_RESKEY_ERS_InstanceName" ] then ocf_log err "In a Master/Slave configuration the ERS_InstanceName parameter is mandatory." exit $OCF_ERR_ARGS fi else return 0 fi return 1 } # # abnormal_end : essential things are missing, but in the natur of a SAP installation - which can be very different # from customer to customer - we cannot handle this always as an error # This would be the case, if the software is installed on shared disks and not visible # to all cluster nodes at all times. # abnormal_end() { local err_msg=$1 ocf_is_probe && { sapinstance_status exit $? } ocf_log err $err_msg if [ "$ACTION" = "stop" ] then cleanup_instance exit $OCF_SUCCESS fi exit $OCF_ERR_CONFIGURED } # # sapinstance_init : Define global variables with default values, if optional parameters are not set # # sapinstance_init() { local myInstanceName="$1" SID=`echo "$myInstanceName" | cut -d_ -f1` InstanceName=`echo "$myInstanceName" | cut -d_ -f2` InstanceNr=`echo "$InstanceName" | sed 's/.*\([0-9][0-9]\)$/\1/'` SAPVIRHOST=`echo "$myInstanceName" | cut -d_ -f3` # optional OCF parameters, we try to guess which directories are correct if [ -z "$OCF_RESKEY_DIR_EXECUTABLE" ] then if have_binary /usr/sap/$SID/$InstanceName/exe/sapstartsrv && have_binary /usr/sap/$SID/$InstanceName/exe/sapcontrol then DIR_EXECUTABLE="/usr/sap/$SID/$InstanceName/exe" SAPSTARTSRV="/usr/sap/$SID/$InstanceName/exe/sapstartsrv" SAPCONTROL="/usr/sap/$SID/$InstanceName/exe/sapcontrol" elif have_binary /usr/sap/$SID/SYS/exe/run/sapstartsrv && have_binary /usr/sap/$SID/SYS/exe/run/sapcontrol then DIR_EXECUTABLE="/usr/sap/$SID/SYS/exe/run" SAPSTARTSRV="/usr/sap/$SID/SYS/exe/run/sapstartsrv" SAPCONTROL="/usr/sap/$SID/SYS/exe/run/sapcontrol" fi else if have_binary "$OCF_RESKEY_DIR_EXECUTABLE/sapstartsrv" && have_binary "$OCF_RESKEY_DIR_EXECUTABLE/sapcontrol" then DIR_EXECUTABLE="$OCF_RESKEY_DIR_EXECUTABLE" SAPSTARTSRV="$OCF_RESKEY_DIR_EXECUTABLE/sapstartsrv" SAPCONTROL="$OCF_RESKEY_DIR_EXECUTABLE/sapcontrol" fi fi sidadm="`echo $SID | tr '[:upper:]' '[:lower:]'`adm" [ -z "$DIR_EXECUTABLE" ] && abnormal_end "Cannot find sapstartsrv and sapcontrol executable, please set DIR_EXECUTABLE parameter!" if [ -z "$OCF_RESKEY_DIR_PROFILE" ] then DIR_PROFILE="/usr/sap/$SID/SYS/profile" else DIR_PROFILE="$OCF_RESKEY_DIR_PROFILE" fi if [ "$myInstanceName" != "$OCF_RESKEY_InstanceName" ] then currentSTART_PROFILE=$OCF_RESKEY_ERS_START_PROFILE else currentSTART_PROFILE=$OCF_RESKEY_START_PROFILE fi if [ -z "$OCF_RESKEY_IS_ERS" ]; then is_ers="no" else is_ers="$OCF_RESKEY_IS_ERS" fi if [ -z "$currentSTART_PROFILE" ] then if [ ! 
-r "$DIR_PROFILE/START_${InstanceName}_${SAPVIRHOST}" -a -r "$DIR_PROFILE/${SID}_${InstanceName}_${SAPVIRHOST}" ]; then SAPSTARTPROFILE="$DIR_PROFILE/${SID}_${InstanceName}_${SAPVIRHOST}" else SAPSTARTPROFILE="$DIR_PROFILE/START_${InstanceName}_${SAPVIRHOST}" fi else SAPSTARTPROFILE="$currentSTART_PROFILE" fi if [ -z "$OCF_RESKEY_START_WAITTIME" ] then export OCF_RESKEY_START_WAITTIME=3600 fi if [ -z "$OCF_RESKEY_MONITOR_SERVICES" ] then export OCF_RESKEY_MONITOR_SERVICES="disp+work|msg_server|enserver|enrepserver|jcontrol|jstart|enq_server|enq_replicator" fi # as root user we need the library path to the SAP kernel to be able to call sapcontrol if [ `echo $LD_LIBRARY_PATH | grep -c "^$DIR_EXECUTABLE\>"` -eq 0 ]; then LD_LIBRARY_PATH=$DIR_EXECUTABLE${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH export LD_LIBRARY_PATH fi return $OCF_SUCCESS } # # check_sapstartsrv : Before using sapcontrol we make sure that the sapstartsrv is running for the correct instance. # We cannot use sapinit and the /usr/sap/sapservices file in case of an enquerep instance, # because then we have two instances with the same instance number. # check_sapstartsrv() { local restart=0 local runninginst="" local chkrc=$OCF_SUCCESS local output="" if [ ! -S /tmp/.sapstream5${InstanceNr}13 ]; then ocf_log warn "sapstartsrv is not running for instance $SID-$InstanceName (no UDS), it will be started now" restart=1 else output=`$SAPCONTROL -nr $InstanceNr -function ParameterValue INSTANCE_NAME -format script` if [ $? -eq 0 ] then runninginst=`echo "$output" | grep '^0 : ' | cut -d' ' -f3` if [ "$runninginst" != "$InstanceName" ] then ocf_log warn "sapstartsrv is running for instance $runninginst, that service will be killed" restart=1 else output=`$SAPCONTROL -nr $InstanceNr -function AccessCheck Start` if [ $? -ne 0 ]; then ocf_log warn "FAILED : sapcontrol -nr $InstanceNr -function AccessCheck Start (`ls -ld1 /tmp/.sapstream5${InstanceNr}13`)" ocf_log warn "sapstartsrv will be restarted to try to solve this situation, otherwise please check sapstsartsrv setup (SAP Note 927637)" restart=1 fi fi else ocf_log warn "sapstartsrv is not running for instance $SID-$InstanceName, it will be started now" restart=1 fi fi if [ -z "$runninginst" ]; then runninginst=$InstanceName; fi if [ $restart -eq 1 ] then if [ -d /usr/sap/$SID/SYS/profile/ ] then DIR_PROFILE="/usr/sap/$SID/SYS/profile" else abnormal_end "Expected /usr/sap/$SID/SYS/profile/ to be a directory, please set DIR_PROFILE parameter!" fi [ ! -r $SAPSTARTPROFILE ] && abnormal_end "Expected $SAPSTARTPROFILE to be the instance START profile, please set START_PROFILE parameter!" pkill -9 -f "sapstartsrv.*$runninginst" # removing the unix domain socket files as they might have wrong permissions # or ownership - they will be recreated by sapstartsrv during next start rm -f /tmp/.sapstream5${InstanceNr}13 rm -f /tmp/.sapstream5${InstanceNr}14 $SAPSTARTSRV pf=$SAPSTARTPROFILE -D -u $sidadm # now make sure the daemon has been started and is able to respond local srvrc=1 while [ $srvrc -eq 1 -a `pgrep -f "sapstartsrv.*$runninginst" | wc -l` -gt 0 ] do sleep 1 $SAPCONTROL -nr $InstanceNr -function GetProcessList > /dev/null 2>&1 srvrc=$? done if [ $srvrc -ne 1 ] then ocf_log info "sapstartsrv for instance $SID-$InstanceName was restarted !" chkrc=$OCF_SUCCESS else ocf_log error "sapstartsrv for instance $SID-$InstanceName could not be started!" 
chkrc=$OCF_ERR_GENERIC ocf_is_probe && chkrc=$OCF_NOT_RUNNING fi fi return $chkrc } # # sapuserexit : Many SAP customers need some additional processes/tools to run their SAP systems. # This specialties do not allow a totally generic SAP cluster resource agent. # Someone should write a resource agent for each additional process you need, if it # is required to monitor that process within the cluster manager. To enable # you to extent this resource agent without developing a new one, this user exit # was introduced. # sapuserexit() { local NAME="$1" local VALUE="$2" if [ -n "$VALUE" ] then if have_binary "$VALUE" then ocf_log info "Calling userexit ${NAME} with customer script file ${VALUE}" "$VALUE" >/dev/null 2>&1 ocf_log info "Exiting userexit ${NAME} with customer script file ${VALUE}, returncode: $?" else ocf_log warn "Attribute ${NAME} is set to ${VALUE}, but this file is not executable" fi fi return 0 } # # cleanup_instance : remove resources (processes and shared memory) from a crashed instance) # cleanup_instance() { pkill -9 -f -U $sidadm $InstanceName ocf_log info "Terminated instance using 'pkill -9 -f -U $sidadm $InstanceName'" # it is necessary to call cleanipc as user sidadm if the system has 'vmcj/enable = ON' set - otherwise SHM-segments in /dev/shm/SAP_ES2* cannot be removed su - $sidadm -c "cleanipc $InstanceNr remove" ocf_log info "Tried to remove shared memory resources using 'cleanipc $InstanceNr remove' as user $sidadm" ocf_run rm -fv /usr/sap/$SID/$InstanceName/work/kill.sap ocf_run rm -fv /usr/sap/$SID/$InstanceName/work/shutdown.sap ocf_run rm -fv /usr/sap/$SID/$InstanceName/data/rslgcpid ocf_run rm -fv /usr/sap/$SID/$InstanceName/data/rslgspid return 0 } # # sapinstance_start : Start the SAP instance # sapinstance_start() { sapuserexit PRE_START_USEREXIT "$OCF_RESKEY_PRE_START_USEREXIT" local rc=$OCF_NOT_RUNNING local output="" local loopcount=0 while [ $loopcount -lt 2 ] do loopcount=$(($loopcount + 1)) check_sapstartsrv rc=$? if [ $rc -eq $OCF_SUCCESS ]; then output=`$SAPCONTROL -nr $InstanceNr -function Start` rc=$? ocf_log info "Starting SAP Instance $SID-$InstanceName: $output" fi if [ $rc -ne 0 ] then ocf_log err "SAP Instance $SID-$InstanceName start failed." return $OCF_ERR_GENERIC fi local startrc=1 while [ $startrc -gt 0 ] do local waittime_start=`date +%s` output=`$SAPCONTROL -nr $InstanceNr -function WaitforStarted $OCF_RESKEY_START_WAITTIME 10` startrc=$? local waittime_stop=`date +%s` if [ $startrc -ne 0 ] then if [ $(($waittime_stop - $waittime_start)) -ge $OCF_RESKEY_START_WAITTIME ] then sapinstance_monitor NOLOG if [ $? -eq $OCF_SUCCESS ] then output="START_WAITTIME ($OCF_RESKEY_START_WAITTIME) has elapsed, but instance monitor returned SUCCESS. Instance considered running." 
startrc=0; loopcount=2 fi else if [ $loopcount -eq 1 ] && ocf_is_true $OCF_RESKEY_AUTOMATIC_RECOVER then ocf_log warn "SAP Instance $SID-$InstanceName start failed: $output" ocf_log warn "Try to recover $SID-$InstanceName" cleanup_instance else loopcount=2 fi startrc=-1 fi else loopcount=2 fi done done if [ $startrc -eq 0 ] then ocf_log info "SAP Instance $SID-$InstanceName started: $output" rc=$OCF_SUCCESS sapuserexit POST_START_USEREXIT "$OCF_RESKEY_POST_START_USEREXIT" if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 1 -l reboot; fi else ocf_log err "SAP Instance $SID-$InstanceName start failed: $output" rc=$OCF_NOT_RUNNING if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 0 -l reboot; fi fi return $rc } # # sapinstance_recover: Try startup of failed instance by cleaning up resources # sapinstance_recover() { cleanup_instance sapinstance_start return $? } # # sapinstance_stop: Stop the SAP instance # sapinstance_stop() { local output="" local rc sapuserexit PRE_STOP_USEREXIT "$OCF_RESKEY_PRE_STOP_USEREXIT" if [ "$OCF_RESKEY_SHUTDOWN_METHOD" = "KILL" ] then ocf_log info "Stopping SAP Instance $SID-$InstanceName with shutdown method KILL!" cleanup_instance return $OCF_SUCCESS fi check_sapstartsrv rc=$? if [ $rc -eq $OCF_SUCCESS ]; then output=`$SAPCONTROL -nr $InstanceNr -function Stop` rc=$? ocf_log info "Stopping SAP Instance $SID-$InstanceName: $output" fi if [ $rc -eq 0 ] then output=`$SAPCONTROL -nr $InstanceNr -function WaitforStopped 3600 1` if [ $? -eq 0 ] then ocf_log info "SAP Instance $SID-$InstanceName stopped: $output" rc=$OCF_SUCCESS else ocf_log err "SAP Instance $SID-$InstanceName stop failed: $output" rc=$OCF_ERR_GENERIC fi else ocf_log err "SAP Instance $SID-$InstanceName stop failed: $output" rc=$OCF_ERR_GENERIC fi sapuserexit POST_STOP_USEREXIT "$OCF_RESKEY_POST_STOP_USEREXIT" if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 0 -l reboot; fi return $rc } # # sapinstance_monitor: Can the given SAP instance do anything useful? # sapinstance_monitor() { local MONLOG=$1 local rc check_sapstartsrv rc=$? if [ $rc -eq $OCF_SUCCESS ] then local count=0 local SERVNO local output output=`$SAPCONTROL -nr $InstanceNr -function GetProcessList -format script` # we have to parse the output, because the returncode doesn't tell anything about the instance status for SERVNO in `echo "$output" | grep '^[0-9] ' | cut -d' ' -f1 | sort -u` do local COLOR=`echo "$output" | grep "^$SERVNO dispstatus: " | cut -d' ' -f3` local SERVICE=`echo "$output" | grep "^$SERVNO name: " | cut -d' ' -f3` local STATE=0 local SEARCH case $COLOR in GREEN|YELLOW) STATE=$OCF_SUCCESS;; *) STATE=$OCF_NOT_RUNNING;; esac SEARCH=`echo "$OCF_RESKEY_MONITOR_SERVICES" | sed 's/\+/\\\+/g' | sed 's/\./\\\./g'` if [ `echo "$SERVICE" | egrep -c "$SEARCH"` -eq 1 ] then if [ $STATE -eq $OCF_NOT_RUNNING ] then [ "$MONLOG" != "NOLOG" ] && ocf_log err "SAP instance service $SERVICE is not running with status $COLOR !" rc=$STATE fi count=1 fi done if [ $count -eq 0 -a $rc -eq $OCF_SUCCESS ] then if ocf_is_probe then rc=$OCF_NOT_RUNNING else [ "$MONLOG" != "NOLOG" ] && ocf_log err "The SAP instance does not run any services which this RA could monitor!" rc=$OCF_ERR_GENERIC fi fi fi return $rc } # # sapinstance_status: Lightweight check of SAP instance only with OS tools # sapinstance_status() { local pid local pids [ ! 
-f "/usr/sap/$SID/$InstanceName/work/kill.sap" ] && return $OCF_NOT_RUNNING pids=`grep '^kill -[0-9]' /usr/sap/$SID/$InstanceName/work/kill.sap | awk '{print $3}'` for pid in $pids do [ `pgrep -f -U $sidadm $InstanceName | grep -c $pid` -gt 0 ] && return $OCF_SUCCESS done return $OCF_NOT_RUNNING } # # sapinstance_validate: Check the semantics of the input parameters # sapinstance_validate() { local rc=$OCF_SUCCESS if [ `echo "$SID" | grep -c '^[A-Z][A-Z0-9][A-Z0-9]$'` -ne 1 ] then ocf_log err "Parsing instance profile name: '$SID' is not a valid system ID!" rc=$OCF_ERR_ARGS fi if [ `echo "$InstanceName" | grep -c '^[A-Z].*[0-9][0-9]$'` -ne 1 ] then ocf_log err "Parsing instance profile name: '$InstanceName' is not a valid instance name!" rc=$OCF_ERR_ARGS fi if [ `echo "$InstanceNr" | grep -c '^[0-9][0-9]$'` -ne 1 ] then ocf_log err "Parsing instance profile name: '$InstanceNr' is not a valid instance number!" rc=$OCF_ERR_ARGS fi if [ `echo "$SAPVIRHOST" | grep -c '^[A-Za-z][A-Za-z0-9_-]*$'` -ne 1 ] then ocf_log err "Parsing instance profile name: '$SAPVIRHOST' is not a valid hostname!" rc=$OCF_ERR_ARGS fi return $rc } # # sapinstance_start_clone # sapinstance_start_clone() { sapinstance_init $OCF_RESKEY_ERS_InstanceName ${HA_SBIN_DIR}/crm_master -v 50 -l reboot sapinstance_start return $? } # # sapinstance_stop_clone # sapinstance_stop_clone() { sapinstance_init $OCF_RESKEY_ERS_InstanceName ${HA_SBIN_DIR}/crm_master -v 0 -l reboot sapinstance_stop return $? } # # sapinstance_monitor_clone # sapinstance_monitor_clone() { # first check with the status function (OS tools) if there could be something like a SAP instance running # as we do not know here, if we are in master or slave state we do not want to start our monitoring # agents (sapstartsrv) on the wrong host local rc sapinstance_init $OCF_RESKEY_InstanceName if sapinstance_status; then if sapinstance_monitor; then ${HA_SBIN_DIR}/crm_master -Q -v 100 -l reboot return $OCF_RUNNING_MASTER fi # by nature of the SAP enqueue server we have to make sure # that we do a failover to the slave (enqueue replication server) # in case the enqueue process has failed. We signal this to the # cluster by setting our master preference to a lower value than the slave. ${HA_SBIN_DIR}/crm_master -v 10 -l reboot return $OCF_FAILED_MASTER fi sapinstance_init $OCF_RESKEY_ERS_InstanceName sapinstance_status && sapinstance_monitor rc=$? if [ $rc -eq $OCF_SUCCESS ]; then ${HA_SBIN_DIR}/crm_master -Q -v 100 -l reboot fi return $rc } # # sapinstance_promote_clone: In a Master/Slave configuration get Master by starting the SCS instance and stopping the ERS instance # The order is important here to behave correct from the application levels view # sapinstance_promote_clone() { local rc sapinstance_init $OCF_RESKEY_InstanceName ocf_log info "Promoting $SID-$InstanceName to running Master." sapinstance_start rc=$? if [ $rc -eq $OCF_SUCCESS ]; then sapinstance_init $OCF_RESKEY_ERS_InstanceName sapinstance_stop rc=$? fi return $rc } # # sapinstance_demote_clone: In a Master/Slave configuration get Slave by stopping the SCS instance and starting the ERS instance # sapinstance_demote_clone() { local rc sapinstance_init $OCF_RESKEY_InstanceName ocf_log info "Demoting $SID-$InstanceName to a slave." sapinstance_stop rc=$? if [ $rc -eq $OCF_SUCCESS ]; then sapinstance_init $OCF_RESKEY_ERS_InstanceName sapinstance_start rc=$? 
fi return $rc } # # sapinstance_notify: Handle master scoring - to make sure a slave gets the next master # sapinstance_notify() { local n_type="$OCF_RESKEY_CRM_meta_notify_type" local n_op="$OCF_RESKEY_CRM_meta_notify_operation" if [ "${n_type}_${n_op}" = "post_promote" ]; then # After promotion of one master in the cluster, we make sure that all clones reset their master # value back to 100. This is because a failed monitor on a master might have degree one clone # instance to score 10. ${HA_SBIN_DIR}/crm_master -v 100 -l reboot elif [ "${n_type}_${n_op}" = "pre_demote" ]; then # if we are a slave and a demote event is announced, make sure we are highest on the list to become master # that is, when a slave resource was started after the promote event of an already running master (e.g. node of slave was down) # We also have to make sure to overrule the globally set resource_stickiness or any fail-count factors => INFINITY local n_uname="$OCF_RESKEY_CRM_meta_notify_demote_uname" if [ ${n_uname} != ${NODENAME} ]; then ${HA_SBIN_DIR}/crm_master -v INFINITY -l reboot fi fi } # # 'main' starts here... # ## GLOBALS SID="" sidadm="" InstanceName="" InstanceNr="" SAPVIRHOST="" DIR_EXECUTABLE="" SAPSTARTSRV="" SAPCONTROL="" DIR_PROFILE="" SAPSTARTPROFILE="" CLONE=0 NODENAME=$(ocf_local_nodename) if ( [ $# -ne 1 ] ) then sapinstance_usage exit $OCF_ERR_ARGS fi ACTION=$1 if [ "$ACTION" = "status" ]; then ACTION=monitor fi # These operations don't require OCF instance parameters to be set case "$ACTION" in usage|methods) sapinstance_$ACTION exit $OCF_SUCCESS;; meta-data) sapinstance_meta_data exit $OCF_SUCCESS;; notify) sapinstance_notify exit $OCF_SUCCESS;; *);; esac if ! ocf_is_root then ocf_log err "$0 must be run as root" exit $OCF_ERR_PERM fi # parameter check if [ -z "$OCF_RESKEY_InstanceName" ] then ocf_log err "Please set OCF_RESKEY_InstanceName to the name to the SAP instance profile!" exit $OCF_ERR_ARGS fi is_clone; CLONE=$? if [ ${CLONE} -eq 1 ] then CLACT=_clone else if [ "$ACTION" = "promote" -o "$ACTION" = "demote" ] then ocf_log err "$ACTION called in a non master/slave environment" exit $OCF_ERR_ARGS fi sapinstance_init $OCF_RESKEY_InstanceName fi # What kind of method was invoked? case "$ACTION" in start|stop|monitor|promote|demote) sapinstance_$ACTION$CLACT exit $?;; validate-all) sapinstance_validate exit $?;; reload ) ocf_log info "reloading SAPInstance parameters" exit $OCF_SUCCESS;; *) sapinstance_methods exit $OCF_ERR_UNIMPLEMENTED;; esac diff --git a/heartbeat/SendArp b/heartbeat/SendArp index a41094807..a7a14df14 100755 --- a/heartbeat/SendArp +++ b/heartbeat/SendArp @@ -1,267 +1,267 @@ #!/bin/sh # # # Copyright (c) 2006, Huang Zhen # Converting original heartbeat RA to OCF RA. # # Copyright (C) 2004 Horms # # Based on IPaddr2: Copyright (C) 2003 Tuomo Soini # # License: GNU General Public License (GPL) # Support: users@clusterlabs.org # # This script send out gratuitous Arp for an IP address # # It can be used _instead_ of the IPaddr2 or IPaddr resource # to send gratuitous arp for an IP address on a given interface, # without adding the address to that interface. I.e. if for # some reason you want to send gratuitous arp for addresses # managed by IPaddr2 or IPaddr on an additional interface. # # OCF parameters are as below: # OCF_RESKEY_ip # OCF_RESKEY_nic # # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs SENDARP=$HA_BIN/send_arp SENDARPPIDDIR=${HA_RSCTMP} BASEIP="$OCF_RESKEY_ip" INTERFACE="$OCF_RESKEY_nic" RESIDUAL="" SENDARPPIDFILE="$SENDARPPIDDIR/send_arp-$BASEIP" BACKGROUND=${OCF_RESKEY_background:-"yes"} # Set default values : ${ARP_INTERVAL_MS=200} # milliseconds between ARPs : ${ARP_REPEAT=5} # repeat count : ${ARP_BACKGROUND=$BACKGROUND} # no to run in foreground : ${ARP_NETMASK=ffffffffffff} # netmask for ARP ####################################################################### sendarp_meta_data() { cat < 1.0 This RA can be used _instead_ of the IPaddr2 or IPaddr RA to send gratuitous ARP for an IP address on a given interface, without adding the address to that interface. For example, -if for some resaon you wanted to send gratuitous ARP for +if for some reason you wanted to send gratuitous ARP for addresses managed by IPaddr2 or IPaddr on an additional interface. Broadcasts unsolicited ARP announcements The IP address for sending ARP packet. IP address The NIC for sending ARP packet. NIC Send ARPs in background. Set to false if you want to test if sending ARPs succeeded. Send ARPs in background END } ####################################################################### sendarp_usage() { cat < 1.0 The resource agent of Squid. This manages a Squid instance as an HA resource. Manages a Squid proxy server instance This is a required parameter. This parameter specifies squid's executable file. Executable file This is a required parameter. This parameter specifies a configuration file for a squid instance managed by this RA. Configuration file This is a required parameter. This parameter specifies a process id file for a squid instance managed by this RA. Pidfile This is a required parameter. This parameter specifies a port number for a squid instance managed by this RA. If multiple ports are used, you must specify only one of them. Port number On stop, a squid shutdown is invoked first. If the resource doesn't stop within this timeout, we resort to stopping processes by sending signals and finally KILLing them. how long to wait for squid shutdown to stop the instance before resorting to kill This is an optional parameter. This RA runs in debug mode when this parameter includes 'x' or 'v'. If 'x' is included, both of STDOUT and STDERR redirect to the logfile specified by "debug_log", and then the builtin shell option 'x' is turned on. It is similar about 'v'. Debug mode -This is an optional and omittable parameter. +This is an optional parameter. 
This parameter specifies a destination file for debug logs and works only if this RA run in debug mode. Refer to "debug_mode" -about debug mode. If no value is given but it's requied, it's made by the -following rules: "/var/log/" as a directory part, the basename of -the configuration file given by "syslog_ng_conf" as a basename part, -".log" as a suffix. +about debug mode. If no value is given but is required, it's constructed +according to the following rules: "/var/log/" as a directory part, +the basename of the configuration file given by "syslog_ng_conf" +as a basename part, ".log" as a suffix. A destination of the debug log END return $OCF_SUCCESS } get_pids() { SQUID_PIDS=( ) # Seek by pattern SQUID_PIDS[0]=$(pgrep -f "$PROCESS_PATTERN") # Seek by pidfile SQUID_PIDS[1]=$(awk '1{print $1}' $SQUID_PIDFILE 2>/dev/null) if [[ -n "${SQUID_PIDS[1]}" ]]; then typeset exe exe=$(ls -l "/proc/${SQUID_PIDS[1]}/exe") if [[ $? = 0 ]]; then exe=${exe##*-> } if ! [[ "$exe" = $SQUID_EXE ]]; then SQUID_PIDS[1]="" fi else SQUID_PIDS[1]="" fi fi # Seek by port if have_binary netstat; then SQUID_PIDS[2]=$( netstat -apn | awk '/tcp.*:'$SQUID_PORT' .*LISTEN/ && $7~/^[1-9]/ { sub("\\/.*", "", $7); print $7; exit}') else SQUID_PIDS[2]=$( ss -apn | awk '/tcp.*LISTEN.*:'$SQUID_PORT'/ { sub(".*pid=", "", $7); sub(",fd=.*", "", $7); print $7 }') fi } are_all_pids_found() { if [[ -n "${SQUID_PIDS[0]}" ]] && [[ -n "${SQUID_PIDS[1]}" ]] && [[ -n "${SQUID_PIDS[2]}" ]] then return 0 else return 1 fi } are_pids_sane() { if [[ "${SQUID_PIDS[1]}" = "${SQUID_PIDS[2]}" ]]; then return $OCF_SUCCESS else ocf_exit_reason "$SQUID_NAME:Pid unmatch" return $OCF_ERR_GENERIC fi } is_squid_dead() { if [[ -z "${SQUID_PIDS[0]}" ]] && [[ -z "${SQUID_PIDS[2]}" ]] then return 0 else return 1 fi } monitor_squid() { typeset trialcount=0 while true; do get_pids if are_all_pids_found; then are_pids_sane return $OCF_SUCCESS fi if is_squid_dead; then return $OCF_NOT_RUNNING fi ocf_log info "$SQUID_NAME:Inconsistent processes:" \ "${SQUID_PIDS[0]},${SQUID_PIDS[1]},${SQUID_PIDS[2]}" (( trialcount = trialcount + 1 )) if (( trialcount > SQUID_CONFIRM_TRIALCOUNT )); then ocf_exit_reason "$SQUID_NAME:Inconsistency of processes remains unsolved" return $OCF_ERR_GENERIC fi sleep 1 done } start_squid() { typeset status monitor_squid status=$? if [[ $status != $OCF_NOT_RUNNING ]]; then return $status fi set -- "$SQUID_OPTS" ocf_run $SQUID_EXE -f "$SQUID_CONF" "$@" status=$? 
if [[ $status != $OCF_SUCCESS ]]; then return $OCF_ERR_GENERIC fi while true; do get_pids if are_all_pids_found && are_pids_sane; then return $OCF_SUCCESS fi ocf_log info "$SQUID_NAME:Waiting for squid to be invoked" sleep 1 done return $OCF_ERR_GENERIC } stop_squid() { typeset lapse_sec if ocf_run $SQUID_EXE -f $SQUID_CONF -k shutdown; then lapse_sec=0 while true; do get_pids if is_squid_dead; then rm -f $SQUID_PIDFILE return $OCF_SUCCESS fi (( lapse_sec = lapse_sec + 1 )) if (( lapse_sec > SQUID_STOP_TIMEOUT )); then break fi sleep 1 ocf_log info "$SQUID_NAME:$FUNCNAME:$LINENO: " \ "stop NORM $lapse_sec/$SQUID_STOP_TIMEOUT" done fi while true; do get_pids ocf_log info "$SQUID_NAME:$FUNCNAME:$LINENO: " \ "try to stop by SIGKILL:${SQUID_PIDS[0]} ${SQUID_PIDS[2]}" kill -KILL ${SQUID_PIDS[0]} ${SQUID_PIDS[2]} sleep 1 if is_squid_dead; then rm -f $SQUID_PIDFILE return $OCF_SUCCESS fi done return $OCF_ERR_GENERIC } status_squid() { return $OCF_SUCCESS } validate_all_squid() { ocf_log info "validate_all_squid[$SQUID_NAME]" return $OCF_SUCCESS } : === Debug ${0##*/} $1 === if [[ "$1" = "meta-data" ]]; then metadata_squid exit $? fi SQUID_CONF="${OCF_RESKEY_squid_conf}" if [[ -z "$SQUID_CONF" ]]; then ocf_exit_reason "SQUID_CONF is not defined" exit $OCF_ERR_CONFIGURED fi SQUID_NAME="${SQUID_CONF##*/}" SQUID_NAME="${SQUID_NAME%.*}" DEBUG_LOG="${OCF_RESKEY_debug_log-/var/log/squid_${SQUID_NAME}_debug}.log" DEBUG_MODE="" case $OCF_RESKEY_debug_mode in *x*) DEBUG_MODE="${DEBUG_MODE}x";; esac case $OCF_RESKEY_debug_mode in *v*) DEBUG_MODE="${DEBUG_MODE}v";; esac if [ -n "$DEBUG_MODE" ]; then PS4='\d \t \h '"${1-unknown} " export PS4 exec 1>>$DEBUG_LOG 2>&1 set -$DEBUG_MODE fi SQUID_EXE="${OCF_RESKEY_squid_exe}" if [[ -z "$SQUID_EXE" ]]; then ocf_exit_reason "SQUID_EXE is not defined" exit $OCF_ERR_CONFIGURED fi if [[ ! -x "$SQUID_EXE" ]]; then ocf_exit_reason "$SQUID_EXE is not found" exit $OCF_ERR_CONFIGURED fi SQUID_PIDFILE="${OCF_RESKEY_squid_pidfile}" if [[ -z "$SQUID_PIDFILE" ]]; then ocf_exit_reason "SQUID_PIDFILE is not defined" exit $OCF_ERR_CONFIGURED fi SQUID_PORT="${OCF_RESKEY_squid_port}" if [[ -z "$SQUID_PORT" ]]; then ocf_exit_reason "SQUID_PORT is not defined" exit $OCF_ERR_CONFIGURED fi SQUID_OPTS="${OCF_RESKEY_squid_opts}" SQUID_PIDS=( ) SQUID_CONFIRM_TRIALCOUNT="${OCF_RESKEY_squid_confirm_trialcount-3}" SQUID_STOP_TIMEOUT="${OCF_RESKEY_squid_stop_timeout-10}" SQUID_SUSPEND_TRIALCOUNT="${OCF_RESKEY_squid_suspend_trialcount-10}" PROCESS_PATTERN="$SQUID_EXE -f $SQUID_CONF" COMMAND=$1 case "$COMMAND" in start) ocf_log debug "[$SQUID_NAME] Enter squid start" start_squid func_status=$? ocf_log debug "[$SQUID_NAME] Leave squid start $func_status" exit $func_status ;; stop) ocf_log debug "[$SQUID_NAME] Enter squid stop" stop_squid func_status=$? ocf_log debug "[$SQUID_NAME] Leave squid stop $func_status" exit $func_status ;; status) status_squid exit $? ;; monitor) #ocf_log debug "[$SQUID_NAME] Enter squid monitor" monitor_squid func_status=$? #ocf_log debug "[$SQUID_NAME] Leave squid monitor $func_status" exit $func_status ;; validate-all) validate_all_squid exit $? ;; *) usage ;; esac # vim: set sw=4 ts=4 : diff --git a/heartbeat/VIPArip b/heartbeat/VIPArip index 12804dffb..cd3ca4d7f 100755 --- a/heartbeat/VIPArip +++ b/heartbeat/VIPArip @@ -1,302 +1,302 @@ #!/bin/sh # # License: GNU General Public License (GPL) # Support: users@clusterlabs.org # Author: Huang Zhen # Copyright (c) 2006 International Business Machines # # Virtual IP Address by RIP2 protocol. 
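# How this agent works, in brief (based on the functions below): the address
# is added as a /32 on the loopback device, and ripd is configured to announce
# a host route for it through the chosen nic. Per managed IP and nic the
# generated $RIPDCONF contains fragments of the following form (the address
# 192.168.1.1 and nic eth0 are example values only):
#
#   access-list private permit 192.168.1.1/32
#    no passive-interface eth0
#    network eth0
#    distribute-list private out eth0
#    distribute-list private in eth0
#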
# This script manages IP alias in different subnet with quagga/ripd. # It can add an IP alias, or remove one. # # The quagga package should be installed to run this RA # # usage: $0 {start|stop|status|monitor|validate-all|meta-data} # # The "start" arg adds an IP alias. # Surprisingly, the "stop" arg removes one. :-) # # OCF parameters are as below # OCF_RESKEY_ip The IP address in different subnet # OCF_RESKEY_nic The nic for broadcast the route information # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs RIPDCONF=$HA_RSCTMP/VIPArip-ripd.conf ZEBRA=/usr/sbin/zebra RIPD=/usr/sbin/ripd USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}"; ####################################################################### meta_data() { cat < 1.0 Virtual IP Address by RIP2 protocol. This script manages IP alias in different subnet with quagga/ripd. It can add an IP alias, or remove one. Manages a virtual IP address through RIP2 The IPv4 address in different subnet, for example "192.168.1.1". The IP address in different subnet The nic for broadcast the route information. -The ripd uses this nic to broadcast the route informaton to others +The ripd uses this nic to broadcast the route information to others The nic for broadcast the route information Absolute path to the zebra binary. zebra binary Absolute path to the ripd binary. ripd binary END exit $OCF_SUCCESS } usage() { echo $USAGE >&2 } new_config_file() { echo new_config_file $1 $2 $3 cat >$RIPDCONF < $RIPDCONF.tmp cp $RIPDCONF.tmp $RIPDCONF } add_ip() { echo add_ip $1 sed "s/ip_tag/ip_tag\naccess-list private permit $1\/32/g" $RIPDCONF > $RIPDCONF.tmp cp $RIPDCONF.tmp $RIPDCONF } del_ip() { echo del_ip $1 sed "/$1/d" $RIPDCONF > $RIPDCONF.tmp cp $RIPDCONF.tmp $RIPDCONF if $GREP "access-list private permit" $RIPDCONF>/dev/null then echo some other IP is running reload_config else stop_quagga echo remove $RIPDCONF rm $RIPDCONF fi } add_nic() { echo add_nic $1 if $GREP "network $1" $RIPDCONF >/dev/null then echo the nic is already in the config file else sed "s/nic_tag/nic_tag\n no passive-interface $1\n network $1\n distribute-list private out $1\n distribute-list private in $1/g" $RIPDCONF > $RIPDCONF.tmp cp $RIPDCONF.tmp $RIPDCONF fi } reload_config() { echo reload_config echo $RIPDCONF: cat $RIPDCONF echo killall -SIGHUP ripd killall -SIGHUP ripd } start_quagga() { echo start_quagga echo $RIPDCONF: cat $RIPDCONF echo $ZEBRA -d $ZEBRA -d echo $RIPD -d -f $RIPDCONF $RIPD -d -f $RIPDCONF } stop_quagga() { echo stop_quagga echo $RIPDCONF: cat $RIPDCONF echo killall -SIGTERM ripd killall -SIGTERM ripd echo killall -SIGTERM zebra killall -SIGTERM zebra } start_rip_ip() { echo start_rip_ip check_params if [ x"$OCF_RESKEY_nic" = x ] then echo OCF_RESKEY_nic is null, set to eth0 OCF_RESKEY_nic="eth0" fi status_rip_ip case $? in $OCF_SUCCESS) ocf_log info "already running" exit $OCF_SUCCESS ;; $OCF_NOT_RUNNING) ;; *) ocf_log info "state undefined, stopping first" stop_rip_ip ;; esac $IP2UTIL addr add $OCF_RESKEY_ip/32 dev lo if [ -f "$RIPDCONF" ] then # there is a config file, add new data(IP,nic,metric) # to the existing config file. 
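# The new route is announced with a low metric (1) first and re-announced
# with metric 3 after a short settling period (see set_metric and the
# 'sleep 3' below); the reading that the temporarily low metric helps the
# new host route win quickly is an assumption, the code itself only performs
# the 1 -> 3 switch.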
add_ip $OCF_RESKEY_ip add_nic $OCF_RESKEY_nic set_metric 1 reload_config echo sleep 3 sleep 3 set_metric 3 reload_config else new_config_file $OCF_RESKEY_ip $OCF_RESKEY_nic 1 start_quagga echo sleep 3 sleep 3 set_metric 3 reload_config fi return $OCF_SUCCESS } stop_rip_ip() { echo stop_rip_ip check_params status_rip_ip if [ $? = $OCF_NOT_RUNNING ] then exit $OCF_SUCCESS fi $IP2UTIL addr del $OCF_RESKEY_ip dev lo echo sleep 2 sleep 2 del_ip $OCF_RESKEY_ip return $OCF_SUCCESS } status_rip_ip() { check_params if $IP2UTIL addr | $GREP $OCF_RESKEY_ip >/dev/null then if $GREP $OCF_RESKEY_ip $RIPDCONF >/dev/null then if pidof ripd >/dev/null then return $OCF_SUCCESS fi fi return $OCF_ERR_GENERIC fi return $OCF_NOT_RUNNING } if [ $# -ne 1 ] then usage exit $OCF_ERR_ARGS fi [ x != x"$OCF_RESKEY_zebra_binary" ] && ZEBRA=$OCF_RESKEY_zebra_binary [ x != x"$OCF_RESKEY_ripd_binary" ] && RIPD=$OCF_RESKEY_ripd_binary case $1 in start) start_rip_ip;; stop) stop_rip_ip;; status) status_rip_ip;; monitor) status_rip_ip;; validate-all) check_binary $IP2UTIL exit $OCF_SUCCESS;; meta-data) meta_data;; usage) usage; exit $OCF_SUCCESS;; *) usage exit $OCF_ERR_UNIMPLEMENTED ;; esac diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain index e4cada67b..1f7a250d7 100755 --- a/heartbeat/VirtualDomain +++ b/heartbeat/VirtualDomain @@ -1,1024 +1,1024 @@ #!/bin/sh # # Support: users@clusterlabs.org # License: GNU General Public License (GPL) # # Resource Agent for domains managed by the libvirt API. # Requires a running libvirt daemon (libvirtd). # # (c) 2008-2010 Florian Haas, Dejan Muhamedagic, # and Linux-HA contributors # # usage: $0 {start|stop|status|monitor|migrate_to|migrate_from|meta-data|validate-all} # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # Defaults OCF_RESKEY_migration_downtime_default=0 OCF_RESKEY_migration_speed_default=0 OCF_RESKEY_force_stop_default=0 OCF_RESKEY_autoset_utilization_cpu_default="true" OCF_RESKEY_autoset_utilization_hv_memory_default="true" OCF_RESKEY_migrateport_default=$(( 49152 + $(ocf_maybe_random) % 64 )) OCF_RESKEY_CRM_meta_timeout_default=90000 OCF_RESKEY_save_config_on_stop_default=false OCF_RESKEY_sync_config_on_stop_default=false OCF_RESKEY_backingfile_default="" OCF_RESKEY_stateless_default="false" OCF_RESKEY_copyindirs_default="" : ${OCF_RESKEY_migration_downtime=${OCF_RESKEY_migration_downtime_default}} : ${OCF_RESKEY_migration_speed=${OCF_RESKEY_migration_speed_default}} : ${OCF_RESKEY_force_stop=${OCF_RESKEY_force_stop_default}} : ${OCF_RESKEY_autoset_utilization_cpu=${OCF_RESKEY_autoset_utilization_cpu_default}} : ${OCF_RESKEY_autoset_utilization_hv_memory=${OCF_RESKEY_autoset_utilization_hv_memory_default}} : ${OCF_RESKEY_migrateport=${OCF_RESKEY_migrateport_default}} : ${OCF_RESKEY_CRM_meta_timeout=${OCF_RESKEY_CRM_meta_timeout_default}} : ${OCF_RESKEY_save_config_on_stop=${OCF_RESKEY_save_config_on_stop_default}} : ${OCF_RESKEY_sync_config_on_stop=${OCF_RESKEY_sync_config_on_stop_default}} : ${OCF_RESKEY_backingfile=${OCF_RESKEY_backingfile_default}} : ${OCF_RESKEY_stateless=${OCF_RESKEY_stateless_default}} : ${OCF_RESKEY_copyindirs=${OCF_RESKEY_copyindirs_default}} if ocf_is_true ${OCF_RESKEY_sync_config_on_stop}; then OCF_RESKEY_save_config_on_stop="true" fi ####################################################################### ## I'd very much suggest to make this RA use bash, ## and then use magic $SECONDS. 
## But for now: NOW=$(date +%s) usage() { echo "usage: $0 {start|stop|status|monitor|migrate_to|migrate_from|meta-data|validate-all}" } VirtualDomain_meta_data() { cat < 1.1 Resource agent for a virtual domain (a.k.a. domU, virtual machine, virtual environment etc., depending on context) managed by libvirtd. Manages virtual domains through the libvirt virtualization framework Absolute path to the libvirt configuration file, for this virtual domain. Virtual domain configuration file Hypervisor URI to connect to. See the libvirt documentation for details on supported URI formats. The default is system dependent. Determine the system's default uri by running 'virsh --quiet uri'. Hypervisor URI Always forcefully shut down ("destroy") the domain on stop. The default behavior is to resort to a forceful shutdown only after a graceful shutdown attempt has failed. You should only set this to true if your virtual domain (or your virtualization backend) does not support graceful shutdown. Always force shutdown on stop Transport used to connect to the remote hypervisor while migrating. Please refer to the libvirt documentation for details on transports available. If this parameter is omitted, the resource will use libvirt's default transport to connect to the remote hypervisor. Remote hypervisor transport The username will be used in the remote libvirt remoteuri/migrateuri. No user will be given (which means root) in the username if omitted If remoteuri is set, migration_user will be ignored. Remote username for the remoteuri Define max downtime during live migration in milliseconds Live migration downtime Define live migration speed per resource in MiB/s Live migration speed Use a dedicated migration network. The migration URI is composed by adding this parameters value to the end of the node name. If the node name happens to be an FQDN (as opposed to an unqualified host name), insert the suffix immediately prior to the first period (.) in the FQDN. At the moment Qemu/KVM and Xen migration via a dedicated network is supported. -Note: Be sure this composed host name is locally resolveable and the +Note: Be sure this composed host name is locally resolvable and the associated IP is reachable through the favored network. This suffix will be added to the remoteuri and migrateuri parameters. See also the migrate_options parameter below. Migration network host name suffix You can also specify here if the calculated migrate URI is unsuitable for your environment. If migrateuri is set then migration_network_suffix, migrateport and --migrateuri in migrate_options are effectively ignored. Use "%n" as the placeholder for the target node name. Please refer to the libvirt documentation for details on guest migration. Custom migrateuri for migration state transfer Extra virsh options for the guest live migration. You can also specify here --migrateuri if the calculated migrate URI is unsuitable for your environment. If --migrateuri is set then migration_network_suffix and migrateport are effectively ignored. Use "%n" as the placeholder for the target node name. Please refer to the libvirt documentation for details on guest migration. live migrate options To additionally monitor services within the virtual domain, add this parameter with a list of scripts to monitor. Note: when monitor scripts are used, the start and migrate_from operations will complete only when all monitor scripts have completed successfully. Be sure to set the timeout of these operations to accommodate this delay. 
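A small standalone sketch (not the agent's own code; the node name "node1.example.com" and the suffix "-mig" are example values) of how a migration host name is derived from the target node name plus migration_network_suffix, inserting the suffix just before the first dot of an FQDN as described above:

target_node="node1.example.com"
suffix="-mig"
case "$target_node" in
    *.*) migration_host=$(echo "$target_node" | sed "s/\./${suffix}./");;   # FQDN -> node1-mig.example.com
    *)   migration_host="${target_node}${suffix}";;                         # short name -> node1-mig
esac
echo "would migrate towards $migration_host"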
space-separated list of monitor scripts If set true, the agent will detect the number of domainU's vCPUs from virsh, and put it into the CPU utilization of the resource when the monitor is executed. Enable auto-setting the CPU utilization of the resource If set true, the agent will detect the number of *Max memory* from virsh, and put it into the hv_memory utilization of the resource when the monitor is executed. Enable auto-setting the hv_memory utilization of the resource This port will be used in the qemu migrateuri. If unset, the port will be a random highport. Port for migrateuri Use this URI as virsh connection URI to commuicate with a remote hypervisor. If remoteuri is set then migration_user and migration_network_suffix are effectively ignored. Use "%n" as the placeholder for the target node name. Please refer to the libvirt documentation for details on guest migration. Custom remoteuri to communicate with a remote hypervisor Changes to a running VM's config are normally lost on stop. This parameter instructs the RA to save the configuration back to the xml file provided in the "config" parameter. Save running VM's config back to its config file Setting this automatically enables save_config_on_stop. When enabled this parameter instructs the RA to call csync2 -x to synchronize the file to all nodes. csync2 must be properly set up for this to work. Save running VM's config back to its config file Path to the snapshot directory where the virtual machine image will be stored. When this parameter is set, the virtual machine's RAM state will be saved to a file in the snapshot directory when stopped. If on start a state file is present for the domain, the domain will be restored to the same state it was in right before it stopped last. This option is incompatible with the 'force_stop' option. Restore state on start/stop When the VM is used in Copy-On-Write mode, this is the backing file to use (with its full path). The VMs image will be created based on this backing file. This backing file will never be changed during the life of the VM. If the VM is wanted to work with Copy-On-Write mode, this is the backing file to use (with its full path) If set to true and backingfile is defined, the start of the VM will systematically create a new qcow2 based on the backing file, therefore the VM will always be stateless. If set to false, the start of the VM will use the COW (<vmname>.qcow2) file if it exists, otherwise the first start will create a new qcow2 based on the backing file given as backingfile. If set to true, the (<vmname>.qcow2) file will be re-created at each start, based on the backing file (if defined) List of directories for the virt-copy-in before booting the VM. Used only in stateless mode. List of directories for the virt-copy-in before booting the VM stateless mode. virsh shutdown method to use. Please verify that it is supported by your virsh toolsed with 'virsh help shutdown' When this parameter is set --mode shutdown_mode is passed as an additional argument to the 'virsh shutdown' command. One can use this option in case default acpi method does not work. Verify that this mode is supported by your VM. By default --mode is not passed. Instruct virsh to use specific shutdown mode EOF } set_util_attr() { local attr=$1 val=$2 local cval outp cval=$(crm_resource -Q -r $OCF_RESOURCE_INSTANCE -z -g $attr 2>/dev/null) if [ $? -ne 0 ] && [ -z "$cval" ]; then crm_resource -Q -r $OCF_RESOURCE_INSTANCE -z -g $attr 2>&1 | grep -e "not connected" > /dev/null 2>&1 if [ $? 
-eq 0 ]; then ocf_log debug "Unable to set utilization attribute, cib is not available" return fi fi if [ "$cval" != "$val" ]; then outp=$(crm_resource -r $OCF_RESOURCE_INSTANCE -z -p $attr -v $val 2>&1) || ocf_log warn "crm_resource failed to set utilization attribute $attr: $outp" fi } update_utilization() { local dom_cpu dom_mem if ocf_is_true "$OCF_RESKEY_autoset_utilization_cpu"; then dom_cpu=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/CPU\(s\)/{print $2}') test -n "$dom_cpu" && set_util_attr cpu $dom_cpu fi if ocf_is_true "$OCF_RESKEY_autoset_utilization_hv_memory"; then dom_mem=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/Max memory/{printf("%d", $3/1024)}') test -n "$dom_mem" && set_util_attr hv_memory "$dom_mem" fi } get_emulator() { local emulator="" emulator=$(virsh $VIRSH_OPTIONS dumpxml $DOMAIN_NAME 2>/dev/null | sed -n -e 's/^.*\(.*\)<\/emulator>.*$/\1/p') if [ -z "$emulator" ] && [ -e "$EMULATOR_STATE" ]; then emulator=$(cat $EMULATOR_STATE) fi if [ -z "$emulator" ]; then emulator=$(cat ${OCF_RESKEY_config} | sed -n -e 's/^.*\(.*\)<\/emulator>.*$/\1/p') fi if [ -n "$emulator" ]; then basename $emulator fi } update_emulator_cache() { local emulator emulator=$(get_emulator) if [ -n "$emulator" ]; then echo $emulator > $EMULATOR_STATE fi } # attempt to check domain status outside of libvirt using the emulator process pid_status() { local rc=$OCF_ERR_GENERIC local emulator=$(get_emulator) # An emulator is not required, so only report message in debug mode local loglevel="debug" if ocf_is_probe; then loglevel="notice" fi case "$emulator" in qemu-kvm|qemu-dm|qemu-system-*) rc=$OCF_NOT_RUNNING ps awx | grep -E "[q]emu-(kvm|dm|system).*-name $DOMAIN_NAME " > /dev/null 2>&1 if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi ;; libvirt_lxc) rc=$OCF_NOT_RUNNING ps awx | grep -E "[l]ibvirt_lxc.*-name $DOMAIN_NAME " > /dev/null 2>&1 if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi ;; # This can be expanded to check for additional emulators *) # We may be running xen with PV domains, they don't # have an emulator set. try xl list or xen-lists if have_binary xl; then rc=$OCF_NOT_RUNNING xl list $DOMAIN_NAME >/dev/null 2>&1 if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi elif have_binary xen-list; then rc=$OCF_NOT_RUNNING xen-list $DOMAIN_NAME 2>/dev/null | grep -qs "State.*[-r][-b][-p]--" 2>/dev/null if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi else ocf_log $loglevel "Unable to determine emulator for $DOMAIN_NAME" fi ;; esac if [ $rc -eq $OCF_SUCCESS ]; then ocf_log debug "Virtual domain $DOMAIN_NAME is currently running." elif [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log debug "Virtual domain $DOMAIN_NAME is currently not running." 
fi return $rc } VirtualDomain_status() { local try=0 rc=$OCF_ERR_GENERIC status="no state" while [ "$status" = "no state" ]; do try=$(($try + 1 )) status=$(LANG=C virsh $VIRSH_OPTIONS domstate $DOMAIN_NAME 2>&1 | tr 'A-Z' 'a-z') case "$status" in *"error:"*"domain not found"|*"error:"*"failed to get domain"*|"shut off") # shut off: domain is defined, but not started, will not happen if # domain is created but not defined # "Domain not found" or "failed to get domain": domain is not defined # and thus not started ocf_log debug "Virtual domain $DOMAIN_NAME is not running: $(echo $status | sed s/error://g)" rc=$OCF_NOT_RUNNING ;; running|paused|idle|blocked|"in shutdown") # running: domain is currently actively consuming cycles # paused: domain is paused (suspended) # idle: domain is running but idle # blocked: synonym for idle used by legacy Xen versions # in shutdown: the domain is in process of shutting down, but has not completely shutdown or crashed. ocf_log debug "Virtual domain $DOMAIN_NAME is currently $status." rc=$OCF_SUCCESS ;; ""|*"failed to "*"connect to the hypervisor"*|"no state") # Empty string may be returned when virsh does not # receive a reply from libvirtd. # "no state" may occur when the domain is currently # being migrated (on the migration target only), or # whenever virsh can't reliably obtain the domain # state. status="no state" if [ "$__OCF_ACTION" = "stop" ] && [ $try -ge 3 ]; then # During the stop operation, we want to bail out # quickly, so as to be able to force-stop (destroy) # the domain if necessary. ocf_log error "Virtual domain $DOMAIN_NAME has no state during stop operation, bailing out." return $OCF_ERR_GENERIC; elif [ "$__OCF_ACTION" = "monitor" ]; then pid_status rc=$? if [ $rc -ne $OCF_ERR_GENERIC ]; then # we've successfully determined the domains status outside of libvirt return $rc fi else # During all other actions, we just wait and try # again, relying on the CRM/LRM to time us out if # this takes too long. ocf_log info "Virtual domain $DOMAIN_NAME currently has no state, retrying." fi sleep 1 ;; *) # any other output is unexpected. ocf_log error "Virtual domain $DOMAIN_NAME has unknown status \"$status\"!" sleep 1 ;; esac done return $rc } # virsh undefine removes configuration files if they are in # directories which are managed by libvirt. such directories # include also subdirectories of /etc (for instance # /etc/libvirt/*) which may be surprising. VirtualDomain didn't # include the undefine call before, hence this wasn't an issue # before. # # There seems to be no way to find out which directories are # managed by libvirt. # verify_undefined() { local tmpf if virsh --connect=${OCF_RESKEY_hypervisor} list --all --name 2>/dev/null | grep -wqs "$DOMAIN_NAME" then tmpf=$(mktemp -t vmcfgsave.XXXXXX) if [ ! -r "$tmpf" ]; then ocf_log warn "unable to create temp file, disk full?" # we must undefine the domain virsh $VIRSH_OPTIONS undefine $DOMAIN_NAME > /dev/null 2>&1 else cp -p $OCF_RESKEY_config $tmpf virsh $VIRSH_OPTIONS undefine $DOMAIN_NAME > /dev/null 2>&1 [ -f $OCF_RESKEY_config ] || cp -f $tmpf $OCF_RESKEY_config rm -f $tmpf fi fi } VirtualDomain_start() { local snapshotimage if VirtualDomain_status; then ocf_log info "Virtual domain $DOMAIN_NAME already running." 
return $OCF_SUCCESS fi # systemd drop-in to stop domain before libvirtd terminates services # during shutdown/reboot if systemd_is_running ; then systemd_drop_in "99-VirtualDomain-libvirt" "After" "libvirtd.service" systemd_drop_in "99-VirtualDomain-machines" "Wants" "virt-guest-shutdown.target" systemctl start virt-guest-shutdown.target fi snapshotimage="$OCF_RESKEY_snapshot/${DOMAIN_NAME}.state" if [ -n "$OCF_RESKEY_snapshot" -a -f "$snapshotimage" ]; then virsh restore $snapshotimage if [ $? -eq 0 ]; then rm -f $snapshotimage return $OCF_SUCCESS fi ocf_exit_reason "Failed to restore ${DOMAIN_NAME} from state file in ${OCF_RESKEY_snapshot} directory." return $OCF_ERR_GENERIC fi # Make sure domain is undefined before creating. # The 'create' command guarantees that the domain will be # undefined on shutdown, but requires the domain to be undefined. # if a user defines the domain # outside of this agent, we have to ensure that the domain # is restored to an 'undefined' state before creating. verify_undefined if [ -z "${OCF_RESKEY_backingfile}" ]; then virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config} if [ $? -ne 0 ]; then ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." return $OCF_ERR_GENERIC fi else if ocf_is_true "${OCF_RESKEY_stateless}" || [ ! -s "${OCF_RESKEY_config%%.*}.qcow2" ]; then # Create the Stateless image dirconfig=`dirname ${OCF_RESKEY_config}` qemu-img create -f qcow2 -b ${OCF_RESKEY_backingfile} ${OCF_RESKEY_config%%.*}.qcow2 if [ $? -ne 0 ]; then ocf_exit_reason "Failed qemu-img create ${DOMAIN_NAME} with backing file ${OCF_RESKEY_backingfile}." return $OCF_ERR_GENERIC fi virsh define ${OCF_RESKEY_config} if [ $? -ne 0 ]; then ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}." return $OCF_ERR_GENERIC fi if [ -n "${OCF_RESKEY_copyindirs}" ]; then # Inject copyindirs directories and files virt-copy-in -d ${DOMAIN_NAME} ${OCF_RESKEY_copyindirs} / if [ $? -ne 0 ]; then ocf_exit_reason "Failed on virt-copy-in command ${DOMAIN_NAME}." return $OCF_ERR_GENERIC fi fi else virsh define ${OCF_RESKEY_config} if [ $? -ne 0 ]; then ocf_exit_reason "Failed to define virtual domain ${DOMAIN_NAME}." return $OCF_ERR_GENERIC fi fi virsh $VIRSH_OPTIONS start ${DOMAIN_NAME} if [ $? -ne 0 ]; then ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." return $OCF_ERR_GENERIC fi fi while ! VirtualDomain_monitor; do sleep 1 done return $OCF_SUCCESS } force_stop() { local out ex translate local status=0 ocf_log info "Issuing forced shutdown (destroy) request for domain ${DOMAIN_NAME}." out=$(LANG=C virsh $VIRSH_OPTIONS destroy ${DOMAIN_NAME} 2>&1) ex=$? translate=$(echo $out|tr 'A-Z' 'a-z') echo >&2 "$translate" case $ex$translate in *"error:"*"domain is not running"*|*"error:"*"domain not found"*|\ *"error:"*"failed to get domain"*) : ;; # unexpected path to the intended outcome, all is well [!0]*) ocf_exit_reason "forced stop failed" return $OCF_ERR_GENERIC ;; 0*) while [ $status != $OCF_NOT_RUNNING ]; do VirtualDomain_status status=$? done ;; esac return $OCF_SUCCESS } sync_config(){ ocf_log info "Syncing $DOMAIN_NAME config file with csync2 -x ${OCF_RESKEY_config}" if ! csync2 -x ${OCF_RESKEY_config}; then ocf_log warn "Syncing ${OCF_RESKEY_config} failed."; fi } save_config(){ CFGTMP=$(mktemp -t vmcfgsave.XXX) virsh $VIRSH_OPTIONS dumpxml --inactive --security-info ${DOMAIN_NAME} > ${CFGTMP} if [ -s ${CFGTMP} ]; then if ! 
cmp -s ${CFGTMP} ${OCF_RESKEY_config}; then if virt-xml-validate ${CFGTMP} domain 2>/dev/null ; then ocf_log info "Saving domain $DOMAIN_NAME to ${OCF_RESKEY_config}. Please make sure it's present on all nodes or sync_config_on_stop is on." if cat ${CFGTMP} > ${OCF_RESKEY_config} ; then ocf_log info "Saved $DOMAIN_NAME domain's configuration to ${OCF_RESKEY_config}." if ocf_is_true "$OCF_RESKEY_sync_config_on_stop"; then sync_config fi else ocf_log warn "Moving ${CFGTMP} to ${OCF_RESKEY_config} failed." fi else ocf_log warn "Domain $DOMAIN_NAME config failed to validate after dump. Skipping config update." fi fi else ocf_log warn "Domain $DOMAIN_NAME config has 0 size. Skipping config update." fi rm -f ${CFGTMP} } VirtualDomain_stop() { local i local status local shutdown_timeout local needshutdown=1 VirtualDomain_status status=$? case $status in $OCF_SUCCESS) if ocf_is_true $OCF_RESKEY_force_stop; then # if force stop, don't bother attempting graceful shutdown. force_stop return $? fi ocf_log info "Issuing graceful shutdown request for domain ${DOMAIN_NAME}." if [ -n "$OCF_RESKEY_snapshot" ]; then virsh save $DOMAIN_NAME "$OCF_RESKEY_snapshot/${DOMAIN_NAME}.state" if [ $? -eq 0 ]; then needshutdown=0 else ocf_log error "Failed to save snapshot state of ${DOMAIN_NAME} on stop" fi fi # save config if needed if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi # issue the shutdown if save state didn't shutdown for us if [ $needshutdown -eq 1 ]; then # Issue a graceful shutdown request if [ -n "${OCF_RESKEY_CRM_shutdown_mode}" ]; then shutdown_opts="--mode ${OCF_RESKEY_CRM_shutdown_mode}" fi virsh $VIRSH_OPTIONS shutdown ${DOMAIN_NAME} $shutdown_opts fi # The "shutdown_timeout" we use here is the operation # timeout specified in the CIB, minus 5 seconds shutdown_timeout=$(( $NOW + ($OCF_RESKEY_CRM_meta_timeout/1000) -5 )) # Loop on status until we reach $shutdown_timeout while [ $NOW -lt $shutdown_timeout ]; do VirtualDomain_status status=$? case $status in $OCF_NOT_RUNNING) # This was a graceful shutdown. return $OCF_SUCCESS ;; $OCF_SUCCESS) # Domain is still running, keep # waiting (until shutdown_timeout # expires) sleep 1 ;; *) # Something went wrong. Bail out and # resort to forced stop (destroy). break; esac NOW=$(date +%s) done ;; $OCF_NOT_RUNNING) ocf_log info "Domain $DOMAIN_NAME already stopped." return $OCF_SUCCESS esac # OK. Now if the above graceful shutdown hasn't worked, kill # off the domain with destroy. If that too does not work, # have the LRM time us out. force_stop } mk_migrateuri() { local target_node local migrate_target local hypervisor target_node="$OCF_RESKEY_CRM_meta_migrate_target" # A typical migration URI via a special migration network looks # like "tcp://bar-mig:49152". The port would be randomly chosen # by libvirt from the range 49152-49215 if omitted, at least since # version 0.7.4 ... if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then hypervisor="${OCF_RESKEY_hypervisor%%[+:]*}" # Hostname might be a FQDN migrate_target=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},") case $hypervisor in qemu) # For quiet ancient libvirt versions a migration port is needed # and the URI must not contain the "//". Newer versions can handle # the "bad" URI. echo "tcp:${migrate_target}:${OCF_RESKEY_migrateport}" ;; xen) echo "xenmigr://${migrate_target}" ;; *) ocf_log warn "$DOMAIN_NAME: Migration via dedicated network currently not supported for ${hypervisor}." 
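# Illustrative example of what the branches above produce (hypothetical values):
# with migration_network_suffix="-mig", migrateport=49152 and migrate target
# "node2.example.com", the qemu branch echoes "tcp:node2-mig.example.com:49152",
# while the xen branch would echo "xenmigr://node2-mig.example.com".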
;; esac fi } VirtualDomain_migrate_to() { local rc local target_node local remoteuri local transport_suffix local migrateuri local migrate_opts local migrate_pid target_node="$OCF_RESKEY_CRM_meta_migrate_target" if VirtualDomain_status; then # Find out the remote hypervisor to connect to. That is, turn # something like "qemu://foo:9999/system" into # "qemu+tcp://bar:9999/system" if [ -n "${OCF_RESKEY_remoteuri}" ]; then remoteuri=`echo "${OCF_RESKEY_remoteuri}" | sed "s/%n/$target_node/g"` else if [ -n "${OCF_RESKEY_migration_transport}" ]; then transport_suffix="+${OCF_RESKEY_migration_transport}" fi # append user defined suffix if virsh target should differ from cluster node name if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then # Hostname might be a FQDN target_node=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},") fi # a remote user has been defined to connect to target_node if echo ${OCF_RESKEY_migration_user} | grep -q "^[a-z][-a-z0-9]*$" ; then target_node="${OCF_RESKEY_migration_user}@${target_node}" fi # Scared of that sed expression? So am I. :-) remoteuri=$(echo ${OCF_RESKEY_hypervisor} | sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,") fi # User defined migrateuri or do we make one? migrate_opts="$OCF_RESKEY_migrate_options" # migration_uri is directly set if [ -n "${OCF_RESKEY_migrateuri}" ]; then migrateuri=`echo "${OCF_RESKEY_migrateuri}" | sed "s/%n/$target_node/g"` # extract migrationuri from options elif echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then migrateuri=`echo "$migrate_opts" | sed "s/.*--migrateuri=\([^ ]*\).*/\1/;s/%n/$target_node/g"` # auto generate else migrateuri=`mk_migrateuri` fi # remove --migrateuri from migration_opts migrate_opts=`echo "$migrate_opts" | sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"` # save config if needed if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi # Live migration speed limit if [ ${OCF_RESKEY_migration_speed} -ne 0 ]; then ocf_log info "$DOMAIN_NAME: Setting live migration speed limit for $DOMAIN_NAME (using: virsh ${VIRSH_OPTIONS} migrate-setspeed $DOMAIN_NAME ${OCF_RESKEY_migration_speed})." virsh ${VIRSH_OPTIONS} migrate-setspeed $DOMAIN_NAME ${OCF_RESKEY_migration_speed} fi # OK, we know where to connect to. Now do the actual migration. ocf_log info "$DOMAIN_NAME: Starting live migration to ${target_node} (using: virsh ${VIRSH_OPTIONS} migrate --live $migrate_opts $DOMAIN_NAME $remoteuri $migrateuri)." virsh ${VIRSH_OPTIONS} migrate --live $migrate_opts $DOMAIN_NAME $remoteuri $migrateuri & migrate_pid=${!} # Live migration downtime interval # Note: You can set downtime only while live migration is in progress if [ ${OCF_RESKEY_migration_downtime} -ne 0 ]; then sleep 2 ocf_log info "$DOMAIN_NAME: Setting live migration downtime for $DOMAIN_NAME (using: virsh ${VIRSH_OPTIONS} migrate-setmaxdowntime $DOMAIN_NAME ${OCF_RESKEY_migration_downtime})." virsh ${VIRSH_OPTIONS} migrate-setmaxdowntime $DOMAIN_NAME ${OCF_RESKEY_migration_downtime} fi wait ${migrate_pid} rc=$? if [ $rc -ne 0 ]; then ocf_exit_reason "$DOMAIN_NAME: live migration to ${target_node} failed: $rc" return $OCF_ERR_GENERIC else ocf_log info "$DOMAIN_NAME: live migration to ${target_node} succeeded." return $OCF_SUCCESS fi else ocf_exit_reason "$DOMAIN_NAME: migrate_to: Not active locally!" 
return $OCF_ERR_GENERIC fi } VirtualDomain_migrate_from() { # systemd drop-in to stop domain before libvirtd terminates services # during shutdown/reboot if systemd_is_running ; then systemd_drop_in "99-VirtualDomain-libvirt" "After" "libvirtd.service" systemd_drop_in "99-VirtualDomain-machines" "Wants" "virt-guest-shutdown.target" systemctl start virt-guest-shutdown.target fi while ! VirtualDomain_monitor; do sleep 1 done ocf_log info "$DOMAIN_NAME: live migration from ${OCF_RESKEY_CRM_meta_migrate_source} succeeded." # save config if needed if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi return $OCF_SUCCESS } VirtualDomain_monitor() { # First, check the domain status. If that returns anything other # than $OCF_SUCCESS, something is definitely wrong. VirtualDomain_status rc=$? if [ ${rc} -eq ${OCF_SUCCESS} ]; then # OK, the generic status check turned out fine. Now, if we # have monitor scripts defined, run them one after another. for script in ${OCF_RESKEY_monitor_scripts}; do script_output="$($script 2>&1)" script_rc=$? if [ ${script_rc} -ne ${OCF_SUCCESS} ]; then # A monitor script returned a non-success exit # code. Stop iterating over the list of scripts, log a # warning message, and propagate $OCF_ERR_GENERIC. ocf_exit_reason "Monitor command \"${script}\" for domain ${DOMAIN_NAME} returned ${script_rc} with output: ${script_output}" rc=$OCF_ERR_GENERIC break else ocf_log debug "Monitor command \"${script}\" for domain ${DOMAIN_NAME} completed successfully with output: ${script_output}" fi done fi update_emulator_cache update_utilization # Save configuration on monitor as well, so we will have a better chance of # having fresh and up to date config files on all nodes. if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi return ${rc} } VirtualDomain_validate_all() { if ocf_is_true $OCF_RESKEY_force_stop && [ -n "$OCF_RESKEY_snapshot" ]; then ocf_exit_reason "The 'force_stop' and 'snapshot' options can not be used together." return $OCF_ERR_CONFIGURED fi # check if we can read the config file (otherwise we're unable to # deduce $DOMAIN_NAME from it, see below) if [ ! -r $OCF_RESKEY_config ]; then if ocf_is_probe; then ocf_log info "Configuration file $OCF_RESKEY_config not readable during probe." elif [ "$__OCF_ACTION" = "stop" ]; then ocf_log info "Configuration file $OCF_RESKEY_config not readable, resource considered stopped." else ocf_exit_reason "Configuration file $OCF_RESKEY_config does not exist or not readable." fi return $OCF_ERR_INSTALLED fi if [ -z $DOMAIN_NAME ]; then ocf_exit_reason "Unable to determine domain name." return $OCF_ERR_INSTALLED fi # Check if csync2 is available when config tells us we might need it. if ocf_is_true $OCF_RESKEY_sync_config_on_stop; then check_binary csync2 fi # Check if migration_speed is a decimal value if ! ocf_is_decimal ${OCF_RESKEY_migration_speed}; then ocf_exit_reason "migration_speed has to be a decimal value" return $OCF_ERR_CONFIGURED fi # Check if migration_downtime is a decimal value if ! ocf_is_decimal ${OCF_RESKEY_migration_downtime}; then ocf_exit_reason "migration_downtime has to be a decimal value" return $OCF_ERR_CONFIGURED fi if ocf_is_true "${OCF_RESKEY_stateless}" && [ -z "${OCF_RESKEY_backingfile}" ]; then ocf_exit_reason "Stateless functionality can't be achieved without a backing file." 
return $OCF_ERR_CONFIGURED fi } VirtualDomain_getconfig() { # Grab the virsh uri default, but only if hypervisor isn't set : ${OCF_RESKEY_hypervisor=$(virsh --quiet uri 2>/dev/null)} # Set options to be passed to virsh: VIRSH_OPTIONS="--connect=${OCF_RESKEY_hypervisor} --quiet" # Retrieve the domain name from the xml file. DOMAIN_NAME=`egrep '[[:space:]]*.*[[:space:]]*$' ${OCF_RESKEY_config} 2>/dev/null | sed -e 's/[[:space:]]*\(.*\)<\/name>[[:space:]]*$/\1/'` EMULATOR_STATE="${HA_RSCTMP}/VirtualDomain-${DOMAIN_NAME}-emu.state" } OCF_REQUIRED_PARAMS="config" OCF_REQUIRED_BINARIES="virsh sed" ocf_rarun $* diff --git a/heartbeat/kamailio.in b/heartbeat/kamailio.in index 3e83833c8..8e4ffc9d5 100644 --- a/heartbeat/kamailio.in +++ b/heartbeat/kamailio.in @@ -1,741 +1,741 @@ #!@BASH_SHELL@ # # OCF resource agent for Kamailio for pacemaker # # Copyright (c) 2013 FREQUENTIS AG, # Authors: Stefan Wenk # Rainer Brestan # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # # OCF input parameters: # OCF_RESKEY_binary # OCF_RESKEY_conffile # OCF_RESKEY_pidfile # OCF_RESKEY_monitoring_ip # OCF_RESKEY_listen_address # OCF_RESKEY_port # OCF_RESKEY_proto # OCF_RESKEY_sipsak # OCF_RESKEY_kamctl # OCF_RESKEY_kamctlrc # OCF_RESKEY_kamuser # OCF_RESKEY_kamgroup # OCF_RESKEY_extra_options # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . 
${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Defaults RESKEY_binary_default="/usr/sbin/kamailio" RESKEY_conffile_default="/etc/kamailio/kamailio.cfg" RESKEY_pidfile_default="/var/run/kamailio_${OCF_RESOURCE_INSTANCE}/kamailio.pid" RESKEY_monitoring_ip_default=127.0.0.1 RESKEY_port_default=5060 RESKEY_proto_default="udptcp" RESKEY_sipsak_default="/usr/bin/sipsak" RESKEY_kamctl_default="/usr/bin/kamctl" RESKEY_kamctlrc_default="/etc/kamailio/kamctlrc" RESKEY_kamuser_default="" RESKEY_kamgroup_default="" RESKEY_extra_options_default="" ####################################################################### : ${OCF_RESKEY_binary=${RESKEY_binary_default}} : ${OCF_RESKEY_conffile=${RESKEY_conffile_default}} : ${OCF_RESKEY_pidfile=${RESKEY_pidfile_default}} : ${OCF_RESKEY_monitoring_ip=${RESKEY_monitoring_ip_default}} : ${OCF_RESKEY_port=${RESKEY_port_default}} : ${OCF_RESKEY_proto=${RESKEY_proto_default}} : ${OCF_RESKEY_sipsak=${RESKEY_sipsak_default}} : ${OCF_RESKEY_kamctl=${RESKEY_kamctl_default}} : ${OCF_RESKEY_kamctlrc=${RESKEY_kamctlrc_default}} : ${OCF_RESKEY_kamuser=${RESKEY_kamuser_default}} : ${OCF_RESKEY_kamgroup=${RESKEY_kamgroup_default}} : ${OCF_RESKEY_extra_options=${RESKEY_extra_options_default}} ####################################################################### usage() { cat < 1.0 Resource agent for the Kamailio SIP proxy/registrar. Multiple instances are possible when using following parameter combinations: Parameters for Kamailio instance 1: listen_address=192.168.159.128 monitoring_ip=192.168.159.128 proto=udptcp port=5060 Parameters for Kamailio instance 2: listen_address=192.168.159.128 monitoring_ip=192.168.159.128 proto=udp port=5070 conffile=/etc/kamailio/kamailio2.cfg kamctlrc="" Only one instance can be monitored via the command "kamctl monitor" because the kamctl tool of kamailio 4.x is not designed for multiple instances. Therefore, the provided kamctrlrc file path needs to be empty for instance 2, 3 ... Parameters for a third Kamailio instance: listen_address=192.168.159.128 monitoring_ip=192.168.159.128 proto=tcp port=5080 conffile=/etc/kamailio/kamailio3.cfg kamctlrc="" Resource agent for Kamailio The kamailio binary The kamailio binary The kamailio configuration file name with full path. For example, "/etc/kamailio/kamailio.cfg" , which is the default value. Make sure to use unique names in case of having multiple instances. Configuration file name with full path The kamailio PID file. The directory used must be writable by kamailio process user. Be sure to use unique name for running more than one instance. Try to use absolute path names. If empty, resource agent create a unique directory from the resource instance name for the PID file and assign it to the process user. PID file SIP IP Address of the kamailio instance used for SIP OPTIONS polling monitoring. Usually the same IP address value as for parameter listen_address should be provided. In order to respond with a 200 OK response to the SIP OOPTION requests, the kamailio.cfg file needs to contain following section: - Note: The following "kamailio.cfg" code sniplet is part of an XML section. + Note: The following "kamailio.cfg" code snippet is part of an XML section. Therefore it contains two & characters, which need to be replaced with two ampersand characters within "kamailio.cfg": if (is_method("OPTIONS") && ($ru=~"sip:monitor@.*")) { ## ## If the method is an OPTIONS we are simply going to respond ## with a 200 OK. 
# xlog("L_INFO", "Method is an OPTIONS, probably just monitoring\n"); sl_send_reply("200", "Kamailio is alive"); exit; } Monitoring IP address used for SIP OPTIONS polling. SIP IP address the kamailio will listen on. Listening SIP address SIP port for the kamailio instance. SIP Port Extra options to add to kamailio start. extra_options The protocol used for SIP proto = udp|tcp|udptcp|conf_udp|conf_tcp|conf_udptcp. Using the options "conf_*" does not add any "-l" parameters to the kamailio command, the "listen" parameters from kamailio.conf are used instead. The sipsak checks are performed depending what protocol is defined after the underscore. protocol The installation path of the sipsak tool, which is used for monitoring Kamailio via SIP OPTIONS polling. sipsak path The installation path of the "kamctl" control tool. kamctl path The location of the "kamctlrc" file for the Kamailio instance. The file "kamctlrc" is the Kamailio configuration file for its "kamctl" control tool. This parameter only needs to be provided in case of using multiple Kamailio server instances on a single cluster node: In case that the parameter "kamctlrc" is not empty, this resource agent monitors the health state of the Kamailio server via the command "kamctl monitor 1". This setting is recommended in case of using a single Kamailio server instance. In case that the parameter "kamctlrc" is empty, the resource agent does not monitor the health state of the Kamailio server instance via the "kamctl" command. Please note that the "kamctl" control command of Kamailio 4.x does not support running multiple Kamailio instances on one host. Nevertheless this resource agent does allow multiple Kamailio instances per host. The result of the "kamctl" limitation in terms of number of Kamailio server instances is that the health check via "kamctl monitor 1" can be configured for a single Kamailio instance only. Please refer to the long description of this resource agent for an example of parameter combinations in case that multiple instances are to be configured per cluster node. kamctlrc path The user account for kamailio process to run with. Uses the current user, if not specified or empty. There is no check, if running kamailio with the specified user account is possible. kamailio user The group for kamailio process to run with. Uses the current group, if not specified or empty. kamailio group END exit $OCF_SUCCESS } ####################################################################### ### #Check if a process with given PID is running # Parameter 1: PID ### isRunning_PID() { kill -s 0 "$1" > /dev/null 2>&1 } ### #Check if an instance with given command line is running # Parameter 1: command line. ### isRunning_cmd() { pkill -s 0 "$1" > /dev/null 2>&1 } ### # Formats the result of a command. # # Parameter 1: Exit status. # Parameter 2: Standard output (stdout). # Parameter 3: Error output (stderr). # Returns: Formatted result. kamailio_format_result() { local exitstatus="$1" local value="$2" local error="$3" echo -n "exit status: ${exitstatus}" if [ -n "$value" ]; then echo -n ", value: ${value}" fi if [ -n "$error" ]; then echo -n ", error: ${error}" fi echo } ### # Put the command line, how the kamailio process is started according # to the configured parameters, into the variable "kam_cmd". 
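# Illustrative example (hypothetical parameter values): with
# listen_address=192.168.159.128, port=5060, proto=udptcp and the default
# binary, pidfile and conffile, kam_cmd becomes roughly:
#   /usr/sbin/kamailio -P <pidfile> -f /etc/kamailio/kamailio.cfg \
#     -l udp:192.168.159.128:5060 -l udp:127.0.0.1:5060 \
#     -l tcp:192.168.159.128:5060 -l tcp:127.0.0.1:5060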
### kamailio_cmd() { case ${OCF_RESKEY_proto} in udp) listen_param="-T -l udp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l udp:127.0.0.1:${OCF_RESKEY_port}" ;; tcp) listen_param="-l tcp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l tcp:127.0.0.1:${OCF_RESKEY_port}" ;; udptcp) listen_param1="-l udp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l udp:127.0.0.1:${OCF_RESKEY_port}" listen_param2="-l tcp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l tcp:127.0.0.1:${OCF_RESKEY_port}" listen_param="${listen_param1} ${listen_param2}" ;; conf_*) # doing nothing, no listen_param set ;; *) listen_param="-T" ;; esac kam_cmd="${OCF_RESKEY_binary} -P ${OCF_RESKEY_pidfile} -f ${OCF_RESKEY_conffile}" if [ -n "${listen_param}" ]; then kam_cmd="${kam_cmd} ${listen_param}" fi if [ -n "${OCF_RESKEY_kamuser}" ]; then kam_cmd="${kam_cmd} -u ${OCF_RESKEY_kamuser}" fi if [ -n "${OCF_RESKEY_kamgroup}" ]; then kam_cmd="${kam_cmd} -g ${OCF_RESKEY_kamgroup}" fi if [ -n "${OCF_RESKEY_extra_options}" ]; then kam_cmd="${kam_cmd} ${OCF_RESKEY_extra_options}" fi } ### # Gets the PID for the running Kamailio instance. # # Returns: The variable $PID contains the found PID value or an empty string. # Exit Status: Zero if the PID file was found and this process runs under # the command line parameters of our instance. # 1) if the PID file is not present and no process running under # our command line options is active. # 2) in all other fatal cases, which we classify in the following # as OCF_ERR_GENERIC. These are the following cases: # a) The PID file contains a PID value which does not match # our instance # b) The PID file contains an empty string in its first line # c) The PID file contains some text and some processes # from our instance are still active kamailio_get_pid() { if [ -f ${OCF_RESKEY_pidfile} ]; then PID=`head -n 1 $OCF_RESKEY_pidfile` if [ ! -z "$PID" ]; then #Cross check if the PID file really contains a process of our kamailio instance: kamailio_cmd CROSSPID=`pgrep -o -f "${kam_cmd}"` if [ x"$PID" == x"$CROSSPID" ]; then #ocf_log debug "Found kamailio process PID with value: $PID." return 0 fi #ocf_log debug "PID file does not contain a PID of a $OCF_RESKEY_binary process!" return 2 fi #PID file does not contain a valid PID rm -f ${OCF_RESKEY_pidfile} return 2 fi # No PID file found! # Check if still a process exists even though we don't have the PID any longer: kamailio_cmd pgrep -f "${kam_cmd}" if [ $? -eq 0 ]; then ocf_log info "PID file does not contain a valid PID, but kamailio process is still active" return 2 fi ocf_log info "No PID file found and our kamailio instance is not active" return 1 } kamailio_status() { local not_running_log_level="warn" local errorfile error output if [ "$__OCF_ACTION" = "start" ]; then not_running_log_level="debug" fi kamailio_get_pid >/dev/null RET=$? if [ $RET -ne 0 ]; then if [ $RET -eq 2 ]; then ocf_log $not_running_log_level "PID file does not contain a PID of a ${OCF_RESKEY_binary} process!" return $OCF_ERR_GENERIC fi return $OCF_NOT_RUNNING fi PID=`head -n 1 $OCF_RESKEY_pidfile` isRunning_PID "$PID" RET=$? if [ "$RET" -ne 0 ]; then ocf_log $not_running_log_level "PID $PID from ${OCF_RESKEY_pidfile} not running" rm -f ${OCF_RESKEY_pidfile} return $OCF_NOT_RUNNING fi rc=0 # In case that OCF_RESKEY_kamctlrc is not empty we perform a health check via "kamctl monitor 1" if [ !
-z ${OCF_RESKEY_kamctlrc} ]; then # PID is running now but it is not save to check via kamctl without care, because # the implementation analysis in the case that we kill all running processes # shows that in case that the fifo cannot be read, then kamctl blocks. This needs # to be avoided. # In order to be on the safe side, we run this check therefore under "timeout" control: rc=1 timeout 3 ${OCF_RESKEY_kamctl} monitor 1 |grep "since" ; rc=$? fi if [ $rc -ne 0 ]; then ocf_log $not_running_log_level "Kamailio is not up according to kamctl monitor!" return $OCF_NOT_RUNNING fi errorfile=`mktemp` case ${OCF_RESKEY_proto} in udp) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport udp>/dev/null 2>>$errorfile` result=$? ;; tcp) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport tcp>/dev/null 2>>$errorfile` result=$? ;; udptcp) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport tcp>/dev/null 2>>$errorfile` result=$? if [ $result -eq 0 ]; then output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport udp>/dev/null 2>>$errorfile` result=$? fi ;; *) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport udp>/dev/null 2>>$errorfile` result=$? ;; esac error=`cat $errorfile` rm -f $errorfile if [ $result -ne 0 ]; then ocf_log $not_running_log_level "Kamailio is running, but not functional as sipsak ${OCF_RESKEY_proto} failed with $(kamailio_format_result $result "$output" "$error")" return $OCF_ERR_GENERIC fi return $OCF_SUCCESS } kamailio_monitor() { kamailio_status } kamailio_start() { local errorfile error output piddir if kamailio_status then ocf_log info "kamailio already running." return $OCF_SUCCESS fi # if pidfile directory does not exist, create it with kamailio process owner piddir=`dirname "${OCF_RESKEY_pidfile}"` if [ ! -d "$piddir" ]; then mkdir -p "$piddir" if [ "$OCF_RESKEY_kamuser" != "" ]; then chown ${OCF_RESKEY_kamuser} "$piddir" fi fi kamailio_cmd if [ "$OCF_RESKEY_kamuser" != "" ]; then kam_cmd="su -s @BASH_SHELL@ $OCF_RESKEY_kamuser -c \"$kam_cmd\"" fi ocf_log info "start kamailio with $kam_cmd." errorfile=`mktemp` output=$(eval ${kam_cmd} 2>>$errorfile) result=$? error=`cat $errorfile` rm -f $errorfile if [ $result -eq 0 ]; then result=1 while [ $result -ne 0 ]; do sleep 1 kamailio_get_pid >/dev/null result=$? done ocf_log info "kamailio instance PID=$PID started." # check with monitor operation if running correctly result=$OCF_ERR_GENERIC while [ $result -ne $OCF_SUCCESS ]; do sleep 1 kamailio_monitor result=$? ocf_log info "monitor in start returned $result" done ocf_log info "kamailio started successful." else ocf_log err "kamailio instance could not be started, $(kamailio_format_result $result "$output" "$error")" result=$OCF_ERR_GENERIC fi return $result } kamailio_stop() { local piddir local TRIES=0 result=$OCF_SUCCESS kamailio_cmd ocf_log info "Stopping kamailio by sending SIGTERM to ${kam_cmd}" pkill -SIGTERM -x -f "${kam_cmd}" if [ $? -eq 1 ]; then # already stopped. 
no processes found # in case of not specified pidfile, delete the created directory # otherwise only the pidfile itself if [ "${OCF_RESKEY_pidfile}" == "${RESKEY_pidfile_default}" ]; then piddir=`dirname "${OCF_RESKEY_pidfile}"` rm -rf "$piddir" else rm -f "${OCF_RESKEY_pidfile}" fi return $result fi if [ "$OCF_RESKEY_CRM_meta_timeout" != "" ]; then KAMAILIO_STOP_TIMEOUT=$(( ($OCF_RESKEY_CRM_meta_timeout/1000) - 7 )) else KAMAILIO_STOP_TIMEOUT=20 fi while isRunning_cmd "${kam_cmd}" && [ "$TRIES" -lt "${KAMAILIO_STOP_TIMEOUT}" ] do sleep 1 ocf_log info "kamailio ${kam_cmd} is still running after SIGTERM" ((TRIES++)) done isRunning_cmd "${kam_cmd}" RET=$? if [ "$RET" -eq 0 ]; then ocf_log info "Killing ${kam_cmd} with SIGKILL" TRIES=0 pkill -SIGKILL -x -f "${kam_cmd}" > /dev/null 2>&1 while isRunning_cmd "${kam_cmd}" && [ "$TRIES" -lt 3 ] do sleep 1 ocf_log info "kamailio ${kam_cmd} is still running after SIGKILL" ((TRIES++)) done isRunning_cmd "${kam_cmd}" RET=$? if [ "$RET" -eq 0 ]; then ocf_log fatal "kamailio is still running even after SIGKILL" result=$OCF_ERR_GENERIC fi else ocf_log info "${kam_cmd} has stopped." fi # in case of not specified pidfile, delete the created directory # otherwise only the pidfile itself if [ "${OCF_RESKEY_pidfile}" == "${RESKEY_pidfile_default}" ]; then piddir=`dirname "${OCF_RESKEY_pidfile}"` rm -rf "$piddir" else rm -f "${OCF_RESKEY_pidfile}" fi return $result } kamailio_validate_all() { # Check if kamailio configuration is valid before starting the server if [ ! -f $OCF_RESKEY_binary ]; then ocf_log err "File OCF_RESKEY_binary [${OCF_RESKEY_binary}] does not exist!" return $OCF_NOT_INSTALLED fi out=$($OCF_RESKEY_binary -c 2>&1 > /dev/null) retcode=$? if [ "$retcode" -ne '0' ]; then ocf_log info "Not starting kamailio: $OCF_RESKEY_binary does not start!" return $OCF_ERR_CONFIGURED fi case $OCF_RESKEY_monitoring_ip in "") ocf_log err "Required parameter OCF_RESKEY_monitoring_ip is missing!" return $OCF_ERR_CONFIGURED ;; [0-9]*.[0-9]*.[0-9]*.[0-9]*) : OK ;; *) ocf_log err "Parameter OCF_RESKEY_monitoring_ip [$OCF_RESKEY_monitoring_ip] is not an IP address!" return $OCF_ERR_CONFIGURED ;; esac case $OCF_RESKEY_listen_address in "") ocf_log err "Required parameter $OCF_RESKEY_listen_address is missing!" return $OCF_ERR_CONFIGURED ;; [0-9]*.[0-9]*.[0-9]*.[0-9]*) : OK ;; *) ocf_log err "Parameter OCF_RESKEY_listen_address [$OCF_RESKEY_listen_address] not an IP address!" return $OCF_ERR_CONFIGURED ;; esac if [ ! -f ${OCF_RESKEY_sipsak} ]; then ocf_log err "sipsak [${OCF_RESKEY_sipsak}] does not exist!" return $OCF_NOT_INSTALLED fi if [ ! -z ${OCF_RESKEY_kamctlrc} ]; then if [ ! -f ${OCF_RESKEY_kamctlrc} ]; then ocf_log err "kamctlrc file [${kamctlrc}] does not exist!" return $OCF_NOT_INSTALLED fi else ocf_log debug "No monitoring via kamctl monitor because the parameter [kamctlrc] is empty." fi if [ ! -f ${OCF_RESKEY_conffile} ]; then ocf_log err "Kamailio configuration file provided in the parameter conffile [${OCF_RESKEY_conffile}] does not exist!" return $OCF_ERR_CONFIGURED fi case $OCF_RESKEY_proto in "") ocf_log err "Parameter $OCF_RESKEY_proto is empty!" return $OCF_ERR_CONFIGURED ;; udp|tcp|udptcp) : OK ;; *) ocf_log err "Parameter value $OCF_RESKEY_proto for parameter [proto] not yet supported!" 
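# Note: the "conf_udp", "conf_tcp" and "conf_udptcp" values described in the
# meta-data are handled by kamailio_cmd() above but are not accepted by this
# check; only udp, tcp and udptcp pass validation here.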
return $OCF_ERR_CONFIGURED ;; esac return $OCF_SUCCESS } if [ $# -ne 1 ]; then usage exit $OCF_ERR_ARGS fi case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; start|stop|status|monitor) kamailio_${__OCF_ACTION} ;; validate-all) kamailio_validate_all ;; notify) exit $OCF_SUCCESS ;; usage) usage exit $OCF_SUCCESS ;; # reload) #Not supported by Kamailio, but not needed by pacemaker # ;; # recover #Not needed by pacemaker # ;; *) usage exit $OCF_ERR_UNIMPLEMENTED ;; esac exit $? diff --git a/heartbeat/lxc.in b/heartbeat/lxc.in index 5e9d9393e..72a377e24 100644 --- a/heartbeat/lxc.in +++ b/heartbeat/lxc.in @@ -1,374 +1,374 @@ #!@BASH_SHELL@ # Should now conform to guidelines: # https://github.com/ClusterLabs/resource-agents/blob/master/doc/dev-guides/ra-dev-guide.asc # # LXC (Linux Containers) OCF RA. # Used to cluster enable the start, stop and monitoring of a LXC container. # # Copyright (c) 2011 AkurIT.com.au, Darren Thompson # All Rights Reserved. # # Without limiting the rights of the original copyright holders # This resource is licensed under GPL version 2 # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # OCF instance parameters # OCF_RESKEY_container # OCF_RESKEY_config # OCF_RESKEY_log # OCF_RESKEY_use_screen # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # Defaults OCF_RESKEY_log_default="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.log" OCF_RESKEY_use_screen_default="false" : ${OCF_RESKEY_log=${OCF_RESKEY_log_default}} : ${OCF_RESKEY_use_screen=${OCF_RESKEY_use_screen_default}} # Set default TRANS_RES_STATE (temporary file to "flag" if resource was stated but not stopped) TRANS_RES_STATE="${HA_RSCTMP}/${OCF_RESOURCE_INSTANCE}.state" meta_data() { cat < 0.1 Allows LXC containers to be managed by the cluster. If the container is running "init" it will also perform an orderly shutdown. It is 'assumed' that the 'init' system will do an orderly shudown if presented with a 'kill -PWR' signal. On a 'sysvinit' this would require the container to have an inittab file containing "p0::powerfail:/sbin/init 0" -I have absolutly no idea how this is done with 'upstart' or 'systemd', YMMV if your container is using one of them. +I have absolutely no idea how this is done with 'upstart' or 'systemd', YMMV if your container is using one of them. Manages LXC containers The unique name for this 'Container Instance' e.g. 'test1'. Container Name Absolute path to the file holding the specific configuration for this container e.g. '/etc/lxc/test1/config'. The LXC config file. 
Absolute path to the container log file Container log file Provides the option of capturing the 'root console' from the container and showing it on a separate screen. To see the screen output run 'screen -r {container name}' The default value is set to 'false', change to 'true' to activate this option Use 'screen' for container 'root console' output END } LXC_usage() { cat <${CGROUP_MOUNT_POINT}/notify_on_release return 0 } LXC_start() { # put this here as it's so long it gets messy later!!! if ocf_is_true $OCF_RESKEY_use_screen; then STARTCMD="screen -dmS ${OCF_RESKEY_container} lxc-start -f ${OCF_RESKEY_config} -n ${OCF_RESKEY_container} -o ${OCF_RESKEY_log}" else STARTCMD="lxc-start -f ${OCF_RESKEY_config} -n ${OCF_RESKEY_container} -o ${OCF_RESKEY_log} -d" fi LXC_status if [ $? -eq $OCF_SUCCESS ]; then ocf_log debug "Resource $OCF_RESOURCE_INSTANCE is already running" ocf_run touch "${TRANS_RES_STATE}" || exit $OCF_ERR_GENERIC return $OCF_SUCCESS fi cgroup_mounted if [ $? -ne 0 ]; then ocf_log err "Unable to find cgroup mount" exit $OCF_ERR_GENERIC fi ocf_log info "Starting" ${OCF_RESKEY_container} ocf_run ${STARTCMD} || exit $OCF_ERR_GENERIC # Spin on status, wait for the cluster manager to time us out if # we fail while ! LXC_status; do ocf_log info "Container ${OCF_RESKEY_container} has not started, waiting" sleep 1 done ocf_run touch "${TRANS_RES_STATE}" || exit $OCF_ERR_GENERIC return $OCF_SUCCESS } LXC_stop() { local shutdown_timeout local now LXC_status if [ $? -eq $OCF_NOT_RUNNING ]; then ocf_log debug "Resource $OCF_RESOURCE_INSTANCE is already stopped" ocf_run rm -f $TRANS_RES_STATE return $OCF_SUCCESS fi cgroup_mounted if [ $? -ne 0 ]; then ocf_log err "Unable to find cgroup mount" exit $OCF_ERR_GENERIC fi # If the container is running "init" and is able to perform and orderly shutdown, then it should be done. # It is 'assumed' that the 'init' system will do an orderly shudown if presented with a 'kill -PWR' signal. # On a 'sysvinit' this would require the container to have an inittab file containing "p0::powerfail:/sbin/init 0" declare -i PID=0 declare CMD= # LXC prior 1.0.0 if ocf_version_cmp "`lxc_version`" 1.0.0 ; then # This should work for traditional 'sysvinit' and 'upstart' lxc-ps --name "${OCF_RESKEY_container}" -- -C init -o pid,comm |while read CN PID CMD ;do [ $PID -gt 1 ] || continue [ "$CMD" = "init" ] || continue ocf_log info "Sending \"OS shut down\" instruction to" ${OCF_RESKEY_container} "as it was found to be using \"sysV init\" or \"upstart\"" kill -PWR $PID done # This should work for containers using 'systemd' instead of 'init' lxc-ps --name "${OCF_RESKEY_container}" -- -C systemd -o pid,comm |while read CN PID CMD ;do [ $PID -gt 1 ] || continue [ "$CMD" = "systemd" ] || continue ocf_log info "Sending \"OS shut down\" instruction to" ${OCF_RESKEY_container} "as it was found to be using \"systemd\"" kill -PWR $PID done else PID=$(lxc-info --name "${OCF_RESKEY_container}" -p -H) # If there is no PID the container seems to be down which # shouldn't happen. if [ $PID -eq 0 ]; then ocf_log err "${OCF_RESKEY_container} seems to run, but has no PID." exit $OCF_ERR_GENERIC fi # Rescue me. if [ $PID -eq 1 ]; then ocf_log err "${OCF_RESKEY_container} seems to run with PID 1 which cannot be." 
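# (A PID of 1 would be the host's own init process, so it cannot belong to the
# container; clear PID and CMD so that no "kill -PWR" is sent and the code
# below falls through to the timed wait and forced lxc-stop.)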
PID=0 CMD= else CMD=$(ps -o comm= -p $PID) fi # This should work for traditional 'sysvinit' and 'upstart' if [ "$CMD" = "init" ]; then ocf_log info "Sending \"OS shut down\" instruction to" ${OCF_RESKEY_container} "as it was found to be using \"sysV init\" or \"upstart\"" kill -PWR $PID fi # This should work for containers using 'systemd' instead of 'init' if [ "$CMD" = "systemd" ]; then ocf_log info "Sending \"OS shut down\" instruction to" ${OCF_RESKEY_container} "as it was found to be using \"systemd\"" kill -PWR $PID fi fi # The "shutdown_timeout" we use here is the operation # timeout specified in the CIB, minus 5 seconds now=$(date +%s) shutdown_timeout=$(( $now + ($OCF_RESKEY_CRM_meta_timeout/1000) -5 )) # Loop on status until we reach $shutdown_timeout while [ $now -lt $shutdown_timeout ]; do LXC_status status=$? case $status in "$OCF_NOT_RUNNING") ocf_run rm -f $TRANS_RES_STATE return $OCF_SUCCESS ;; "$OCF_SUCCESS") # Container is still running, keep waiting (until # shutdown_timeout expires) sleep 1 ;; *) # Something went wrong. Bail out and # resort to forced stop (destroy). break; esac now=$(date +%s) done # If the container is still running, it will be stopped now. regardless of state! # LXC prior 1.0.0 if ocf_version_cmp "`lxc_version`" 1.0.0 ; then ocf_run lxc-stop -n ${OCF_RESKEY_container} || exit $OCF_ERR_GENERIC else ocf_run lxc-stop -n ${OCF_RESKEY_container} -k || exit $OCF_ERR_GENERIC fi ocf_log info "Container" ${OCF_RESKEY_container} "stopped" ocf_run rm -f $TRANS_RES_STATE return $OCF_SUCCESS } LXC_status() { # run lxc-info with -s option for LXC-0.7.5 or later local lxc_info_opt="-s" ocf_version_cmp "`lxc_version`" 0.7.5 && lxc_info_opt="" S=`lxc-info $lxc_info_opt -n ${OCF_RESKEY_container}` ocf_log debug "State of ${OCF_RESKEY_container}: $S" if [[ "${S##* }" = "RUNNING" ]] ; then return $OCF_SUCCESS fi return $OCF_NOT_RUNNING } LXC_monitor() { LXC_status && return $OCF_SUCCESS if [ -f $TRANS_RES_STATE ]; then ocf_log err "${OCF_RESKEY_container} is not running, but state file ${TRANS_RES_STATE} exists." exit $OCF_ERR_GENERIC fi return $OCF_NOT_RUNNING } LXC_validate() { # Quick check that all required attributes are set if [ -z "${OCF_RESKEY_container}" ]; then ocf_log err "LXC container name not set!" exit $OCF_ERR_CONFIGURED fi if [ -z "${OCF_RESKEY_config}" ]; then ocf_log err "LXC configuration filename name not set!" exit $OCF_ERR_CONFIGURED fi # Tests that apply only to non-probes if ! ocf_is_probe; then if ! [ -f "${OCF_RESKEY_config}" ]; then ocf_log err "LXC configuration file \"${OCF_RESKEY_config}\" missing or not found!" exit $OCF_ERR_INSTALLED fi if ocf_is_true $OCF_RESKEY_use_screen; then check_binary screen fi check_binary lxc-start check_binary lxc-stop if ocf_version_cmp "`lxc_version`" 1.0.0 ; then check_binary lxc-ps fi check_binary lxc-info fi return $OCF_SUCCESS } if [ $# -ne 1 ]; then LXC_usage exit $OCF_ERR_ARGS fi case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; usage|help) LXC_usage exit $OCF_SUCCESS ;; esac # Everything except usage and meta-data must pass the validate test LXC_validate case $__OCF_ACTION in start) LXC_start;; stop) LXC_stop;; status) LXC_status;; monitor) LXC_monitor;; validate-all) ;; *) LXC_usage ocf_log err "$0 was called with unsupported arguments: $*" exit $OCF_ERR_UNIMPLEMENTED ;; esac rc=$? 
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" exit $rc diff --git a/heartbeat/mpathpersist.in b/heartbeat/mpathpersist.in index 43dcc7453..813a1b1fd 100644 --- a/heartbeat/mpathpersist.in +++ b/heartbeat/mpathpersist.in @@ -1,682 +1,682 @@ #!@BASH_SHELL@ # # # OCF Resource Agent compliant PERSISTENT SCSI RESERVATION on multipath devices resource script. # Testversion for a mpathpersist implementation for demo purposes by Andreas Thomas # # Copyright (c) 2017 Evgeny Nifontov, lwang@suse.com, # Andreas Tomas, # Zhu Lingshan # All Rights Reserved. # # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # # # OCF instance parameters # OCF_RESKEY_binary # OCF_RESKEY_devs # OCF_RESKEY_required_devs_no # OCF_RESKEY_reservation_type # OCF_RESKEY_master_score_base # OCF_RESKEY_master_score_dev_factor # OCF_RESKEY_master_score_delay # # TODO # # 1) PROBLEM: devices which were not accessible during 'start' action, will be never registered/reserved # TODO: 'Master' and 'Slave' registers new devs in 'monitor' action # TODO: 'Master' reserves new devs in 'monitor' action #Defaults OCF_RESKEY_mpathpersist_binary_default="mpathpersist" OCF_RESKEY_required_devs_no_default=1 OCF_RESKEY_reservation_type_default=1 OCF_RESKEY_master_score_base_default=0 OCF_RESKEY_master_score_dev_factor_default=100 OCF_RESKEY_master_score_delay_default=30 ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # set default values : ${OCF_RESKEY_mpathpersist_binary=${OCF_RESKEY_mpathpersist_binary_default}} # binary name for the resource : ${OCF_RESKEY_required_devs_no=${OCF_RESKEY_required_devs_no_default}} # number of required devices : ${OCF_RESKEY_reservation_type=${OCF_RESKEY_reservation_type_default}} # reservation type : ${OCF_RESKEY_master_score_base=${OCF_RESKEY_master_score_base_default}} # master score base : ${OCF_RESKEY_master_score_dev_factor=${OCF_RESKEY_master_score_dev_factor_default}} # device factor for master score : ${OCF_RESKEY_master_score_delay=${OCF_RESKEY_master_score_delay_default}} # delay for master score ####################################################################### meta_data() { cat < 1.1 This resource agent manages SCSI persistent reservations on multipath devices. "mpathpersist" from multipath-tools is used, please see its documentation. Should be used as multistate (Master/Slave) resource Slave registers its node id ("crm_node -i") as reservation key ( --param-sark ) on each device in the params "devs" list. 
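For example (illustrative), a node whose "crm_node -i" id is 2 would register the reservation key 0x2 on every device in the list.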
-Master reservs all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter. +Master reserves all devices from params "devs" list with reservation "--prout-type" value from "reservation_type" parameter. Please see man sg_persist(8) and mpathpersist(8) for reservation_type details. Manages SCSI persistent reservations on multipath devices The name of the binary that manages the resource. the binary name of the resource Device list. Multiple devices can be listed with blank space as separator. -Shell wildcars are allowed. +Shell wildcards are allowed. device list Minimum number of "working" devices from device list 1) existing 2) "mpathpersist --in --read-keys <device>" works (Return code 0) resource actions "start","monitor","promote" and "validate-all" return "OCF_ERR_INSTALLED" if the actual number of "working" devices is less than "required_devs_no". resource actions "stop" and "demote" try to remove reservations and registration keys from all working devices, but always return "OCF_SUCCESS" minimum number of working devices reservation type reservation type master_score_base value "master_score_base" value is used in "master_score" calculation: master_score = master_score_base + master_score_dev_factor * working_devs If set to a bigger value in the mpathpersist resource configuration on some node, this node will be "preferred" for the master role. base master_score value Working device factor in master_score calculation each "working" device provides additional value to "master_score", so the node that sees more devices will be preferred for the "Master"-role. Setting it to 0 will disable this behavior. working device factor in master_score calculation master/slave decreases/increases its master_score after a delay of "master_score_delay" seconds, so if some device gets inaccessible, the slave decreases its master_score first and the resource will not be switched over, and after this device reappears again the master increases its master_score first. This can work only if the master_score_delay is bigger than the monitor interval on both master and slave. Setting it to 0 will disable this behavior. master_score decrease/increase delay time END exit $OCF_SUCCESS } mpathpersist_init() { if ! ocf_is_root ; then ocf_log err "You must be root to perform this operation."
exit $OCF_ERR_PERM fi MPATHPERSIST="${OCF_RESKEY_mpathpersist_binary}" check_binary $MPATHPERSIST ROLE=$OCF_RESKEY_CRM_meta_role NOW=$(date +%s) RESOURCE="${OCF_RESOURCE_INSTANCE}" MASTER_SCORE_VAR_NAME="master-${OCF_RESOURCE_INSTANCE}" PENDING_VAR_NAME="pending-$MASTER_SCORE_VAR_NAME" #only works with corocync CRM_NODE="${HA_SBIN_DIR}/crm_node" NODE_ID_DEC=$($CRM_NODE -i) NODE=$($CRM_NODE -l | $GREP -w ^$NODE_ID_DEC) NODE=${NODE#$NODE_ID_DEC } NODE=${NODE% *} MASTER_SCORE_ATTRIBUTE="${HA_SBIN_DIR}/crm_attribute --lifetime=reboot --name=$MASTER_SCORE_VAR_NAME --node=$NODE" CRM_MASTER="${HA_SBIN_DIR}/crm_master --lifetime=reboot" PENDING_ATTRIBUTE="${HA_SBIN_DIR}/crm_attribute --lifetime=reboot --name=$PENDING_VAR_NAME --node=$NODE" NODE_ID_HEX=$(printf '0x%x' $NODE_ID_DEC) if [ -z "$NODE_ID_HEX" ]; then ocf_log err "Couldn't get node id with \"$CRM_NODE\"" exit $OCF_ERR_INSTALLED fi ocf_log debug "$RESOURCE: NODE:$NODE, ROLE:$ROLE, NODE_ID DEC:$NODE_ID_DEC HEX:$NODE_ID_HEX" DEVS="${OCF_RESKEY_devs}" REQUIRED_DEVS_NO="${OCF_RESKEY_required_devs_no}" RESERVATION_TYPE="${OCF_RESKEY_reservation_type}" MASTER_SCORE_BASE="${OCF_RESKEY_master_score_base}" MASTER_SCORE_DEV_FACTOR="${OCF_RESKEY_master_score_dev_factor}" MASTER_SCORE_DELAY="${OCF_RESKEY_master_score_delay}" ocf_log debug "$RESOURCE: DEVS=$DEVS" ocf_log debug "$RESOURCE: REQUIRED_DEVS_NO=$REQUIRED_DEVS_NO" ocf_log debug "$RESOURCE: RESERVATION_TYPE=$RESERVATION_TYPE" ocf_log debug "$RESOURCE: MASTER_SCORE_BASE=$MASTER_SCORE_BASE" ocf_log debug "$RESOURCE: MASTER_SCORE_DEV_FACTOR=$MASTER_SCORE_DEV_FACTOR" ocf_log debug "$RESOURCE: MASTER_SCORE_DELAY=$MASTER_SCORE_DELAY" #expand path wildcards DEVS=$(echo $DEVS) if [ -z "$DEVS" ]; then ocf_log err "\"devs\" not defined" exit $OCF_ERR_INSTALLED fi mpathpersist_check_devs mpathpersist_get_status } mpathpersist_action_usage() { cat <&1` if [ $? -eq 0 ]; then WORKING_DEVS+=($dev) echo "$READ_KEYS" | $GREP -w $NODE_ID_HEX\$ >/dev/null if [ $? -eq 0 ]; then REGISTERED_DEVS+=($dev) READ_RESERVATION=`$MPATHPERSIST --in --read-reservation $dev 2>&1` if [ $? -eq 0 ]; then echo "$READ_RESERVATION" | $GREP -w $NODE_ID_HEX\$ >/dev/null if [ $? 
-eq 0 ]; then RESERVED_DEVS+=($dev) fi reservation_key=`echo $READ_RESERVATION | $GREP -o 'Key = 0x[0-9a-f]*' | $GREP -o '0x[0-9a-f]*'` if [ -n "$reservation_key" ]; then DEVS_WITH_RESERVATION+=($dev) RESERVATION_KEYS+=($reservation_key) fi fi fi fi done WORKING_DEVS_NO=${#WORKING_DEVS[*]} ocf_log debug "$RESOURCE: working devices: `mpathpersist_echo_array ${WORKING_DEVS[*]}`" ocf_log debug "$RESOURCE: number of working devices: $WORKING_DEVS_NO" ocf_log debug "$RESOURCE: registered devices: `mpathpersist_echo_array ${REGISTERED_DEVS[*]}`" ocf_log debug "$RESOURCE: reserved devices: `mpathpersist_echo_array ${RESERVED_DEVS[*]}`" ocf_log debug "$RESOURCE: devices with reservation: `mpathpersist_echo_array ${DEVS_WITH_RESERVATION[*]}`" ocf_log debug "$RESOURCE: reservation keys: `mpathpersist_echo_array ${RESERVATION_KEYS[*]}`" MASTER_SCORE=$(($MASTER_SCORE_BASE + $MASTER_SCORE_DEV_FACTOR*$WORKING_DEVS_NO)) ocf_log debug "$RESOURCE: master_score: $MASTER_SCORE_BASE + $MASTER_SCORE_DEV_FACTOR*$WORKING_DEVS_NO = $MASTER_SCORE" } mpathpersist_check_devs() { for dev in $DEVS do if [ -e "$dev" ]; then EXISTING_DEVS+=($dev) fi done EXISTING_DEVS_NO=${#EXISTING_DEVS[*]} if [ $EXISTING_DEVS_NO -lt $REQUIRED_DEVS_NO ]; then ocf_log err "Number of existing devices=$EXISTING_DEVS_NO less then required_devs_no=$REQUIRED_DEVS_NO" exit $OCF_ERR_INSTALLED fi } mpathpersist_is_registered() { for registered_dev in ${REGISTERED_DEVS[*]} do if [ "$registered_dev" == "$1" ]; then return 0 fi done return 1 } mpathpersist_get_reservation_key() { for array_index in ${!DEVS_WITH_RESERVATION[*]} do if [ "${DEVS_WITH_RESERVATION[$array_index]}" == "$1" ]; then echo ${RESERVATION_KEYS[$array_index]} return 0 fi done echo "" } mpathpersist_echo_array() { str_count=0 arr_str="" for str in "$@" do arr_str="$arr_str[$str_count]:$str " str_count=$(($str_count+1)) done echo $arr_str } mpathpersist_parse_act_pending() { ACT_PENDING_TS=0 ACT_PENDING_SCORE=0 if [ -n "$ACT_PENDING" ]; then ACT_PENDING_TS=${ACT_PENDING%%_*} ACT_PENDING_SCORE=${ACT_PENDING##*_} fi } mpathpersist_clear_pending() { if [ -n "$ACT_PENDING" ]; then DO_PENDING_UPDATE="YES" NEW_PENDING="" fi } mpathpersist_new_master_score() { DO_MASTER_SCORE_UPDATE="YES" NEW_MASTER_SCORE=$1 } mpathpersist_new_pending() { DO_PENDING_UPDATE="YES" NEW_PENDING=$1 } # Functions invoked by resource manager actions mpathpersist_action_start() { ocf_run $MASTER_SCORE_ATTRIBUTE --update=$MASTER_SCORE ocf_run $PENDING_ATTRIBUTE --update="" if [ $WORKING_DEVS_NO -lt $REQUIRED_DEVS_NO ]; then ocf_log err "$RESOURCE: Number of working devices=$WORKING_DEVS_NO less then required_devs_no=$REQUIRED_DEVS_NO" exit $OCF_ERR_GENERIC fi for dev in ${WORKING_DEVS[*]} do if mpathpersist_is_registered $dev ; then : OK else ocf_run $MPATHPERSIST --out --register --param-sark=$NODE_ID_HEX $dev if [ $? 
-ne $OCF_SUCCESS ] then return $OCF_ERR_GENERIC fi fi done return $OCF_SUCCESS } mpathpersist_action_stop() { if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then ocf_log debug "$RESOURCE stop: already no registrations" else # Clear preference for becoming master ocf_run $MASTER_SCORE_ATTRIBUTE --delete ocf_run $PENDING_ATTRIBUTE --delete for dev in ${REGISTERED_DEVS[*]} do ocf_run $MPATHPERSIST --out --register --param-rk=$NODE_ID_HEX $dev done fi return $OCF_SUCCESS } mpathpersist_action_monitor() { ACT_MASTER_SCORE=`$MASTER_SCORE_ATTRIBUTE --query --quiet 2>&1` ocf_log debug "$RESOURCE monitor: ACT_MASTER_SCORE=$ACT_MASTER_SCORE" ACT_PENDING=`$PENDING_ATTRIBUTE --query --quiet 2>&1` ocf_log debug "$RESOURCE monitor: ACT_PENDING=$ACT_PENDING" mpathpersist_parse_act_pending ocf_log debug "$RESOURCE monitor: ACT_PENDING_TS=$ACT_PENDING_TS" ocf_log debug "$RESOURCE monitor: ACT_PENDING_VAL=$ACT_PENDING_SCORE" ocf_log debug "$MASTER_SCORE, $ACT_MASTER_SCORE, $ROLE" DO_MASTER_SCORE_UPDATE="NO" DO_PENDING_UPDATE="NO" if [ -n "$ACT_MASTER_SCORE" ] then if [ $ACT_MASTER_SCORE -eq $MASTER_SCORE ]; then mpathpersist_clear_pending else case $ROLE in Master) if [ $MASTER_SCORE -lt $ACT_MASTER_SCORE ]; then if [ -n "$ACT_PENDING" ] then if [ $(($NOW-$ACT_PENDING_TS-$MASTER_SCORE_DELAY)) -ge 0 ]; then mpathpersist_new_master_score $MASTER_SCORE mpathpersist_clear_pending fi else if [ $MASTER_SCORE_DELAY -eq 0 ]; then mpathpersist_new_master_score $MASTER_SCORE mpathpersist_clear_pending else mpathpersist_new_pending "${NOW}_${MASTER_SCORE}" fi fi else mpathpersist_new_master_score $MASTER_SCORE mpathpersist_clear_pending fi ;; Slave) if [ $MASTER_SCORE -gt $ACT_MASTER_SCORE ]; then if [ -n "$ACT_PENDING" ]; then if [ $(($NOW-$ACT_PENDING_TS-$MASTER_SCORE_DELAY)) -ge 0 ]; then mpathpersist_new_master_score $MASTER_SCORE mpathpersist_clear_pending fi else if [ $MASTER_SCORE_DELAY -eq 0 ]; then mpathpersist_new_master_score $MASTER_SCORE mpathpersist_clear_pending else mpathpersist_new_pending "${NOW}_${MASTER_SCORE}" fi fi else mpathpersist_new_master_score $MASTER_SCORE mpathpersist_clear_pending fi ;; *) ;; esac fi fi if [ $DO_MASTER_SCORE_UPDATE == "YES" ]; then ocf_run $MASTER_SCORE_ATTRIBUTE --update=$NEW_MASTER_SCORE fi if [ $DO_PENDING_UPDATE == "YES" ]; then ocf_run $PENDING_ATTRIBUTE --update=$NEW_PENDING fi if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then ocf_log debug "$RESOURCE monitor: no registrations" return $OCF_NOT_RUNNING fi if [ ${#RESERVED_DEVS[*]} -eq ${#WORKING_DEVS[*]} ]; then return $OCF_RUNNING_MASTER fi if [ ${#REGISTERED_DEVS[*]} -eq ${#WORKING_DEVS[*]} ]; then if [ $RESERVATION_TYPE -eq 7 ] || [ $RESERVATION_TYPE -eq 8 ]; then if [ ${#DEVS_WITH_RESERVATION[*]} -gt 0 ]; then return $OCF_RUNNING_MASTER else return $OCF_SUCCESS fi else return $OCF_SUCCESS fi fi ocf_log err "$RESOURCE monitor: unexpected state" return $OCF_ERR_GENERIC } mpathpersist_action_promote() { if [ ${#RESERVED_DEVS[*]} -gt 0 ]; then ocf_log info "$RESOURCE promote: already master" return $OCF_SUCCESS fi for dev in ${WORKING_DEVS[*]} do reservation_key=`mpathpersist_get_reservation_key $dev` case $RESERVATION_TYPE in 1|3|5|6) if [ -z "$reservation_key" ]; then ocf_run $MPATHPERSIST --out --reserve --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? -ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi else ocf_run $MPATHPERSIST --out --preempt --param-sark=$reservation_key --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? 
-ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi fi ;; 7|8) if [ -z "$reservation_key" ]; then ocf_run $MPATHPERSIST --out --reserve --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? -ne $OCF_SUCCESS ] then return $OCF_ERR_GENERIC fi else ocf_log info "$RESOURCE promote: there already exist an reservation holder, all registrants become reservation holders" return $OCF_SUCCESS fi ;; *) return $OCF_ERR_ARGS ;; esac done return $OCF_SUCCESS } mpathpersist_action_demote() { case $RESERVATION_TYPE in 1|3|5|6) if [ ${#RESERVED_DEVS[*]} -eq 0 ]; then ocf_log info "$RESOURCE demote: already slave" return $OCF_SUCCESS fi for dev in ${RESERVED_DEVS[*]} do ocf_run $MPATHPERSIST --out --release --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? -ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi done ;; 7|8) #in case of 7/8, --release won't release the reservation unless unregister the key. if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then ocf_log info "$RESOURCE demote: already slave" return $OCF_SUCCESS fi for dev in ${REGISTERED_DEVS[*]} do ocf_run $MPATHPERSIST --out --register --param-rk=$NODE_ID_HEX --param-sark=0 $dev if [ $? -ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi done ;; *) return $OCF_ERR_ARGS ;; esac return $OCF_SUCCESS } mpathpersist_action_notify() { local n_type="$OCF_RESKEY_CRM_meta_notify_type" local n_op="$OCF_RESKEY_CRM_meta_notify_operation" set -- $OCF_RESKEY_CRM_meta_notify_active_resource local n_active="$#" set -- $OCF_RESKEY_CRM_meta_notify_stop_resource local n_stop="$#" set -- $OCF_RESKEY_CRM_meta_notify_start_resource local n_start="$#" ocf_log debug "$RESOURCE notify: $n_type for $n_op - counts: active $n_active - starting $n_start - stopping $n_stop" return $OCF_SUCCESS } mpathpersist_action_validate_all () { if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then ocf_log err "Master options misconfigured." exit $OCF_ERR_CONFIGURED fi return $OCF_SUCCESS } if [ $# -ne 1 ]; then echo "Incorrect parameter count." mpathpersist_action_usage exit $OCF_ERR_ARGS fi ACTION=$1 case $ACTION in meta-data) meta_data ;; validate-all) mpathpersist_init mpathpersist_action_validate_all ;; start|promote|monitor|stop|demote) ocf_log debug "$RESOURCE: starting action \"$ACTION\"" mpathpersist_init mpathpersist_action_$ACTION exit $? ;; notify) mpathpersist_action_notify exit $? ;; usage|help) mpathpersist_action_usage exit $OCF_SUCCESS ;; *) mpathpersist_action_usage exit $OCF_ERR_ARGS ;; esac diff --git a/heartbeat/pgsql b/heartbeat/pgsql index 38f6ceeb7..842dc0ac4 100755 --- a/heartbeat/pgsql +++ b/heartbeat/pgsql @@ -1,2193 +1,2193 @@ #!/bin/sh # # Description: Manages a PostgreSQL Server as an OCF High-Availability # resource # # Authors: Serge Dubrouski (sergeyfd@gmail.com) -- original RA # Florian Haas (florian@linbit.com) -- makeover # Takatoshi MATSUO (matsuo.tak@gmail.com) -- support replication # David Corlette (dcorlette@netiq.com) -- add support for non-standard library locations and non-standard port # # Copyright: 2006-2012 Serge Dubrouski # and other Linux-HA contributors # License: GNU General Public License (GPL) # ############################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # Use runuser if available for SELinux. 
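# (Note: runuser behaves like su for this purpose but is intended for use by
#  root-owned scripts and never prompts for a password; on SELinux-enabled
#  systems it is presumably the safer choice, which is why it is preferred
#  here and su is kept only as a fallback.)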
if [ -x /sbin/runuser ]; then SU=runuser else SU=su fi # # Get PostgreSQL Configuration parameter # get_pgsql_param() { local param_name param_name=$1 perl_code="if (/^\s*$param_name[\s=]+\s*(.*)$/) { \$dir=\$1; \$dir =~ s/\s*\#.*//; \$dir =~ s/^'(\S*)'/\$1/; print \$dir;}" perl -ne "$perl_code" < $OCF_RESKEY_config } # Defaults OCF_RESKEY_pgctl_default=/usr/bin/pg_ctl OCF_RESKEY_psql_default=/usr/bin/psql OCF_RESKEY_pgdata_default=/var/lib/pgsql/data OCF_RESKEY_pgdba_default=postgres OCF_RESKEY_pghost_default="" OCF_RESKEY_pgport_default=5432 OCF_RESKEY_pglibs_default=/usr/lib OCF_RESKEY_start_opt_default="" OCF_RESKEY_ctl_opt_default="" OCF_RESKEY_pgdb_default=template1 OCF_RESKEY_logfile_default=/dev/null OCF_RESKEY_stop_escalate_default=90 OCF_RESKEY_monitor_user_default="" OCF_RESKEY_monitor_password_default="" OCF_RESKEY_monitor_sql_default="select now();" OCF_RESKEY_check_wal_receiver_default="false" # Defaults for replication OCF_RESKEY_rep_mode_default=none OCF_RESKEY_node_list_default="" OCF_RESKEY_restore_command_default="" OCF_RESKEY_archive_cleanup_command_default="" OCF_RESKEY_recovery_end_command_default="" OCF_RESKEY_master_ip_default="" OCF_RESKEY_repuser_default="postgres" OCF_RESKEY_primary_conninfo_opt_default="" OCF_RESKEY_restart_on_promote_default="false" OCF_RESKEY_tmpdir_default="/var/lib/pgsql/tmp" OCF_RESKEY_xlog_check_count_default="3" OCF_RESKEY_crm_attr_timeout_default="5" OCF_RESKEY_stop_escalate_in_slave_default=90 OCF_RESKEY_replication_slot_name_default="" : ${OCF_RESKEY_pgctl=${OCF_RESKEY_pgctl_default}} : ${OCF_RESKEY_psql=${OCF_RESKEY_psql_default}} : ${OCF_RESKEY_pgdata=${OCF_RESKEY_pgdata_default}} : ${OCF_RESKEY_pgdba=${OCF_RESKEY_pgdba_default}} : ${OCF_RESKEY_pghost=${OCF_RESKEY_pghost_default}} : ${OCF_RESKEY_pgport=${OCF_RESKEY_pgport_default}} : ${OCF_RESKEY_pglibs=${OCF_RESKEY_pglibs_default}} : ${OCF_RESKEY_config=${OCF_RESKEY_pgdata}/postgresql.conf} : ${OCF_RESKEY_start_opt=${OCF_RESKEY_start_opt_default}} : ${OCF_RESKEY_ctl_opt=${OCF_RESKEY_ctl_opt_default}} : ${OCF_RESKEY_pgdb=${OCF_RESKEY_pgdb_default}} : ${OCF_RESKEY_logfile=${OCF_RESKEY_logfile_default}} : ${OCF_RESKEY_stop_escalate=${OCF_RESKEY_stop_escalate_default}} : ${OCF_RESKEY_monitor_user=${OCF_RESKEY_monitor_user_default}} : ${OCF_RESKEY_monitor_password=${OCF_RESKEY_monitor_password_default}} : ${OCF_RESKEY_monitor_sql=${OCF_RESKEY_monitor_sql_default}} : ${OCF_RESKEY_check_wal_receiver=${OCF_RESKEY_check_wal_receiver_default}} # for replication : ${OCF_RESKEY_rep_mode=${OCF_RESKEY_rep_mode_default}} : ${OCF_RESKEY_node_list=${OCF_RESKEY_node_list_default}} : ${OCF_RESKEY_restore_command=${OCF_RESKEY_restore_command_default}} : ${OCF_RESKEY_archive_cleanup_command=${OCF_RESKEY_archive_cleanup_command_default}} : ${OCF_RESKEY_recovery_end_command=${OCF_RESKEY_recovery_end_command_default}} : ${OCF_RESKEY_master_ip=${OCF_RESKEY_master_ip_default}} : ${OCF_RESKEY_repuser=${OCF_RESKEY_repuser_default}} : ${OCF_RESKEY_primary_conninfo_opt=${OCF_RESKEY_primary_conninfo_opt_default}} : ${OCF_RESKEY_restart_on_promote=${OCF_RESKEY_restart_on_promote_default}} : ${OCF_RESKEY_tmpdir=${OCF_RESKEY_tmpdir_default}} : ${OCF_RESKEY_xlog_check_count=${OCF_RESKEY_xlog_check_count_default}} : ${OCF_RESKEY_crm_attr_timeout=${OCF_RESKEY_crm_attr_timeout_default}} : ${OCF_RESKEY_stop_escalate_in_slave=${OCF_RESKEY_stop_escalate_in_slave_default}} : ${OCF_RESKEY_replication_slot_name=${OCF_RESKEY_replication_slot_name_default}} usage() { cat < 1.0 Resource script for PostgreSQL. 
It manages a PostgreSQL as an HA resource. Manages a PostgreSQL database instance Path to pg_ctl command. pgctl Start options (-o start_opt in pg_ctl). "-i -p 5432" for example. start_opt Additional pg_ctl options (-w, -W, etc.). ctl_opt Path to psql command. psql Path to PostgreSQL data directory. pgdata User that owns PostgreSQL. pgdba Hostname/IP address where PostgreSQL is listening. pghost Port where PostgreSQL is listening. pgport Custom location of the Postgres libraries. If not set, the standard location will be used. pglibs PostgreSQL user that the pgsql RA will use for monitor operations. If it's not set, the pgdba user will be used. monitor_user Password for monitor user. monitor_password SQL script that will be used for monitor operations. monitor_sql Path to the PostgreSQL configuration file for the instance. Configuration file Database that will be used for monitoring. pgdb Path to PostgreSQL server log output file. logfile Unix socket directory for PostgreSQL. If you use PostgreSQL 9.3 or higher and define unix_socket_directories in the postgresql.conf, then you must set socketdir to determine which directory is used for the psql command. socketdir Number of seconds to wait for stop (using -m fast) before resorting to -m immediate stop escalation Replication mode may be set to "async", "sync" or "slave". These modes require PostgreSQL 9.1 or later. Once set, "async" and "sync" require node_list, master_ip, and restore_command parameters, as well as configuring PostgreSQL for replication (in postgresql.conf and pg_hba.conf). "slave" means that the RA only makes recovery.conf before starting to connect to the primary which is running somewhere. -It dosen't need master/slave setting. +It doesn't need master/slave setting. It requires the master_ip and restore_command parameters. rep_mode All node names. Please separate each node name with a space. This is optional for replication. Defaults to all nodes in the cluster node list restore_command for recovery.conf. This is required for replication. restore_command archive_cleanup_command for recovery.conf. This is used for replication and is optional. archive_cleanup_command recovery_end_command for recovery.conf. This is used for replication and is optional. recovery_end_command Master's floating IP address to be connected from hot standby. This parameter is used for "primary_conninfo" in recovery.conf. This is required for replication. master ip User used to connect to the master server. This parameter is used for "primary_conninfo" in recovery.conf. This is required for replication. repuser primary_conninfo options of recovery.conf except host, port, user and application_name. This is optional for replication. primary_conninfo_opt If this is true, the RA deletes recovery.conf and restarts PostgreSQL on promote to keep the Timeline ID. It probably makes fail-over slower. It's recommended to set the on-fail action of promote to fence. This is optional for replication. restart_on_promote Set this option when using replication slots. Only lower case letters, numbers and underscores may be used for replication_slot_name. A replication slot is created for each node, with the node name appended as a postfix. For example, if replication_slot_name is "sample" and two slaves "node1" and "node2" connect to their slots, the slot names are "sample_node1" and "sample_node2". If the node name contains an upper case letter, hyphen or dot, those characters will be converted to a lower case letter or an underscore. For example, Node-1.example.com becomes node_1_example_com.
-pgsql RA doesn't monitor and delete the repliation slot. +pgsql RA doesn't monitor and delete the replication slot. When the slave node has been disconnected in failure or the like, execute one of the following manually. Otherwise it may eventually cause a disk full because the master node will continue to accumulate the unsent WAL. 1. recover and reconnect the slave node to the master node as soon as possible. 2. delete the slot on the master node by following psql command. $ select pg_drop_replication_slot('replication_slot_name'); replication_slot_name Path to temporary directory. This is optional for replication. tmpdir Number of checks of xlog on monitor before promote. This is optional for replication. Note: For backward compatibility, the terms are unified with PostgreSQL 9. If you are using PostgreSQL 10 or later, replace "xlog" with "wal". Likewise, replacing "location" with "lsn". xlog check count The timeout of crm_attribute forever update command. Default value is 5 seconds. This is optional for replication. The timeout of crm_attribute forever update command. Number of seconds to wait for stop (using -m fast) before resorting to -m immediate in slave state. This is optional for replication. stop escalation_in_slave If this is true, RA checks wal_receiver process on monitor and notifies its status using "(resource name)-receiver-status" attribute. It's useful for checking whether PostgreSQL (hot standby) connects to primary. The attribute shows status as "normal" or "normal (master)" or "ERROR". Note that if you configure PostgreSQL as master/slave resource, then wal receiver is not running in the master and the attribute shows status as "normal (master)" consistently because it is normal status. check_wal_receiver EOF } # # Run the given command in the Resource owner environment... # runasowner() { local quietrun="" local loglevel="-err" local var for var in 1 2 do case "$1" in "-q") quietrun="-q" shift 1;; "warn"|"err") loglevel="-$1" shift 1;; *) ;; esac done ocf_run $quietrun $loglevel $SU $OCF_RESKEY_pgdba -c "cd $OCF_RESKEY_pgdata; $*" } # # Shell escape # escape_string() { echo "$*" | sed -e "s/'/'\\\\''/g" } # # methods: What methods/operations do we support? # pgsql_methods() { cat </dev/null 2>&1" return $? fi # No PID file false } pgsql_wal_receiver_status() { local PID local receiver_parent_pids local pgsql_real_monitor_status=$1 PID=`head -n 1 $PIDFILE` receiver_parent_pids=`ps -ef | tr -s " " | grep "[w]al receiver process" | cut -d " " -f 3` if echo "$receiver_parent_pids" | grep -q -w "$PID" ; then attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal" -q return 0 fi if [ $pgsql_real_monitor_status -eq "$OCF_RUNNING_MASTER" ]; then attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "normal (master)" -q return 0 fi attrd_updater -n "$PGSQL_WAL_RECEIVER_STATUS_ATTR" -v "ERROR" -q ocf_log warn "wal receiver process is not running" return 1 } # # pgsql_real_monitor # pgsql_real_monitor() { local loglevel local rc local output # Set the log level of the error message loglevel=${1:-err} if ! pgsql_status then ocf_log info "PostgreSQL is down" return $OCF_NOT_RUNNING fi if is_replication; then #Check replication state output=`exec_sql "${CHECK_MS_SQL}"` rc=$? if [ $rc -ne 0 ]; then report_psql_error $rc $loglevel "Can't get PostgreSQL recovery status." return $OCF_ERR_GENERIC fi case "$output" in f) ocf_log debug "PostgreSQL is running as a primary." 
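# With the default monitor_sql, a primary (pg_is_in_recovery() = 'f') is
# reported as OCF_RUNNING_MASTER right away; a user-supplied monitor_sql is
# still executed below before the master status is returned.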
if [ "$OCF_RESKEY_monitor_sql" = "$OCF_RESKEY_monitor_sql_default" ]; then return $OCF_RUNNING_MASTER fi ;; t) ocf_log debug "PostgreSQL is running as a hot standby." return $OCF_SUCCESS;; *) ocf_exit_reason "$CHECK_MS_SQL output is $output" return $OCF_ERR_GENERIC;; esac fi OCF_RESKEY_monitor_sql=`escape_string "$OCF_RESKEY_monitor_sql"` runasowner -q $loglevel "$OCF_RESKEY_psql $psql_options \ -c '$OCF_RESKEY_monitor_sql'" rc=$? if [ $rc -ne 0 ]; then report_psql_error $rc $loglevel "PostgreSQL $OCF_RESKEY_pgdb isn't running." return $OCF_ERR_GENERIC fi if is_replication; then return $OCF_RUNNING_MASTER fi return $OCF_SUCCESS } pgsql_replication_monitor() { local rc rc=$1 if [ $rc -ne $OCF_SUCCESS -a $rc -ne "$OCF_RUNNING_MASTER" ]; then return $rc fi # If I am Master if [ $rc -eq $OCF_RUNNING_MASTER ]; then change_data_status "$NODENAME" "LATEST" change_pgsql_status "$NODENAME" "PRI" control_slave_status || return $OCF_ERR_GENERIC if [ "$RE_CONTROL_SLAVE" = "true" ]; then sleep 2 ocf_log info "re-controlling slave status." RE_CONTROL_SLAVE="none" control_slave_status || return $OCF_ERR_GENERIC fi return $rc fi # I can't get master node name from $OCF_RESKEY_CRM_meta_notify_master_uname on monitor, # so I will get master node name using crm_mon -n print_crm_mon | tr -d "\t" | tr -d " " | grep -q "^${RESOURCE_NAME}[(:].*[):].*Master" if [ $? -ne 0 ] ; then # If I am Slave and Master is not exist ocf_log info "Master does not exist." change_pgsql_status "$NODENAME" "HS:alone" have_master_right if [ $? -eq 0 ]; then rm -f ${XLOG_NOTE_FILE}.* fi else output=`exec_with_retry 0 $CRM_ATTR_FOREVER -N "$NODENAME" \ -n "$PGSQL_DATA_STATUS_ATTR" -G -q` if [ "$output" = "DISCONNECT" ]; then change_pgsql_status "$NODENAME" "HS:alone" fi fi return $rc } #pgsql_monitor: pgsql_real_monitor() wrapper for replication pgsql_monitor() { local rc pgsql_real_monitor rc=$? if ocf_is_true ${OCF_RESKEY_check_wal_receiver}; then pgsql_wal_receiver_status $rc fi if ! is_replication; then return $rc else pgsql_replication_monitor $rc return $? fi } # pgsql_post_demote pgsql_post_demote() { DEMOTE_NODE=`echo $OCF_RESKEY_CRM_meta_notify_demote_uname | sed "s/ /\n/g" | head -1 | tr '[A-Z]' '[a-z]'` ocf_log debug "post-demote called. Demote uname is $DEMOTE_NODE" if [ "$DEMOTE_NODE" != "$NODENAME" ]; then if ! echo $OCF_RESKEY_CRM_meta_notify_master_uname | tr '[A-Z]' '[a-z]' | grep $NODENAME; then show_master_baseline change_pgsql_status "$NODENAME" "HS:alone" fi fi return $OCF_SUCCESS } pgsql_pre_promote() { local master_baseline local my_master_baseline local cmp_location local number_of_nodes # If my data is newer than new master's one, I fail my resource. PROMOTE_NODE=`echo $OCF_RESKEY_CRM_meta_notify_promote_uname | \ sed "s/ /\n/g" | head -1 | tr '[A-Z]' '[a-z]'` number_of_nodes=`echo $NODE_LIST | wc -w` if [ $number_of_nodes -ge 3 -a \ "$OCF_RESKEY_rep_mode" = "sync" -a \ "$PROMOTE_NODE" != "$NODENAME" ]; then master_baseline=`$CRM_ATTR_REBOOT -N "$PROMOTE_NODE" -n \ "$PGSQL_MASTER_BASELINE" -G -q 2>/dev/null` if [ $? -eq 0 ]; then my_master_baseline=`$CRM_ATTR_REBOOT -N "$NODENAME" -n \ "$PGSQL_MASTER_BASELINE" -G -q 2>/dev/null` # get older location cmp_location=`printf "$master_baseline\n$my_master_baseline\n" |\ sort | head -1` if [ "$cmp_location" != "$my_master_baseline" ]; then # We used to set the failcount to INF for the resource here in # order to move the master to the other node. 
However, setting # the failcount should be done only by the CRM and so this use # got deprecated in pacemaker version 1.1.17. Now we do the # "ban resource from the node". ocf_exit_reason "My data is newer than new master's one. New master's location : $master_baseline" exec_with_retry 0 $CRM_RESOURCE -B -r $OCF_RESOURCE_INSTANCE -N $NODENAME -Q return $OCF_ERR_GENERIC fi fi fi return $OCF_SUCCESS } pgsql_notify() { local type="${OCF_RESKEY_CRM_meta_notify_type}" local op="${OCF_RESKEY_CRM_meta_notify_operation}" local rc if ! is_replication; then return $OCF_SUCCESS fi ocf_log debug "notify: ${type} for ${op}" case $type in pre) case $op in promote) pgsql_pre_promote return $? ;; esac ;; post) case $op in promote) delete_xlog_location PROMOTE_NODE=`echo $OCF_RESKEY_CRM_meta_notify_promote_uname | \ sed "s/ /\n/g" | head -1 | tr '[A-Z]' '[a-z]'` if [ "$PROMOTE_NODE" != "$NODENAME" ]; then delete_master_baseline fi return $OCF_SUCCESS ;; demote) pgsql_post_demote return $? ;; start|stop) MASTER_NODE=`echo $OCF_RESKEY_CRM_meta_notify_master_uname | \ sed "s/ /\n/g" | head -1 | tr '[A-Z]' '[a-z]'` if [ "$NODENAME" = "$MASTER_NODE" ]; then control_slave_status fi return $OCF_SUCCESS ;; esac ;; esac return $OCF_SUCCESS } control_slave_status() { local rc local data_status local target local all_data_status local tmp_data_status local number_of_nodes all_data_status=`exec_sql "${CHECK_REPLICATION_STATE_SQL}"` rc=$? if [ $rc -eq 0 ]; then if [ -n "$all_data_status" ]; then all_data_status=`echo $all_data_status | sed "s/\n/ /g"` fi else report_psql_error $rc err "Can't get PostgreSQL replication status." return 1 fi number_of_nodes=`echo $NODE_LIST | wc -w` for target in $NODE_LIST; do if [ "$target" = "$NODENAME" ]; then continue fi data_status="DISCONNECT" if [ -n "$all_data_status" ]; then for tmp_data_status in $all_data_status; do if ! echo $tmp_data_status | grep -q "^${target}|"; then continue fi data_status=`echo $tmp_data_status | cut -d "|" -f 2,3` ocf_log debug "node_name and data_status is $tmp_data_status" break done fi case "$data_status" in "STREAMING|SYNC") change_data_status "$target" "$data_status" change_master_score "$target" "$CAN_PROMOTE" change_pgsql_status "$target" "HS:sync" ;; "STREAMING|ASYNC") change_data_status "$target" "$data_status" if [ "$OCF_RESKEY_rep_mode" = "sync" ]; then change_master_score "$target" "$CAN_NOT_PROMOTE" set_sync_mode "$target" else if [ $number_of_nodes -le 2 ]; then change_master_score "$target" "$CAN_PROMOTE" else # I can't determine which slave's data is newest in async mode. change_master_score "$target" "$CAN_NOT_PROMOTE" fi fi change_pgsql_status "$target" "HS:async" ;; "STREAMING|POTENTIAL") change_data_status "$target" "$data_status" change_master_score "$target" "$CAN_NOT_PROMOTE" change_pgsql_status "$target" "HS:potential" ;; "DISCONNECT") change_data_status "$target" "$data_status" change_master_score "$target" "$CAN_NOT_PROMOTE" if [ "$OCF_RESKEY_rep_mode" = "sync" ]; then set_async_mode "$target" fi ;; *) change_data_status "$target" "$data_status" change_master_score "$target" "$CAN_NOT_PROMOTE" if [ "$OCF_RESKEY_rep_mode" = "sync" ]; then set_async_mode "$target" fi change_pgsql_status "$target" "HS:connected" ;; esac done return 0 } have_master_right() { local old local new local output local data_status local node local mylocation local count local newestXlog local oldfile local newfile ocf_log debug "Checking if I have a master right." 
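# have_master_right decides whether this node may claim the master role once
# the primary is gone: it first rejects nodes whose pgsql-data-status marks
# their data as out of date, then snapshots every node's last xlog/WAL
# location (stored as a transient node attribute) into ${XLOG_NOTE_FILE}.<n>.
# Only after OCF_RESKEY_xlog_check_count consecutive identical snapshots does
# it compare the locations - they are zero-padded 16-digit strings (see
# get_my_location), so a plain string sort orders them - and, if this node
# holds the newest one, it raises its own master preference via crm_master.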
data_status=`$CRM_ATTR_FOREVER -N "$NODENAME" -n \ "$PGSQL_DATA_STATUS_ATTR" -G -q 2>/dev/null` if [ "$OCF_RESKEY_rep_mode" = "sync" ]; then if [ -n "$data_status" -a "$data_status" != "STREAMING|SYNC" -a \ "$data_status" != "LATEST" ]; then ocf_log warn "My data is out-of-date. status=$data_status" return 1 fi else if [ -n "$data_status" -a "$data_status" != "STREAMING|SYNC" -a \ "$data_status" != "STREAMING|ASYNC" -a \ "$data_status" != "LATEST" ]; then ocf_log warn "My data is out-of-date. status=$data_status" return 1 fi fi ocf_log info "My data status=$data_status." show_xlog_location if [ $? -ne 0 ]; then ocf_exit_reason "Failed to show my xlog location." exit $OCF_ERR_GENERIC fi old=0 for count in `seq $OCF_RESKEY_xlog_check_count`; do if [ -f ${XLOG_NOTE_FILE}.$count ]; then old=$count continue fi break done new=`expr $old + 1` # get xlog locations of all nodes for node in ${NODE_LIST}; do output=`$CRM_ATTR_REBOOT -N "$node" -n \ "$PGSQL_XLOG_LOC_NAME" -G -q 2>/dev/null` if [ $? -ne 0 ]; then ocf_log warn "Can't get $node xlog location." continue else ocf_log info "$node xlog location : $output" echo "$node $output" >> ${XLOG_NOTE_FILE}.${new} if [ "$node" = "$NODENAME" ]; then mylocation=$output fi fi done oldfile=`cat ${XLOG_NOTE_FILE}.${old} 2>/dev/null` newfile=`cat ${XLOG_NOTE_FILE}.${new} 2>/dev/null` if [ "$oldfile" != "$newfile" ]; then # reset counter rm -f ${XLOG_NOTE_FILE}.* printf "$newfile\n" > ${XLOG_NOTE_FILE}.0 return 1 fi if [ "$new" -ge "$OCF_RESKEY_xlog_check_count" ]; then newestXlog=`printf "$newfile\n" | sort -t " " -k 2,3 -r | \ head -1 | cut -d " " -f 2` if [ "$newestXlog" = "$mylocation" ]; then ocf_log info "I have a master right." exec_with_retry 5 $CRM_MASTER -v $PROMOTE_ME return 0 fi change_data_status "$NODENAME" "DISCONNECT" ocf_log info "I don't have correct master data." # reset counter rm -f ${XLOG_NOTE_FILE}.* printf "$newfile\n" > ${XLOG_NOTE_FILE}.0 fi return 1 } is_replication() { if [ "$OCF_RESKEY_rep_mode" != "none" -a "$OCF_RESKEY_rep_mode" != "slave" ]; then return 0 fi return 1 } use_replication_slot() { if [ -n "$OCF_RESKEY_replication_slot_name" ]; then return 0 fi return 1 } create_replication_slot_name() { local number_of_nodes=0 local target local replication_slot_name local replication_slot_name_list_tmp local replication_slot_name_list if [ -n "$NODE_LIST" ]; then number_of_nodes=`echo $NODE_LIST | wc -w` fi if [ $number_of_nodes -le 0 ]; then replication_slot_name_list="" # The Master node should have some slots equal to the number of Slaves, and # the Slave nodes connect to their dedicated slot on the Master. # To ensuring that the slots name are each unique, add postfix to $OCF_RESKEY_replication_slot. # The postfix is "_$target". else for target in $NODE_LIST do if [ "$target" != "$NODENAME" ]; then # The Uppercase, "-" and "." don't allow to use in slot_name. # If the NODENAME contains them, convert upper case to lower case and "_" and "." to "_". target=`echo "$target" | tr 'A-Z.-' 'a-z__'` replication_slot_name="$OCF_RESKEY_replication_slot_name"_"$target" replication_slot_name_list_tmp="$replication_slot_name_list" replication_slot_name_list="$replication_slot_name_list_tmp $replication_slot_name" fi done fi echo $replication_slot_name_list } delete_replication_slot(){ DELETE_REPLICATION_SLOT_sql="SELECT pg_drop_replication_slot('$1');" output=`exec_sql "$DELETE_REPLICATION_SLOT_sql"` return $? 
} delete_replication_slots() { local replication_slot_name_list local replication_slot_name replication_slot_name_list=`create_replication_slot_name` ocf_log debug "replication slot names are $replication_slot_name_list." for replication_slot_name in $replication_slot_name_list do if [ `check_replication_slot $replication_slot_name` = "1" ]; then delete_replication_slot $replication_slot_name if [ $? -eq 0 ]; then ocf_log info "PostgreSQL delete the replication slot($replication_slot_name)." else ocf_exit_reason "$output" return $OCF_ERR_GENERIC fi fi done } create_replication_slots() { local replication_slot_name local replication_slot_name_list local output local rc local CREATE_REPLICATION_SLOT_sql local DELETE_REPLICATION_SLOT_sql replication_slot_name_list=`create_replication_slot_name` ocf_log debug "replication slot names are $replication_slot_name_list." for replication_slot_name in $replication_slot_name_list do # If the same name slot is already exists, initialize(delete and create) the slot. if [ `check_replication_slot $replication_slot_name` = "1" ]; then delete_replication_slot $replication_slot_name if [ $? -eq 0 ]; then ocf_log info "PostgreSQL delete the replication slot($replication_slot_name)." else ocf_exit_reason "$output" return $OCF_ERR_GENERIC fi fi CREATE_REPLICATION_SLOT_sql="SELECT pg_create_physical_replication_slot('$replication_slot_name');" output=`exec_sql "$CREATE_REPLICATION_SLOT_sql"` rc=$? if [ $rc -eq 0 ]; then ocf_log info "PostgreSQL creates the replication slot($replication_slot_name)." else ocf_exit_reason "$output" return $OCF_ERR_GENERIC fi done return 0 } # This function check the replication slot does exists. check_replication_slot(){ local replication_slot_name=$1 local output local CHECK_REPLICATION_SLOT_sql="SELECT count(*) FROM pg_replication_slots WHERE slot_name = '$replication_slot_name'" output=`exec_sql "$CHECK_REPLICATION_SLOT_sql"` echo "$output" } # On postgreSQL 10 or later, "location" means "lsn". get_my_location() { local rc local output local replay_loc local receive_loc local output1 local output2 local log1 local log2 local newer_location output=`exec_sql "$CHECK_XLOG_LOC_SQL"` rc=$? if [ $rc -ne 0 ]; then report_psql_error $rc err "Can't get my xlog location." return 1 fi replay_loc=`echo $output | cut -d "|" -f 1` receive_loc=`echo $output | cut -d "|" -f 2` output1=`echo "$replay_loc" | cut -d "/" -f 1` output2=`echo "$replay_loc" | cut -d "/" -f 2` log1=`printf "%08s\n" $output1 | sed "s/ /0/g"` log2=`printf "%08s\n" $output2 | sed "s/ /0/g"` replay_loc="${log1}${log2}" output1=`echo "$receive_loc" | cut -d "/" -f 1` output2=`echo "$receive_loc" | cut -d "/" -f 2` log1=`printf "%08s\n" $output1 | sed "s/ /0/g"` log2=`printf "%08s\n" $output2 | sed "s/ /0/g"` receive_loc="${log1}${log2}" newer_location=`printf "$replay_loc\n$receive_loc" | sort -r | head -1` echo "$newer_location" return 0 } # On postgreSQL 10 or later, "xlog_location" means "wal_lsn". show_xlog_location() { local location location=`get_my_location` || return 1 exec_with_retry 0 $CRM_ATTR_REBOOT -N "$NODENAME" -n "$PGSQL_XLOG_LOC_NAME" -v "$location" } # On postgreSQL 10 or later, "xlog_location" means "wal_lsn". delete_xlog_location() { exec_with_retry 5 $CRM_ATTR_REBOOT -N "$NODENAME" -n "$PGSQL_XLOG_LOC_NAME" -D } show_master_baseline() { local rc local location location=`get_my_location` ocf_log info "My master baseline : $location." 
exec_with_retry 0 $CRM_ATTR_REBOOT -N "$NODENAME" -n "$PGSQL_MASTER_BASELINE" -v "$location" } delete_master_baseline() { exec_with_retry 5 $CRM_ATTR_REBOOT -N "$NODENAME" -n "$PGSQL_MASTER_BASELINE" -D } set_async_mode_all() { [ "$OCF_RESKEY_rep_mode" = "sync" ] || return 0 ocf_log info "Set all nodes into async mode." runasowner -q err "echo \"synchronous_standby_names = ''\" > \"$REP_MODE_CONF\"" if [ $? -ne 0 ]; then ocf_exit_reason "Can't set all nodes into async mode." return 1 fi return 0 } set_async_mode() { cat $REP_MODE_CONF | grep -q -E "(\"$1\")|([,' ]$1[,' ])" if [ $? -eq 0 ]; then ocf_log info "Setup $1 into async mode." runasowner -q err "echo \"synchronous_standby_names = ''\" > \"$REP_MODE_CONF\"" else ocf_log debug "$1 is already in async mode." return 0 fi exec_with_retry 0 reload_conf } set_sync_mode() { local sync_node_in_conf sync_node_in_conf=`cat $REP_MODE_CONF | cut -d "'" -f 2` if [ -n "$sync_node_in_conf" ]; then ocf_log debug "$sync_node_in_conf is already sync mode." else ocf_log info "Setup $1 into sync mode." runasowner -q err "echo \"synchronous_standby_names = '\\\"$1\\\"'\" > \"$REP_MODE_CONF\"" [ "$RE_CONTROL_SLAVE" = "false" ] && RE_CONTROL_SLAVE="true" exec_with_retry 0 reload_conf fi } reload_conf() { # Invoke pg_ctl runasowner "$OCF_RESKEY_pgctl -D $OCF_RESKEY_pgdata reload" if [ $? -eq 0 ]; then ocf_log info "Reload configuration file." else ocf_exit_reason "Can't reload configuration file." return 1 fi return 0 } user_recovery_conf() { local nodename_tmp # put archive_cleanup_command and recovery_end_command only when defined by user if [ -n "$OCF_RESKEY_archive_cleanup_command" ]; then echo "archive_cleanup_command = '${OCF_RESKEY_archive_cleanup_command}'" fi if [ -n "$OCF_RESKEY_recovery_end_command" ]; then echo "recovery_end_command = '${OCF_RESKEY_recovery_end_command}'" fi if use_replication_slot; then nodename_tmp=`echo "$NODENAME" | tr 'A-Z.-' 'a-z__'` echo "primary_slot_name = '${OCF_RESKEY_replication_slot_name}_$nodename_tmp'" fi } make_recovery_conf() { runasowner "touch $RECOVERY_CONF" if [ $? -ne 0 ]; then ocf_exit_reason "Can't create recovery.conf." return 1 fi cat > $RECOVERY_CONF <> $RECOVERY_CONF ocf_log debug "Created recovery.conf. host=${OCF_RESKEY_master_ip}, user=${OCF_RESKEY_repuser}" return 0 } # change pgsql-status. # arg1:node, arg2: value change_pgsql_status() { local output if ! is_node_online $1; then return 0 fi output=`$CRM_ATTR_REBOOT -N "$1" -n "$PGSQL_STATUS_ATTR" -G -q 2>/dev/null` if [ "$output" != "$2" ]; then # If slave's disk is broken, RA cannot read PID file # and misjudges the PostgreSQL as down while it is running. # It causes overwriting of pgsql-status by Master because replication is still connected. if [ "$output" = "STOP" -o "$output" = "UNKNOWN" ]; then if [ "$1" != "$NODENAME" ]; then ocf_log warn "Changing $PGSQL_STATUS_ATTR on $1 : $output->$2 by $NODENAME is prohibited." return 0 fi fi ocf_log info "Changing $PGSQL_STATUS_ATTR on $1 : $output->$2." exec_with_retry 0 $CRM_ATTR_REBOOT -N "$1" -n "$PGSQL_STATUS_ATTR" -v "$2" fi return 0 } # change pgsql-data-status. # arg1:node, arg2: value change_data_status() { local output if ! node_exist $1; then return 0 fi while : do output=`$CRM_ATTR_FOREVER -N "$1" -n "$PGSQL_DATA_STATUS_ATTR" -G -q 2>/dev/null` if [ "$output" != "$2" ]; then ocf_log info "Changing $PGSQL_DATA_STATUS_ATTR on $1 : $output->$2." 
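# The forever-lifetime attribute lives in the CIB and an update may not be
# visible immediately, so this loop keeps re-reading and re-writing the value
# until the read-back matches the requested data status.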
exec_with_retry 0 exec_with_timeout 0 "$CRM_ATTR_FOREVER" -N $1 -n $PGSQL_DATA_STATUS_ATTR -v "$2" else break fi done return 0 } # set master-score # arg1:node, arg2: score, arg3: resoure set_master_score() { local current_score current_score=`$CRM_ATTR_REBOOT -N "$1" -n "master-$3" -G -q 2>/dev/null` if [ -n "$current_score" -a "$current_score" != "$2" ]; then ocf_log info "Changing $3 master score on $1 : $current_score->$2." exec_with_retry 0 $CRM_ATTR_REBOOT -N "$1" -n "master-$3" -v "$2" fi return 0 } # change master-score # arg1:node, arg2: score change_master_score() { local instance if ! is_node_online $1; then return 0 fi if echo $OCF_RESOURCE_INSTANCE | grep -q ":"; then # If Pacemaker version is 1.0.x instance=0 while : do if [ "$instance" -ge "$OCF_RESKEY_CRM_meta_clone_max" ]; then break fi if [ "${RESOURCE_NAME}:${instance}" = "$OCF_RESOURCE_INSTANCE" ]; then instance=`expr $instance + 1` continue fi set_master_score $1 $2 "${RESOURCE_NAME}:${instance}" || return 1 instance=`expr $instance + 1` done else # If globally-unique=false and Pacemaker version is 1.1.8 or higher # Master/Slave resource has no instance number set_master_score $1 $2 ${RESOURCE_NAME} || return 1 fi return 0 } report_psql_error() { local rc local loglevel local message rc=$1 loglevel=${2:-err} message="$3" ocf_log $loglevel "$message rc=$rc" if [ $rc -eq 1 ]; then ocf_exit_reason "Fatal error (out of memory, file not found, etc.) occurred while executing the psql command." elif [ $rc -eq 2 ]; then ocf_log $loglevel "Connection error (connection to the server went bad and the session was not interactive) occurred while executing the psql command." elif [ $rc -eq 3 ]; then ocf_exit_reason "Script error (the variable ON_ERROR_STOP was set) occurred while executing the psql command." fi } # # timeout management function # arg1 timeout >= 0 (if arg1 is 0, OCF_RESKEY_crm_attr_timeout is used.) # arg2 : command # arg3 : command's args exec_with_timeout() { local func_pid local count=$OCF_RESKEY_crm_attr_timeout local rc if [ "$1" -ne 0 ]; then count=$1 fi shift $* & func_pid=$! sleep .1 while kill -s 0 $func_pid >/dev/null 2>&1; do sleep 1 count=`expr $count - 1` if [ $count -le 0 ]; then ocf_exit_reason "\"$*\" (pid=$func_pid) timed out." kill -s 9 $func_pid >/dev/null 2>&1 return 1 fi ocf_log info "Waiting($count). \"$*\" (pid=$func_pid)." done wait $func_pid } # retry command when command doesn't return 0 # arg1 : count >= 0 (if arg1 is 0, it retries command in infinitum(1day)) # arg2..argN : command and args exec_with_retry() { local count="86400" local output local rc if [ "$1" -ne 0 ]; then count=$1 fi shift while [ $count -gt 0 ]; do output=`$*` rc=$? if [ $rc -ne 0 ]; then ocf_log warn "Retrying(remain $count). \"$*\" failed. rc=$rc. stdout=\"$output\"." count=`expr $count - 1` sleep 1 else printf "${output}" return 0 fi done ocf_exit_reason "giving up executing \"$*\"" return $rc } is_node_online() { print_crm_mon | tr '[A-Z]' '[a-z]' | grep -e "^node $1 " -e "^node $1:" | grep -q -v "offline" } node_exist() { print_crm_mon | tr '[A-Z]' '[a-z]' | grep -q "^node $1" } check_binary2() { if ! have_binary "$1"; then ocf_exit_reason "Setup problem: couldn't find command: $1" return 1 fi return 0 } check_config() { local rc=0 if [ ! -f "$1" ]; then if ocf_is_probe; then ocf_log info "Configuration file is $1 not readable during probe." 
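# rc=1 is tolerated: during a probe a missing configuration file is not fatal
# (validation continues and the file is simply not parsed), whereas outside a
# probe rc=2 makes the callers fail with OCF_ERR_INSTALLED.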
rc=1 else ocf_exit_reason "Configuration file $1 doesn't exist" rc=2 fi fi return $rc } # Validate most critical parameters pgsql_validate_all() { local version local check_config_rc local rep_mode_string local socket_directories local rc version=`cat $OCF_RESKEY_pgdata/PG_VERSION` if ! check_binary2 "$OCF_RESKEY_pgctl" || ! check_binary2 "$OCF_RESKEY_psql"; then return $OCF_ERR_INSTALLED fi check_config "$OCF_RESKEY_config" check_config_rc=$? [ $check_config_rc -eq 2 ] && return $OCF_ERR_INSTALLED if [ $check_config_rc -eq 0 ]; then ocf_version_cmp "$version" "9.3" if [ $? -eq 0 ]; then : ${OCF_RESKEY_socketdir=`get_pgsql_param unix_socket_directory`} else # unix_socket_directories is used by PostgreSQL 9.3 or higher. socket_directories=`get_pgsql_param unix_socket_directories` if [ -n "$socket_directories" ]; then # unix_socket_directories may have multiple socket directories and the pgsql RA can not know which directory is used for psql command. # Therefore, the user must set OCF_RESKEY_socketdir explicitly. if [ -z "$OCF_RESKEY_socketdir" ]; then ocf_exit_reason "In PostgreSQL 9.3 or higher, socketdir can't be empty if you define unix_socket_directories in the postgresql.conf." return $OCF_ERR_CONFIGURED fi fi fi fi getent passwd $OCF_RESKEY_pgdba >/dev/null 2>&1 if [ ! $? -eq 0 ]; then ocf_exit_reason "User $OCF_RESKEY_pgdba doesn't exist"; return $OCF_ERR_INSTALLED; fi if ocf_is_probe; then ocf_log info "Don't check $OCF_RESKEY_pgdata during probe" else if ! runasowner "test -w $OCF_RESKEY_pgdata"; then ocf_exit_reason "Directory $OCF_RESKEY_pgdata is not writable by $OCF_RESKEY_pgdba" return $OCF_ERR_PERM; fi fi if [ -n "$OCF_RESKEY_monitor_user" -a ! -n "$OCF_RESKEY_monitor_password" ] then ocf_exit_reason "monitor password can't be empty" return $OCF_ERR_CONFIGURED fi if [ ! -n "$OCF_RESKEY_monitor_user" -a -n "$OCF_RESKEY_monitor_password" ] then ocf_exit_reason "monitor_user has to be set if monitor_password is set" return $OCF_ERR_CONFIGURED fi if is_replication || [ "$OCF_RESKEY_rep_mode" = "slave" ]; then if [ `printf "$version\n9.1" | sort -n | head -1` != "9.1" ]; then ocf_exit_reason "Replication mode needs PostgreSQL 9.1 or higher." return $OCF_ERR_INSTALLED fi if [ ! -n "$OCF_RESKEY_master_ip" ]; then ocf_exit_reason "master_ip can't be empty." return $OCF_ERR_CONFIGURED fi fi if is_replication; then REP_MODE_CONF=${OCF_RESKEY_tmpdir}/rep_mode.conf PGSQL_LOCK=${OCF_RESKEY_tmpdir}/PGSQL.lock XLOG_NOTE_FILE=${OCF_RESKEY_tmpdir}/xlog_note CRM_MASTER="${HA_SBIN_DIR}/crm_master -l reboot" CRM_ATTR_REBOOT="${HA_SBIN_DIR}/crm_attribute -l reboot" CRM_ATTR_FOREVER="${HA_SBIN_DIR}/crm_attribute -l forever" CRM_RESOURCE="${HA_SBIN_DIR}/crm_resource" CAN_NOT_PROMOTE="-INFINITY" CAN_PROMOTE="100" PROMOTE_ME="1000" CHECK_MS_SQL="select pg_is_in_recovery()" CHECK_SYNCHRONOUS_STANDBY_NAMES_SQL="show synchronous_standby_names" ocf_version_cmp "$version" "10" rc=$? if [ $rc -eq 1 ]||[ $rc -eq 2 ]; then CHECK_XLOG_LOC_SQL="select pg_last_wal_replay_lsn(),pg_last_wal_receive_lsn()" else CHECK_XLOG_LOC_SQL="select pg_last_xlog_replay_location(),pg_last_xlog_receive_location()" fi CHECK_REPLICATION_STATE_SQL="select application_name,upper(state),upper(sync_state) from pg_stat_replication" PGSQL_STATUS_ATTR="${RESOURCE_NAME}-status" PGSQL_DATA_STATUS_ATTR="${RESOURCE_NAME}-data-status" PGSQL_XLOG_LOC_NAME="${RESOURCE_NAME}-xlog-loc" PGSQL_MASTER_BASELINE="${RESOURCE_NAME}-master-baseline" NODE_LIST=`echo $OCF_RESKEY_node_list | tr '[A-Z]' '[a-z]'` RE_CONTROL_SLAVE="false" if ! 
ocf_is_ms; then ocf_exit_reason "Replication(rep_mode=async or sync) requires Master/Slave configuration." return $OCF_ERR_CONFIGURED fi if [ ! "$OCF_RESKEY_rep_mode" = "sync" -a ! "$OCF_RESKEY_rep_mode" = "async" ]; then ocf_exit_reason "Invalid rep_mode : $OCF_RESKEY_rep_mode" return $OCF_ERR_CONFIGURED fi if [ ! -n "$NODE_LIST" ]; then ocf_exit_reason "node_list can't be empty." return $OCF_ERR_CONFIGURED fi if [ $check_config_rc -eq 0 ]; then rep_mode_string="include '$REP_MODE_CONF' # added by pgsql RA" if [ "$OCF_RESKEY_rep_mode" = "sync" ]; then if ! grep -q "^[[:space:]]*$rep_mode_string" $OCF_RESKEY_config; then ocf_log info "adding include directive into $OCF_RESKEY_config" echo "$rep_mode_string" >> $OCF_RESKEY_config fi else if grep -q "$rep_mode_string" $OCF_RESKEY_config; then ocf_log info "deleting include directive from $OCF_RESKEY_config" rep_mode_string=`echo $rep_mode_string | sed -e 's|/|\\\\/|g'` sed -i "/$rep_mode_string/d" $OCF_RESKEY_config fi fi fi if ! mkdir -p $OCF_RESKEY_tmpdir || ! chown $OCF_RESKEY_pgdba $OCF_RESKEY_tmpdir || ! chmod 700 $OCF_RESKEY_tmpdir; then ocf_exit_reason "Can't create directory $OCF_RESKEY_tmpdir or it is not readable by $OCF_RESKEY_pgdba" return $OCF_ERR_PERM fi fi if [ "$OCF_RESKEY_rep_mode" = "slave" ]; then if ocf_is_ms; then ocf_exit_reason "Replication(rep_mode=slave) does not support Master/Slave configuration." return $OCF_ERR_CONFIGURED fi fi if use_replication_slot; then ocf_version_cmp "$version" "9.4" rc=$? if [ $rc -eq 0 ]||[ $rc -eq 3 ]; then ocf_exit_reason "Replication slot needs PostgreSQL 9.4 or higher." return $OCF_ERR_CONFIGURED fi echo "$OCF_RESKEY_replication_slot_name" | grep -q -e '[^a-z0-9_]' if [ $? -eq 0 ]; then ocf_exit_reason "Invalid replication_slot_name($OCF_RESKEY_replication_slot_name). only use lower case letters, numbers, and the underscore character." return $OCF_ERR_CONFIGURED fi fi return $OCF_SUCCESS } # # Check if we need to create a log file # check_log_file() { if [ ! -e "$1" ] then touch $1 > /dev/null 2>&1 chown $OCF_RESKEY_pgdba:`getent passwd $OCF_RESKEY_pgdba | cut -d ":" -f 4` $1 fi #Check if $OCF_RESKEY_pgdba can write to the log file if ! runasowner "test -w $1" then return 1 fi return 0 } # # Check if we need to create stats temp directory in tmpfs # check_stat_temp_directory() { local stats_temp stats_temp=`get_pgsql_param stats_temp_directory` if [ -z "$stats_temp" ]; then return fi if [ "${stats_temp#/}" = "$stats_temp" ]; then stats_temp="$OCF_RESKEY_pgdata/$stats_temp" fi if [ -d "$stats_temp" ]; then return fi if ! mkdir -p "$stats_temp"; then ocf_exit_reason "Can't create directory $stats_temp" exit $OCF_ERR_PERM fi if ! chown $OCF_RESKEY_pgdba: "$stats_temp"; then ocf_exit_reason "Can't change ownership for $stats_temp" exit $OCF_ERR_PERM fi if ! chmod 700 "$stats_temp"; then ocf_exit_reason "Can't change permissions for $stats_temp" exit $OCF_ERR_PERM fi } # # Check socket directory # check_socket_dir() { if [ ! -d "$OCF_RESKEY_socketdir" ]; then if ! mkdir "$OCF_RESKEY_socketdir"; then ocf_exit_reason "Can't create directory $OCF_RESKEY_socketdir" exit $OCF_ERR_PERM fi if ! chown $OCF_RESKEY_pgdba:`getent passwd \ $OCF_RESKEY_pgdba | cut -d ":" -f 4` "$OCF_RESKEY_socketdir" then ocf_exit_reason "Can't change ownership for $OCF_RESKEY_socketdir" exit $OCF_ERR_PERM fi if ! chmod 2775 "$OCF_RESKEY_socketdir"; then ocf_exit_reason "Can't change permissions for $OCF_RESKEY_socketdir" exit $OCF_ERR_PERM fi else if ! 
runasowner "touch $OCF_RESKEY_socketdir/test.$$"; then ocf_exit_reason "$OCF_RESKEY_pgdba can't create files in $OCF_RESKEY_socketdir" exit $OCF_ERR_PERM fi rm $OCF_RESKEY_socketdir/test.$$ fi } print_crm_mon() { if [ -z "$CRM_MON_OUTPUT" ]; then CRM_MON_OUTPUT=`exec_with_retry 0 crm_mon -n1` fi printf "${CRM_MON_OUTPUT}\n" } # # 'main' starts here... # if [ $# -ne 1 ] then usage exit $OCF_ERR_GENERIC fi PIDFILE=${OCF_RESKEY_pgdata}/postmaster.pid BACKUPLABEL=${OCF_RESKEY_pgdata}/backup_label RESOURCE_NAME=`echo $OCF_RESOURCE_INSTANCE | cut -d ":" -f 1` PGSQL_WAL_RECEIVER_STATUS_ATTR="${RESOURCE_NAME}-receiver-status" RECOVERY_CONF=${OCF_RESKEY_pgdata}/recovery.conf NODENAME=$(ocf_local_nodename | tr '[A-Z]' '[a-z]') case "$1" in methods) pgsql_methods exit $?;; meta-data) meta_data exit $OCF_SUCCESS;; esac pgsql_validate_all rc=$? [ "$1" = "validate-all" ] && exit $rc if [ $rc -ne 0 ] then case "$1" in stop) if is_replication; then change_pgsql_status "$NODENAME" "UNKNOWN" fi exit $OCF_SUCCESS;; monitor) exit $OCF_NOT_RUNNING;; status) exit $OCF_NOT_RUNNING;; *) exit $rc;; esac fi US=`id -u -n` if [ $US != root -a $US != $OCF_RESKEY_pgdba ] then ocf_exit_reason "$0 must be run as root or $OCF_RESKEY_pgdba" exit $OCF_ERR_GENERIC fi # make psql command options if [ -n "$OCF_RESKEY_monitor_user" ]; then PGUSER=$OCF_RESKEY_monitor_user; export PGUSER PGPASSWORD=$OCF_RESKEY_monitor_password; export PGPASSWORD psql_options="-p $OCF_RESKEY_pgport $OCF_RESKEY_pgdb" else psql_options="-p $OCF_RESKEY_pgport -U $OCF_RESKEY_pgdba $OCF_RESKEY_pgdb" fi if [ -n "$OCF_RESKEY_pghost" ]; then psql_options="$psql_options -h $OCF_RESKEY_pghost" else if [ -n "$OCF_RESKEY_socketdir" ]; then psql_options="$psql_options -h $OCF_RESKEY_socketdir" fi fi if [ -n "$OCF_RESKEY_pgport" ]; then export PGPORT=$OCF_RESKEY_pgport fi if [ -n "$OCF_RESKEY_pglibs" ]; then if [ -n "$LD_LIBRARY_PATH" ]; then export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$OCF_RESKEY_pglibs else export LD_LIBRARY_PATH=$OCF_RESKEY_pglibs fi fi # What kind of method was invoked? case "$1" in status) if pgsql_status then ocf_log info "PostgreSQL is up" exit $OCF_SUCCESS else ocf_log info "PostgreSQL is down" exit $OCF_NOT_RUNNING fi;; monitor) pgsql_monitor exit $?;; start) pgsql_start exit $?;; promote) pgsql_promote exit $?;; demote) pgsql_demote exit $?;; notify) pgsql_notify exit $?;; stop) pgsql_stop exit $?;; *) exit $OCF_ERR_UNIMPLEMENTED;; esac diff --git a/heartbeat/sg_persist.in b/heartbeat/sg_persist.in index ea118f98e..177ec0279 100644 --- a/heartbeat/sg_persist.in +++ b/heartbeat/sg_persist.in @@ -1,686 +1,686 @@ #!@BASH_SHELL@ # # # OCF Resource Agent compliant PERSISTENT SCSI RESERVATION resource script. # # # Copyright (c) 2011 Evgeny Nifontov and lwang@suse.com All Rights Reserved. # # "Heartbeat drbd OCF Resource Agent: 2007, Lars Marowsky-Bree" was used # as example of multistate OCF Resource Agent. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. 
Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # # # OCF instance parameters # OCF_RESKEY_binary # OCF_RESKEY_devs # OCF_RESKEY_required_devs_nof # OCF_RESKEY_reservation_type # OCF_RESKEY_master_score_base # OCF_RESKEY_master_score_dev_factor # OCF_RESKEY_master_score_delay # # TODO # # 1) PROBLEM: devices which were not accessible during the 'start' action will never be registered/reserved # TODO: 'Master' and 'Slave' register new devs in 'monitor' action # TODO: 'Master' reserves new devs in 'monitor' action ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # set default values : ${sg_persist_binary="sg_persist"} # binary name for the resource : ${devs=""} # device list : ${required_devs_nof=1} # number of required devices : ${reservation_type=1} # reservation type : ${master_score_base=0} # master score base : ${master_score_dev_factor=100} # device factor for master score : ${master_score_delay=30} # delay for master score ####################################################################### meta_data() { cat < 1.1 This resource agent manages SCSI PERSISTENT RESERVATIONS. "sg_persist" from sg3_utils is used, please see its documentation. Should be used as a multistate (Master/Slave) resource. Slave registers its node id ("crm_node -i") as reservation key ( --param-rk ) on each device in the "devs" list. -Master reservs all devices from "devs" list with reservation "--prout-type" value from "reservation_type" parameter. +Master reserves all devices from "devs" list with reservation "--prout-type" value from "reservation_type" parameter. Manages SCSI PERSISTENT RESERVATIONS The name of the binary that manages the resource. the binary name of the resource Device list. Multiple devices can be listed with a blank space as separator. -Shell wildcars are allowed. +Shell wildcards are allowed. device list Minimum number of "working" devices from the device list: 1) existing 2) "sg_persist --read-keys \$device" works (return code 0) The resource actions "start", "monitor", "promote" and "validate-all" return "\$OCF_ERR_INSTALLED" if the actual number of "working" devices is less than "required_devs_nof". The resource actions "stop" and "demote" try to remove reservations and registration keys from all working devices, but always return "\$OCF_SUCCESS" minimum number of working devices reservation type reservation type master_score_base value The "master_score_base" value is used in the "master_score" calculation: master_score = \$master_score_base + \$master_score_dev_factor * \$working_devs (see the short numeric sketch after the script below). If set to a bigger value in the sg_persist resource configuration on some node, this node will be "preferred" for the master role. base master_score value Working device factor in master_score calculation each "working" device provides additional value to "master_score", so the node that sees more devices will be preferred for the "Master" role. Setting it to 0 will disable this behavior.
working device factor in master_score calculation master/slave decreases/increases its master_score after delay of \$master_score_delay seconds so if some device gets inaccessible, the slave decreases its master_score first and the resource will no be watched and after this device reappears again the master increases its master_score first this can work only if the master_score_delay is bigger then monitor interval on both master and slave Setting it to 0 will disable this behavior. master_score decrease/increase delay time END exit $OCF_SUCCESS } sg_persist_init() { if ! ocf_is_root ; then ocf_log err "You must be root to perform this operation." exit $OCF_ERR_PERM fi SG_PERSIST=${OCF_RESKEY_binary:-"$sg_persist_binary"} check_binary $SG_PERSIST ROLE=$OCF_RESKEY_CRM_meta_role NOW=$(date +%s) RESOURCE="${OCF_RESOURCE_INSTANCE}" MASTER_SCORE_VAR_NAME="master-${OCF_RESOURCE_INSTANCE//:/-}" PENDING_VAR_NAME="pending-$MASTER_SCORE_VAR_NAME" #only works with corocync CRM_NODE="${HA_SBIN_DIR}/crm_node" NODE_ID_DEC=$($CRM_NODE -i) NODE=$($CRM_NODE -l | $GREP -w ^$NODE_ID_DEC) NODE=${NODE#$NODE_ID_DEC } NODE=${NODE% *} MASTER_SCORE_ATTRIBUTE="${HA_SBIN_DIR}/crm_attribute --lifetime=reboot --name=$MASTER_SCORE_VAR_NAME --node=$NODE" CRM_MASTER="${HA_SBIN_DIR}/crm_master --lifetime=reboot" PENDING_ATTRIBUTE="${HA_SBIN_DIR}/crm_attribute --lifetime=reboot --name=$PENDING_VAR_NAME --node=$NODE" NODE_ID_HEX=$(printf '0x%x' $NODE_ID_DEC) if [ -z "$NODE_ID_HEX" ]; then ocf_log err "Couldn't get node id with \"$CRM_NODE\"" exit $OCF_ERR_INSTALLED fi ocf_log debug "$RESOURCE: NODE:$NODE, ROLE:$ROLE, NODE_ID DEC:$NODE_ID_DEC HEX:$NODE_ID_HEX" DEVS=${OCF_RESKEY_devs:=$devs} REQUIRED_DEVS_NOF=${OCF_RESKEY_required_devs_nof:=$required_devs_nof} RESERVATION_TYPE=${OCF_RESKEY_reservation_type:=$reservation_type} MASTER_SCORE_BASE=${OCF_RESKEY_master_score_base:=$master_score_base} MASTER_SCORE_DEV_FACTOR=${OCF_RESKEY_master_score_dev_factor:=$master_score_dev_factor} MASTER_SCORE_DELAY=${OCF_RESKEY_master_score_delay:=$master_score_delay} ocf_log debug "$RESOURCE: DEVS=$DEVS" ocf_log debug "$RESOURCE: REQUIRED_DEVS_NOF=$REQUIRED_DEVS_NOF" ocf_log debug "$RESOURCE: RESERVATION_TYPE=$RESERVATION_TYPE" ocf_log debug "$RESOURCE: MASTER_SCORE_BASE=$MASTER_SCORE_BASE" ocf_log debug "$RESOURCE: MASTER_SCORE_DEV_FACTOR=$MASTER_SCORE_DEV_FACTOR" ocf_log debug "$RESOURCE: MASTER_SCORE_DELAY=$MASTER_SCORE_DELAY" #expand path wildcards DEVS=$(echo $DEVS) if [ -z "$DEVS" ]; then ocf_log err "\"devs\" not defined" exit $OCF_ERR_INSTALLED fi sg_persist_check_devs sg_persist_get_status } sg_persist_action_usage() { cat <&1` [ $? -eq 0 ] || continue WORKING_DEVS+=($dev) echo "$READ_KEYS" | $GREP -qw $NODE_ID_HEX\$ [ $? -eq 0 ] || continue REGISTERED_DEVS+=($dev) READ_RESERVATION=`$SG_PERSIST --in --read-reservation $dev 2>&1` [ $? -eq 0 ] || continue echo "$READ_RESERVATION" | $GREP -qw $NODE_ID_HEX\$ if [ $? 
-eq 0 ]; then RESERVED_DEVS+=($dev) fi reservation_key=`echo $READ_RESERVATION | $GREP -o 'Key=0x[0-9a-f]*' | $GREP -o '0x[0-9a-f]*'` if [ -n "$reservation_key" ]; then DEVS_WITH_RESERVATION+=($dev) RESERVATION_KEYS+=($reservation_key) fi done WORKING_DEVS_NOF=${#WORKING_DEVS[*]} ocf_log debug "$RESOURCE: working devices: `sg_persist_echo_array ${WORKING_DEVS[*]}`" ocf_log debug "$RESOURCE: number of working devices: $WORKING_DEVS_NOF" ocf_log debug "$RESOURCE: registered devices: `sg_persist_echo_array ${REGISTERED_DEVS[*]}`" ocf_log debug "$RESOURCE: reserved devices: `sg_persist_echo_array ${RESERVED_DEVS[*]}`" ocf_log debug "$RESOURCE: devices with reservation: `sg_persist_echo_array ${DEVS_WITH_RESERVATION[*]}`" ocf_log debug "$RESOURCE: reservation keys: `sg_persist_echo_array ${RESERVATION_KEYS[*]}`" MASTER_SCORE=$(($MASTER_SCORE_BASE + $MASTER_SCORE_DEV_FACTOR*$WORKING_DEVS_NOF)) ocf_log debug "$RESOURCE: master_score: $MASTER_SCORE_BASE + $MASTER_SCORE_DEV_FACTOR*$WORKING_DEVS_NOF = $MASTER_SCORE" } sg_persist_check_devs() { for dev in $DEVS do if [ -e "$dev" ]; then EXISTING_DEVS+=($dev) fi done EXISTING_DEVS_NOF=${#EXISTING_DEVS[*]} if [ $EXISTING_DEVS_NOF -lt $REQUIRED_DEVS_NOF ]; then ocf_log err "Number of existing devices=$EXISTING_DEVS_NOF less then required_devs_nof=$REQUIRED_DEVS_NOF" exit $OCF_ERR_INSTALLED fi } sg_persist_is_registered() { for registered_dev in ${REGISTERED_DEVS[*]} do if [ "$registered_dev" == "$1" ]; then return 0 fi done return 1 } sg_persist_get_reservation_key() { for array_index in ${!DEVS_WITH_RESERVATION[*]} do if [ "${DEVS_WITH_RESERVATION[$array_index]}" == "$1" ]; then echo ${RESERVATION_KEYS[$array_index]} return 0 fi done echo "" } sg_persist_echo_array() { str_count=0 arr_str="" for str in "$@" do arr_str="$arr_str[$str_count]:$str " str_count=$(($str_count+1)) done echo $arr_str } sg_persist_parse_act_pending() { ACT_PENDING_TS=0 ACT_PENDING_SCORE=0 if [ -n "$ACT_PENDING" ]; then ACT_PENDING_TS=${ACT_PENDING%%_*} ACT_PENDING_SCORE=${ACT_PENDING##*_} fi } sg_persist_clear_pending() { if [ -n "$ACT_PENDING" ]; then DO_PENDING_UPDATE="YES" NEW_PENDING="" fi } sg_persist_new_master_score() { DO_MASTER_SCORE_UPDATE="YES" NEW_MASTER_SCORE=$1 } sg_persist_new_pending() { DO_PENDING_UPDATE="YES" NEW_PENDING=$1 } # Functions invoked by resource manager actions sg_persist_action_start() { ocf_run $MASTER_SCORE_ATTRIBUTE --update=$MASTER_SCORE ocf_run $PENDING_ATTRIBUTE --update="" if [ $WORKING_DEVS_NOF -lt $REQUIRED_DEVS_NOF ]; then ocf_log err "$RESOURCE: Number of working devices=$WORKING_DEVS_NOF less then required_devs_nof=$REQUIRED_DEVS_NOF" exit $OCF_ERR_GENERIC fi for dev in ${WORKING_DEVS[*]} do if sg_persist_is_registered $dev ; then : OK else ocf_run $SG_PERSIST --out --no-inquiry --register --param-rk=0 --param-sark=$NODE_ID_HEX $dev if [ $? 
sg_persist_action_stop() {
    if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then
        ocf_log debug "$RESOURCE stop: already no registrations"
    else
        # Clear preference for becoming master
        ocf_run $MASTER_SCORE_ATTRIBUTE --delete
        ocf_run $PENDING_ATTRIBUTE --delete

        for dev in ${REGISTERED_DEVS[*]}
        do
            ocf_run $SG_PERSIST --out --no-inquiry --register --param-rk=$NODE_ID_HEX --param-sark=0 $dev
        done
    fi

    return $OCF_SUCCESS
}

sg_persist_action_monitor() {
    ACT_MASTER_SCORE=`$MASTER_SCORE_ATTRIBUTE --query --quiet 2>/dev/null`
    ocf_log debug "$RESOURCE monitor: ACT_MASTER_SCORE=$ACT_MASTER_SCORE"

    ACT_PENDING=`$PENDING_ATTRIBUTE --query --quiet 2>/dev/null`
    ocf_log debug "$RESOURCE monitor: ACT_PENDING=$ACT_PENDING"

    sg_persist_parse_act_pending
    ocf_log debug "$RESOURCE monitor: ACT_PENDING_TS=$ACT_PENDING_TS"
    ocf_log debug "$RESOURCE monitor: ACT_PENDING_SCORE=$ACT_PENDING_SCORE"

    ocf_log debug "$RESOURCE monitor: MASTER_SCORE=$MASTER_SCORE, ACT_MASTER_SCORE=$ACT_MASTER_SCORE, ROLE=$ROLE"

    DO_MASTER_SCORE_UPDATE="NO"
    DO_PENDING_UPDATE="NO"

    if [ -n "$ACT_MASTER_SCORE" ]
    then
        if [ $ACT_MASTER_SCORE -eq $MASTER_SCORE ]; then
            sg_persist_clear_pending
        else
            case $ROLE in
                Master)
                    # delay master_score decreases on the master
                    if [ $MASTER_SCORE -lt $ACT_MASTER_SCORE ]; then
                        if [ -n "$ACT_PENDING" ]
                        then
                            if [ $(($NOW-$ACT_PENDING_TS-$MASTER_SCORE_DELAY)) -ge 0 ]; then
                                sg_persist_new_master_score $MASTER_SCORE
                                sg_persist_clear_pending
                            fi
                        else
                            if [ $MASTER_SCORE_DELAY -eq 0 ]; then
                                sg_persist_new_master_score $MASTER_SCORE
                                sg_persist_clear_pending
                            else
                                sg_persist_new_pending "${NOW}_${MASTER_SCORE}"
                            fi
                        fi
                    else
                        sg_persist_new_master_score $MASTER_SCORE
                        sg_persist_clear_pending
                    fi
                    ;;

                Slave)
                    # delay master_score increases on the slave
                    if [ $MASTER_SCORE -gt $ACT_MASTER_SCORE ]; then
                        if [ -n "$ACT_PENDING" ]; then
                            if [ $(($NOW-$ACT_PENDING_TS-$MASTER_SCORE_DELAY)) -ge 0 ]; then
                                sg_persist_new_master_score $MASTER_SCORE
                                sg_persist_clear_pending
                            fi
                        else
                            if [ $MASTER_SCORE_DELAY -eq 0 ]; then
                                sg_persist_new_master_score $MASTER_SCORE
                                sg_persist_clear_pending
                            else
                                sg_persist_new_pending "${NOW}_${MASTER_SCORE}"
                            fi
                        fi
                    else
                        sg_persist_new_master_score $MASTER_SCORE
                        sg_persist_clear_pending
                    fi
                    ;;

                *)
                    ;;
            esac
        fi
    fi

    if [ $DO_MASTER_SCORE_UPDATE == "YES" ]; then
        ocf_run $MASTER_SCORE_ATTRIBUTE --update=$NEW_MASTER_SCORE
    fi

    if [ $DO_PENDING_UPDATE == "YES" ]; then
        ocf_run $PENDING_ATTRIBUTE --update=$NEW_PENDING
    fi

    if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then
        ocf_log debug "$RESOURCE monitor: no registrations"

        if [ -n "$ACT_MASTER_SCORE" ]; then
            ocf_run $MASTER_SCORE_ATTRIBUTE --delete
            ocf_run $PENDING_ATTRIBUTE --delete
        fi

        return $OCF_NOT_RUNNING
    fi

    if [ ${#RESERVED_DEVS[*]} -eq ${#WORKING_DEVS[*]} ]; then
        if [ -z "$ACT_MASTER_SCORE" ]; then
            ocf_run $MASTER_SCORE_ATTRIBUTE --update=$MASTER_SCORE
            ocf_run $PENDING_ATTRIBUTE --update=""
        fi
        return $OCF_RUNNING_MASTER
    fi

    if [ ${#REGISTERED_DEVS[*]} -eq ${#WORKING_DEVS[*]} ]; then
        if [ -z "$ACT_MASTER_SCORE" ]; then
            ocf_run $MASTER_SCORE_ATTRIBUTE --update=$MASTER_SCORE
            ocf_run $PENDING_ATTRIBUTE --update=""
        fi

        if [ $RESERVATION_TYPE -eq 7 ] || [ $RESERVATION_TYPE -eq 8 ]; then
            # with the "all registrants" types every registered node may hold the reservation
            if [ ${#DEVS_WITH_RESERVATION[*]} -gt 0 ]; then
                return $OCF_RUNNING_MASTER
            else
                return $OCF_SUCCESS
            fi
        else
            return $OCF_SUCCESS
        fi
    fi

    ocf_log err "$RESOURCE monitor: unexpected state"
    return $OCF_ERR_GENERIC
}

sg_persist_action_promote() {
    if [ ${#RESERVED_DEVS[*]} -gt 0 ]; then
        ocf_log info "$RESOURCE promote: already master"
        return $OCF_SUCCESS
    fi

    for dev in ${WORKING_DEVS[*]}
    do
        reservation_key=`sg_persist_get_reservation_key $dev`
        case $RESERVATION_TYPE in
            1|3|5|6)
                if [ -z "$reservation_key" ]; then
                    ocf_run $SG_PERSIST --out --no-inquiry --reserve --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev
                    if [ $? -ne $OCF_SUCCESS ]; then
                        return $OCF_ERR_GENERIC
                    fi
                else
                    # another key holds the reservation: preempt it
                    ocf_run $SG_PERSIST --out --no-inquiry --preempt --param-sark=$reservation_key --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev
                    if [ $? -ne $OCF_SUCCESS ]; then
                        return $OCF_ERR_GENERIC
                    fi
                fi
                ;;

            7|8)
                if [ -z "$reservation_key" ]; then
                    ocf_run $SG_PERSIST --out --no-inquiry --reserve --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev
                    if [ $? -ne $OCF_SUCCESS ]
                    then
                        return $OCF_ERR_GENERIC
                    fi
                else
                    ocf_log info "$RESOURCE promote: a reservation holder already exists, all registrants become reservation holders"
                    return $OCF_SUCCESS
                fi
                ;;

            *)
                return $OCF_ERR_ARGS
                ;;
        esac
    done

    return $OCF_SUCCESS
}

sg_persist_action_demote() {
    case $RESERVATION_TYPE in
        1|3|5|6)
            if [ ${#RESERVED_DEVS[*]} -eq 0 ]; then
                ocf_log info "$RESOURCE demote: already slave"
                return $OCF_SUCCESS
            fi

            for dev in ${RESERVED_DEVS[*]}
            do
                ocf_run $SG_PERSIST --out --no-inquiry --release --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev
                if [ $? -ne $OCF_SUCCESS ]; then
                    return $OCF_ERR_GENERIC
                fi
            done
            ;;

        7|8)
            # in case of 7/8, --release won't release the reservation unless the key is unregistered
            if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then
                ocf_log info "$RESOURCE demote: already slave"
                return $OCF_SUCCESS
            fi

            for dev in ${REGISTERED_DEVS[*]}
            do
                ocf_run $SG_PERSIST --out --no-inquiry --register --param-rk=$NODE_ID_HEX --param-sark=0 $dev
                if [ $? -ne $OCF_SUCCESS ]; then
                    return $OCF_ERR_GENERIC
                fi
            done
            ;;

        *)
            return $OCF_ERR_ARGS
            ;;
    esac

    return $OCF_SUCCESS
}

sg_persist_action_notify() {
    local n_type="$OCF_RESKEY_CRM_meta_notify_type"
    local n_op="$OCF_RESKEY_CRM_meta_notify_operation"

    set -- $OCF_RESKEY_CRM_meta_notify_active_resource
    local n_active="$#"

    set -- $OCF_RESKEY_CRM_meta_notify_stop_resource
    local n_stop="$#"

    set -- $OCF_RESKEY_CRM_meta_notify_start_resource
    local n_start="$#"

    ocf_log debug "$RESOURCE notify: $n_type for $n_op - counts: active $n_active - starting $n_start - stopping $n_stop"

    return $OCF_SUCCESS
}

sg_persist_action_validate_all() {
    if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then
        ocf_log err "Master options misconfigured."
        exit $OCF_ERR_CONFIGURED
    fi

    return $OCF_SUCCESS
}

if [ $# -ne 1 ]; then
    echo "Incorrect parameter count."
    sg_persist_action_usage
    exit $OCF_ERR_ARGS
fi

ACTION=$1
case $ACTION in
    meta-data)
        meta_data
        ;;

    validate-all)
        sg_persist_init
        sg_persist_action_validate_all
        ;;

    start|promote|monitor|stop|demote)
        ocf_log debug "$RESOURCE: starting action \"$ACTION\""
        sg_persist_init
        sg_persist_action_$ACTION
        exit $?
        ;;

    notify)
        sg_persist_action_notify
        exit $?
        ;;

    usage|help)
        sg_persist_action_usage
        exit $OCF_SUCCESS
        ;;

    *)
        sg_persist_action_usage
        exit $OCF_ERR_ARGS
        ;;
esac