diff --git a/heartbeat/galera b/heartbeat/galera index 45693ace9..0cab9a464 100755 --- a/heartbeat/galera +++ b/heartbeat/galera @@ -1,1046 +1,882 @@ #!/bin/sh # # Copyright (c) 2014 David Vossel # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ## # README. # # This agent only supports being configured as a multistate Master # resource. # # Slave vs Master role: # # During the 'Slave' role, galera instances are in read-only mode and -# will not attempt to connect to the cluster. This role exists as +# will not attempt to connect to the cluster. This role exists only as # a means to determine which galera instance is the most up-to-date. The # most up-to-date node will be used to bootstrap a galera cluster that # has no current members. # # The galera instances will only begin to be promoted to the Master role # once all the nodes in the 'wsrep_cluster_address' connection address # have entered read-only mode. At that point the node containing the -# database that is most current will be promoted to Master. 
-# -# Once the first Master instance bootstraps the galera cluster, the -# other nodes will join the cluster and start synchronizing via SST. -# They will stay in Slave role as long as the SST is running. Their -# promotion to Master will happen once synchronization is finished. +# database that is most current will be promoted to Master. Once the first +# Master instance bootstraps the galera cluster, the other nodes will be +# promoted to Master as well. # # Example: Create a galera cluster using nodes rhel7-node1 rhel7-node2 rhel7-node3 # # pcs resource create db galera enable_creation=true \ # wsrep_cluster_address="gcomm://rhel7-auto1,rhel7-auto2,rhel7-auto3" meta master-max=3 --master # # By setting the 'enable_creation' option, the database will be automatically # generated at startup. The meta attribute 'master-max=3' means that all 3 # nodes listed in the wsrep_cluster_address list will be allowed to connect # to the galera cluster and perform replication. # # NOTE: If you have more nodes in the pacemaker cluster then you wish # to have in the galera cluster, make sure to use location contraints to prevent # pacemaker from attempting to place a galera instance on a node that is # not in the 'wsrep_cluster_address" list. # ## ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs . ${OCF_FUNCTIONS_DIR}/mysql-common.sh # It is common for some galera instances to store # check user that can be used to query status # in this file if [ -f "/etc/sysconfig/clustercheck" ]; then . /etc/sysconfig/clustercheck elif [ -f "/etc/default/clustercheck" ]; then . /etc/default/clustercheck fi ####################################################################### usage() { cat < 1.0 Resource script for managing galara database. 
Manages a galara instance Location of the MySQL server binary MySQL server binary Location of the MySQL client binary MySQL client binary Configuration file MySQL config Directory containing databases MySQL datadir User running MySQL daemon MySQL user Group running MySQL daemon (for logfile and directory permissions) MySQL group The logfile to be used for mysqld. MySQL log file The pidfile to be used for mysqld. MySQL pid file The socket to be used for mysqld. MySQL socket If the MySQL database does not exist, it will be created Create the database if it does not exist Additional parameters which are passed to the mysqld on startup. (e.g. --skip-external-locking or --skip-grant-tables) Additional parameters to pass to mysqld The galera cluster address. This takes the form of: gcomm://node,node,node Only nodes present in this node list will be allowed to start a galera instance. The galera node names listed in this address are expected to match valid pacemaker node names. If both names need to differ, you must provide a mapping in option cluster_host_map. Galera cluster address A mapping of pacemaker node names to galera node names. To be used when both pacemaker and galera names need to differ, (e.g. when galera names map to IP from a specific network interface) This takes the form of: pcmk1:node.1.galera;pcmk2:node.2.galera;pcmk3:node.3.galera where the galera resource started on node pcmk1 would be named node.1.galera in the wsrep_cluster_address Pacemaker to Galera name mapping Cluster check user. 
MySQL test user Cluster check user password check password END } get_option_variable() { local key=$1 $MYSQL $MYSQL_OPTIONS_CHECK -e "SHOW VARIABLES like '$key';" | tail -1 } get_status_variable() { local key=$1 $MYSQL $MYSQL_OPTIONS_CHECK -e "show status like '$key';" | tail -1 } set_bootstrap_node() { local node=$1 ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -v "true" } clear_bootstrap_node() { ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -D } is_bootstrap() { ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -Q 2>/dev/null } set_no_grastate() { ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -v "true" } clear_no_grastate() { ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -D } is_no_grastate() { local node=$1 ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -Q 2>/dev/null } clear_last_commit() { ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -D } set_last_commit() { ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -v $1 } get_last_commit() { local node=$1 if [ -z "$node" ]; then ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null else ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null fi } wait_for_sync() { local state=$(get_status_variable "wsrep_local_state") ocf_log info "Waiting for database to sync with the cluster. " while [ "$state" != "4" ]; do sleep 1 state=$(get_status_variable "wsrep_local_state") done ocf_log info "Database synced." 
} -set_sync_needed() -{ - ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-sync-needed" -v "true" -} - -clear_sync_needed() -{ - ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-sync-needed" -D -} - -check_sync_needed() -{ - ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-sync-needed" -Q 2>/dev/null -} - - -# this function is called when attribute sync-needed is set in the CIB -check_sync_status() -{ - # if the pidfile is created, mysqld is up and running - # an IST might still be in progress, check wsrep status - if [ -e $OCF_RESKEY_pid ]; then - local cluster_status=$(get_status_variable "wsrep_cluster_status") - local state=$(get_status_variable "wsrep_local_state") - local ready=$(get_status_variable "wsrep_ready") - - if [ -z "$cluster_status" -o -z "$state" -o -z "$ready" ]; then - ocf_exit_reason "Unable to retrieve state transfer status, verify check_user '$OCF_RESKEY_check_user' has permissions to view status" - return $OCF_ERR_GENERIC - fi - - if [ "$cluster_status" != "Primary" ]; then - ocf_exit_reason "local node <${NODENAME}> is started, but not in primary mode. Unknown state." 
- return $OCF_ERR_GENERIC - fi - - if [ "$state" = "4" -a "$ready" = "ON" ]; then - ocf_log info "local node synced with the cluster" - # when sync is finished, we are ready to switch to Master - clear_sync_needed - set_master_score - return $OCF_SUCCESS - fi - fi - - # if we pass here, an IST or SST is still in progress - ocf_log info "local node syncing" - return $OCF_SUCCESS -} - is_primary() { cluster_status=$(get_status_variable "wsrep_cluster_status") if [ "$cluster_status" = "Primary" ]; then return 0 fi if [ -z "$cluster_status" ]; then ocf_exit_reason "Unable to retrieve wsrep_cluster_status, verify check_user '$OCF_RESKEY_check_user' has permissions to view status" else ocf_log info "Galera instance wsrep_cluster_status=${cluster_status}" fi return 1 } is_readonly() { local res=$(get_option_variable "read_only") if ! ocf_is_true "$res"; then return 1 fi cluster_status=$(get_status_variable "wsrep_cluster_status") if ! [ "$cluster_status" = "Disconnected" ]; then return 1 fi return 0 } master_exists() { if [ "$__OCF_ACTION" = "demote" ]; then # We don't want to detect master instances during demote. # 1. we could be detecting ourselves as being master, which is no longer the case. # 2. we could be detecting other master instances that are in the process of shutting down. # by not detecting other master instances in "demote" we are deferring this check # to the next recurring monitor operation which will be much more accurate return 1 fi # determine if a master instance is already up and is healthy crm_mon --as-xml | grep "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1 return $? 
} clear_master_score() { local node=$1 if [ -z "$node" ]; then $CRM_MASTER -D else $CRM_MASTER -D -N $node fi } set_master_score() { local node=$1 if [ -z "$node" ]; then $CRM_MASTER -v 100 else $CRM_MASTER -N $node -v 100 fi } +promote_everyone() +{ + + for node in $(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' '); do + + set_master_score $node + done +} + greater_than_equal_long() { # there are values we need to compare in this script # that are too large for shell -gt to process echo | awk -v n1="$1" -v n2="$2" '{if (n1>=n2) printf ("true"); else printf ("false");}' | grep -q "true" } galera_to_pcmk_name() { local galera=$1 if [ -z "$OCF_RESKEY_cluster_host_map" ]; then echo $galera else echo "$OCF_RESKEY_cluster_host_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$2=="'"$galera"'" {print $1;exit}' fi } pcmk_to_galera_name() { local pcmk=$1 if [ -z "$OCF_RESKEY_cluster_host_map" ]; then echo $pcmk else echo "$OCF_RESKEY_cluster_host_map" | tr ';' '\n' | tr -d ' ' | sed 's/:/ /' | awk -F' ' '$1=="'"$pcmk"'" {print $2;exit}' fi } detect_first_master() { local best_commit=0 local best_node="$NODENAME" local last_commit=0 local missing_nodes=0 local nodes="" local nodes_recovered="" # avoid selecting a recovered node as bootstrap if possible for node in $(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' '); do local pcmk_node=$(galera_to_pcmk_name $node) if [ -z "$pcmk_node" ]; then ocf_log error "Could not determine pacemaker node from galera name <${node}>." return else node=$pcmk_node fi if is_no_grastate $node; then nodes_recovered="$nodes_recovered $node" else nodes="$nodes $node" fi done for node in $nodes_recovered $nodes; do last_commit=$(get_last_commit $node) if [ -z "$last_commit" ]; then ocf_log info "Waiting on node <${node}> to report database status before Master instances can start." 
missing_nodes=1 continue fi # this means -1, or that no commit has occured yet. if [ "$last_commit" = "18446744073709551615" ]; then last_commit="0" fi greater_than_equal_long "$last_commit" "$best_commit" if [ $? -eq 0 ]; then best_node=$node best_commit=$last_commit fi done if [ $missing_nodes -eq 1 ]; then return fi ocf_log info "Promoting $best_node to be our bootstrap node" set_master_score $best_node set_bootstrap_node $best_node } -detect_galera_pid() -{ - ps auxww | grep -v -e "${OCF_RESKEY_binary}" -e grep | grep -qe "--pid-file=$OCF_RESKEY_pid" -} - -galera_status() -{ - local loglevel=$1 - local rc - local running - - if [ -e $OCF_RESKEY_pid ]; then - mysql_common_status $loglevel - rc=$? - else - # if pidfile is not created, the server may - # still be starting up, e.g. running SST - detect_galera_pid - running=$? - if [ $running -eq 0 ]; then - rc=$OCF_SUCCESS - else - ocf_log $loglevel "MySQL is not running" - rc=$OCF_NOT_RUNNING - fi - fi - - return $rc -} - -galera_start_nowait() -{ - local mysql_extra_params="$1" - local pid - local running - - ${OCF_RESKEY_binary} --defaults-file=$OCF_RESKEY_config \ - --pid-file=$OCF_RESKEY_pid \ - --socket=$OCF_RESKEY_socket \ - --datadir=$OCF_RESKEY_datadir \ - --log-error=$OCF_RESKEY_log \ - --user=$OCF_RESKEY_user $OCF_RESKEY_additional_parameters \ - $mysql_extra_params >/dev/null 2>&1 & - pid=$! - - # Spin waiting for the server to be spawned. - # Let the CRM/LRM time us out if required. - start_wait=1 - while [ $start_wait = 1 ]; do - if ! ps $pid > /dev/null 2>&1; then - wait $pid - ocf_exit_reason "MySQL server failed to start (pid=$pid) (rc=$?), please check your installation" - return $OCF_ERR_GENERIC - fi - detect_galera_pid - running=$? 
- if [ $running -eq 0 ]; then - start_wait=0 - else - ocf_log info "MySQL is not running" - fi - sleep 2 - done - - return $OCF_SUCCESS -} - -galera_start_local_node() -{ - local rc - local extra_opts - local bootstrap - - bootstrap=$(is_bootstrap) - - master_exists - if [ $? -eq 0 ]; then - # join without bootstrapping - ocf_log info "Node <${NODENAME}> is joining the cluster" - extra_opts="--wsrep-cluster-address=${OCF_RESKEY_wsrep_cluster_address}" - elif ocf_is_true $bootstrap; then - ocf_log info "Node <${NODENAME}> is bootstrapping the cluster" - extra_opts="--wsrep-cluster-address=gcomm://" - else - ocf_exit_reason "Failure, Attempted to join cluster of $OCF_RESOURCE_INSTANCE before master node has been detected." - clear_last_commit - return $OCF_ERR_GENERIC - fi - - # clear last_commit before we start galera to make sure there - # won't be discrepency between the cib and galera if this node - # processes a few transactions and fails before we detect it - clear_last_commit - - mysql_common_prepare_dirs - - # At start time, if galera requires a SST rather than an IST, the - # mysql server's pidfile won't be available until SST finishes, - # which can be longer than the start timeout. So we only check - # bootstrap node extensively. Joiner nodes are monitored in the - # "monitor" op - if ocf_is_true $bootstrap; then - # start server and wait until it's up and running - mysql_common_start "$extra_opts" - rc=$? - if [ $rc != $OCF_SUCCESS ]; then - return $rc - fi - - mysql_common_status info - rc=$? - - if [ $rc != $OCF_SUCCESS ]; then - ocf_exit_reason "Failed initial monitor action" - return $rc - fi - - is_readonly - if [ $? -eq 0 ]; then - ocf_exit_reason "Failure. Master instance started in read-only mode, check configuration." - return $OCF_ERR_GENERIC - fi - - is_primary - if [ $? -ne 0 ]; then - ocf_exit_reason "Failure. Master instance started, but is not in Primary mode." 
- return $OCF_ERR_GENERIC - fi - - clear_bootstrap_node - # clear attribute no-grastate. if last shutdown was - # not clean, we cannot be extra-cautious by requesting a SST - # since this is the bootstrap node - clear_no_grastate - else - # only start server, defer full checks to "monitor" op - galera_start_nowait "$extra_opts" - rc=$? - if [ $rc != $OCF_SUCCESS ]; then - return $rc - fi - - set_sync_needed - # attribute no-grastate will be cleared once the joiner - # has finished syncing and is promoted to Master - fi - - ocf_log info "Galera started" - return $OCF_SUCCESS -} - detect_last_commit() { local last_commit local recover_args="--defaults-file=$OCF_RESKEY_config \ --pid-file=$OCF_RESKEY_pid \ --socket=$OCF_RESKEY_socket \ --datadir=$OCF_RESKEY_datadir \ --user=$OCF_RESKEY_user" local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p' local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p' ocf_log info "attempting to detect last commit version by reading ${OCF_RESKEY_datadir}/grastate.dat" last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')" if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then local tmp=$(mktemp) # if we pass here because grastate.dat doesn't exist, # try not to bootstrap from this node if possible if [ ! -f ${OCF_RESKEY_datadir}/grastate.dat ]; then set_no_grastate fi ocf_log info "now attempting to detect last commit version using 'mysqld_safe --wsrep-recover'" ${OCF_RESKEY_binary} $recover_args --wsrep-recover --log-error=$tmp 2>/dev/null last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)" if [ -z "$last_commit" ]; then # Galera uses InnoDB's 2pc transactions internally. 
If # server was stopped in the middle of a replication, the # recovery may find a "prepared" XA transaction in the # redo log, and mysql won't recover automatically local recovery_file="$(cat $tmp | sed -n $recovery_file_regex)" if [ -e $recovery_file ]; then cat $recovery_file | grep -q -E '\[ERROR\]\s+Found\s+[0-9]+\s+prepared\s+transactions!' 2>/dev/null if [ $? -eq 0 ]; then # we can only rollback the transaction, but that's OK # since the DB will get resynchronized anyway ocf_log warn "local node <${NODENAME}> was not shutdown properly. Rollback stuck transaction with --tc-heuristic-recover" ${OCF_RESKEY_binary} $recover_args --wsrep-recover \ --tc-heuristic-recover=rollback --log-error=$tmp 2>/dev/null last_commit="$(cat $tmp | sed -n $recovered_position_regex | tail -1)" if [ ! -z "$last_commit" ]; then ocf_log warn "State recovered. force SST at next restart for full resynchronization" rm -f ${OCF_RESKEY_datadir}/grastate.dat # try not to bootstrap from this node if possible set_no_grastate fi fi fi fi rm -f $tmp fi if [ ! -z "$last_commit" ]; then ocf_log info "Last commit version found: $last_commit" set_last_commit $last_commit return $OCF_SUCCESS else ocf_exit_reason "Unable to detect last known write sequence number" clear_last_commit return $OCF_ERR_GENERIC fi } +# For galera, promote is really start galera_promote() { local rc local extra_opts local bootstrap - + master_exists - if [ $? -ne 0 ]; then - # promoting the first master will bootstrap the cluster - if is_bootstrap; then - galera_start_local_node - rc=$? - return $rc - else - ocf_exit_reason "Attempted to start the cluster without being a bootstrap node." - return $OCF_ERR_GENERIC - fi + if [ $? -eq 0 ]; then + # join without bootstrapping + extra_opts="--wsrep-cluster-address=${OCF_RESKEY_wsrep_cluster_address}" else - # promoting other masters only performs sanity checks - # as the joining nodes were started during the "monitor" op - if ! 
check_sync_needed; then - # sync is done, clear info about last startup - clear_no_grastate - return $OCF_SUCCESS + bootstrap=$(is_bootstrap) + + if ocf_is_true $bootstrap; then + ocf_log info "Node <${NODENAME}> is bootstrapping the cluster" + extra_opts="--wsrep-cluster-address=gcomm://" else - ocf_exit_reason "Attempted to promote local node while sync was still needed." + ocf_exit_reason "Failure, Attempted to promote Master instance of $OCF_RESOURCE_INSTANCE before bootstrap node has been detected." + clear_last_commit return $OCF_ERR_GENERIC fi fi + + galera_monitor + if [ $? -eq $OCF_RUNNING_MASTER ]; then + if ocf_is_true $bootstrap; then + promote_everyone + clear_bootstrap_node + ocf_log info "boostrap node already up, promoting the rest of the galera instances." + fi + clear_last_commit + return $OCF_SUCCESS + fi + + # last commit is no longer relevant once promoted + clear_last_commit + + mysql_common_prepare_dirs + mysql_common_start "$extra_opts" + rc=$? + if [ $rc != $OCF_SUCCESS ]; then + return $rc + fi + + galera_monitor + rc=$? + if [ $rc != $OCF_SUCCESS -a $rc != $OCF_RUNNING_MASTER ]; then + ocf_exit_reason "Failed initial monitor action" + return $rc + fi + + is_readonly + if [ $? -eq 0 ]; then + ocf_exit_reason "Failure. Master instance started in read-only mode, check configuration." + return $OCF_ERR_GENERIC + fi + + is_primary + if [ $? -ne 0 ]; then + ocf_exit_reason "Failure. Master instance started, but is not in Primary mode." + return $OCF_ERR_GENERIC + fi + + if ocf_is_true $bootstrap; then + promote_everyone + clear_bootstrap_node + # clear attribute no-grastate. if last shutdown was + # not clean, we cannot be extra-cautious by requesting a SST + # since this is the bootstrap node + clear_no_grastate + ocf_log info "Bootstrap complete, promoting the rest of the galera instances." + else + # if this is not the bootstrap node, make sure this instance + # syncs with the rest of the cluster before promotion returns. 
+ wait_for_sync + # sync is done, clear info about last startup + clear_no_grastate + fi + + ocf_log info "Galera started" + return $OCF_SUCCESS } galera_demote() { mysql_common_stop rc=$? if [ $rc -ne $OCF_SUCCESS ] && [ $rc -ne $OCF_NOT_RUNNING ]; then ocf_exit_reason "Failed to stop Master galera instance during demotion to Master" return $rc fi # if this node was previously a bootstrap node, that is no longer the case. clear_bootstrap_node clear_last_commit - clear_sync_needed clear_no_grastate # Clear master score here rather than letting pacemaker do so once # demote finishes. This way a promote cannot take place right # after this demote even if pacemaker is requested to do so. It # will first have to run a start/monitor op, to reprobe the state # of the other galera nodes and act accordingly. clear_master_score # record last commit for next promotion detect_last_commit rc=$? return $rc } galera_start() { local rc local galera_node galera_node=$(pcmk_to_galera_name $NODENAME) if [ -z "$galera_node" ]; then ocf_exit_reason "Could not determine galera name from pacemaker node <${NODENAME}>." return $OCF_ERR_CONFIGURED fi echo $OCF_RESKEY_wsrep_cluster_address | grep -q -F $galera_node if [ $? -ne 0 ]; then ocf_exit_reason "local node <${NODENAME}> (galera node <${galera_node}>) must be a member of the wsrep_cluster_address <${OCF_RESKEY_wsrep_cluster_address}> to start this galera instance" return $OCF_ERR_CONFIGURED fi - galera_status info - if [ $? -ne $OCF_NOT_RUNNING ]; then + galera_monitor + if [ $? -eq $OCF_RUNNING_MASTER ]; then ocf_exit_reason "master galera instance started outside of the cluster's control" return $OCF_ERR_GENERIC fi mysql_common_prepare_dirs detect_last_commit rc=$? if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi master_exists if [ $? 
-eq 0 ]; then - ocf_log info "Master instances are already up, local node will join in when started" + ocf_log info "Master instances are already up, setting master score so this instance will join galera cluster." + set_master_score $NODENAME else clear_master_score detect_first_master fi return $OCF_SUCCESS } galera_monitor() { local rc local galera_node local status_loglevel="err" # Set loglevel to info during probe if ocf_is_probe; then status_loglevel="info" fi - # Check whether mysql is running or about to start after sync - galera_status $status_loglevel + mysql_common_status $status_loglevel rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then - last_commit=$(get_last_commit $NODENAME) - if [ -n "$last_commit" ];then + last_commit=$(get_last_commit $node) + if [ -n "$last_commit" ]; then + # if last commit is set, this instance is considered started in slave mode rc=$OCF_SUCCESS - - if ocf_is_probe; then - # prevent state change during probe - return $rc - fi - master_exists if [ $? -ne 0 ]; then detect_first_master else - # a master instance exists and is healthy. - # start this node and mark it as "pending sync" - ocf_log info "cluster is running. start local node to join in" - galera_start_local_node - rc=$? + # a master instance exists and is healthy, promote this + # local read only instance + # so it can join the master galera cluster. + set_master_score fi fi return $rc elif [ $rc -ne $OCF_SUCCESS ]; then return $rc fi - # if we make it here, mysql is running or about to start after sync. - # Check cluster status now. + # if we make it here, mysql is running. Check cluster status now. galera_node=$(pcmk_to_galera_name $NODENAME) if [ -z "$galera_node" ]; then ocf_exit_reason "Could not determine galera name from pacemaker node <${NODENAME}>." return $OCF_ERR_CONFIGURED fi echo $OCF_RESKEY_wsrep_cluster_address | grep -q -F $galera_node if [ $? 
-ne 0 ]; then ocf_exit_reason "local node <${NODENAME}> (galera node <${galera_node}>) is started, but is not a member of the wsrep_cluster_address <${OCF_RESKEY_wsrep_cluster_address}>" return $OCF_ERR_GENERIC fi - check_sync_needed + is_primary if [ $? -eq 0 ]; then - # galera running and sync is needed: slave state + if ocf_is_probe; then - # prevent state change during probe - rc=$OCF_SUCCESS - else - check_sync_status - rc=$? + # restore master score during probe + # if we detect this is a master instance + set_master_score fi + rc=$OCF_RUNNING_MASTER else - is_primary - if [ $? -ne 0 ]; then - ocf_exit_reason "local node <${NODENAME}> is started, but not in primary mode. Unknown state." - rc=$OCF_ERR_GENERIC - else - # galera running, no need to sync: master state and everything's clear - rc=$OCF_RUNNING_MASTER - - if ocf_is_probe; then - # restore master score during probe - # if we detect this is a master instance - set_master_score - fi - fi + ocf_exit_reason "local node <${NODENAME}> is started, but not in primary mode. Unknown state." + rc=$OCF_ERR_GENERIC fi return $rc } galera_stop() { local rc # make sure the process is stopped mysql_common_stop - rc=$? + rc=$? clear_last_commit clear_master_score clear_bootstrap_node - clear_sync_needed clear_no_grastate return $rc } galera_validate() { if ! ocf_is_ms; then ocf_exit_reason "Galera must be configured as a multistate Master/Slave resource." return $OCF_ERR_CONFIGURED fi if [ -z "$OCF_RESKEY_wsrep_cluster_address" ]; then ocf_exit_reason "Galera must be configured with a wsrep_cluster_address value." return $OCF_ERR_CONFIGURED fi mysql_common_validate } case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac galera_validate rc=$?
LSB_STATUS_STOPPED=3 if [ $rc -ne 0 ]; then case "$1" in stop) exit $OCF_SUCCESS;; monitor) exit $OCF_NOT_RUNNING;; status) exit $LSB_STATUS_STOPPED;; *) exit $rc;; esac fi if [ -z "${OCF_RESKEY_check_passwd}" ]; then # This value is automatically sourced from /etc/sysconfig/checkcluster if available OCF_RESKEY_check_passwd=${MYSQL_PASSWORD} fi if [ -z "${OCF_RESKEY_check_user}" ]; then # This value is automatically sourced from /etc/sysconfig/checkcluster if available OCF_RESKEY_check_user=${MYSQL_USERNAME} fi : ${OCF_RESKEY_check_user="root"} MYSQL_OPTIONS_CHECK="-nNE --user=${OCF_RESKEY_check_user}" if [ -n "${OCF_RESKEY_check_passwd}" ]; then MYSQL_OPTIONS_CHECK="$MYSQL_OPTIONS_CHECK --password=${OCF_RESKEY_check_passwd}" fi # This value is automatically sourced from /etc/sysconfig/checkcluster if available if [ -n "${MYSQL_HOST}" ]; then MYSQL_OPTIONS_CHECK="$MYSQL_OPTIONS_CHECK -h ${MYSQL_HOST}" fi # This value is automatically sourced from /etc/sysconfig/checkcluster if available if [ -n "${MYSQL_PORT}" ]; then MYSQL_OPTIONS_CHECK="$MYSQL_OPTIONS_CHECK -P ${MYSQL_PORT}" fi # What kind of method was invoked? case "$1" in start) galera_start;; stop) galera_stop;; - status) galera_status err;; + status) mysql_common_status err;; monitor) galera_monitor;; promote) galera_promote;; demote) galera_demote;; validate-all) exit $OCF_SUCCESS;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac # vi:sw=4:ts=4:et: