Page Menu
Home
ClusterLabs Projects
Search
Configure Global Search
Log In
Files
F3154527
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
44 KB
Referenced Files
None
Subscribers
None
View Options
diff --git a/heartbeat/docker b/heartbeat/docker
index 11b46a85a..2adcadee8 100755
--- a/heartbeat/docker
+++ b/heartbeat/docker
@@ -1,605 +1,605 @@
#!/bin/sh
#
# The docker HA resource agent creates and launches a docker container
# based off a supplied docker image. Containers managed by this agent
# are both created and removed upon the agent's start and stop actions.
#
# Copyright (c) 2014 David Vossel <davidvossel@gmail.com>
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
# Parameter defaults
OCF_RESKEY_reuse_default="0"
: ${OCF_RESKEY_reuse=${OCF_RESKEY_reuse_default}}
#######################################################################
meta_data()
{
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="docker" version="1.0">
<version>1.0</version>
<longdesc lang="en">
The docker HA resource agent creates and launches a docker container
based off a supplied docker image. Containers managed by this agent
are both created and removed upon the agent's start and stop actions.
</longdesc>
<shortdesc lang="en">Docker container resource agent.</shortdesc>
<parameters>
<parameter name="image" required="1" unique="0">
<longdesc lang="en">
The docker image to base this container off of.
</longdesc>
<shortdesc lang="en">docker image</shortdesc>
<content type="string"/>
</parameter>
<parameter name="name" required="0" unique="0">
<longdesc lang="en">
The name to give the created container. By default this will
be that resource's instance name.
</longdesc>
<shortdesc lang="en">docker container name</shortdesc>
<content type="string"/>
</parameter>
<parameter name="allow_pull" unique="0">
<longdesc lang="en">
Allow the image to be pulled from the configured docker registry when
the image does not exist locally. NOTE, this can drastically increase
the time required to start the container if the image repository is
pulled over the network.
</longdesc>
<shortdesc lang="en">Allow pulling non-local images</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="run_opts" required="0" unique="0">
<longdesc lang="en">
Add options to be appended to the 'docker run' command which is used
when creating the container during the start action. This option allows
users to do things such as setting a custom entry point and injecting
environment variables into the newly created container. Note the '-d'
option is supplied regardless of this value to force containers to run
in the background.
NOTE: Do not explicitly specify the --name argument in the run_opts. This
agent will set --name using either the resource's instance or the name
provided in the 'name' argument of this agent.
</longdesc>
<shortdesc lang="en">run options</shortdesc>
<content type="string"/>
</parameter>
<parameter name="run_cmd" required="0" unique="0">
<longdesc lang="en">
Specify a command to launch within the container once
it has initialized.
</longdesc>
<shortdesc lang="en">run command</shortdesc>
<content type="string"/>
</parameter>
<parameter name="mount_points" required="0" unique="0">
<longdesc lang="en">
A comma separated list of directories that the container is expecting to use.
The agent will ensure they exist by running 'mkdir -p'
</longdesc>
<shortdesc lang="en">Required mount points</shortdesc>
<content type="string"/>
</parameter>
<parameter name="monitor_cmd" required="0" unique="0">
<longdesc lang="en">
Specify the full path of a command to launch within the container to check
the health of the container. This command must return 0 to indicate that
the container is healthy. A non-zero return code will indicate that the
container has failed and should be recovered.
If 'docker exec' is supported, it is used to execute the command. If not,
nsenter is used.
Note: Using this method for monitoring processes inside a container
is not recommended, as containerd tries to track processes running
inside the container and does not deal well with many short-lived
processes being spawned. Ensure that your container monitors its
own processes and terminates on fatal error rather than invoking
a command from the outside.
</longdesc>
<shortdesc lang="en">monitor command</shortdesc>
<content type="string"/>
</parameter>
<parameter name="force_kill" required="0" unique="0">
<longdesc lang="en">
Kill a container immediately rather than waiting for it to gracefully
shutdown
</longdesc>
<shortdesc lang="en">force kill</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="reuse" required="0" unique="0">
<longdesc lang="en">
Allow the container to be reused once it is stopped. By default,
containers get removed once they are stopped. Enable this option
to have the particular one persist when this happens.
</longdesc>
<shortdesc lang="en">reuse container</shortdesc>
<content type="boolean" default="${OCF_RESKEY_reuse_default}"/>
</parameter>
<parameter name="query_docker_health" required="0" unique="0">
<longdesc lang="en">
Query the builtin healthcheck of docker (v1.12+) to determine health of the
container. If left empty or set to false it will not be used.
The healthcheck itself has to be configured within docker, e.g. via
HEALTHCHECK in Dockerfile. This option just queries in what condition
docker considers the container to be and lets ocf do its thing accordingly.
Note that the time a container is in "starting" state counts against the
monitor timeout.
This is an additional check besides the standard check for the container
to be running, and the optional monitor_cmd check. It doesn't disable or
override them, so all of them (if used) have to come back healthy for the
container to be considered healthy.
</longdesc>
<shortdesc lang="en">use healthcheck</shortdesc>
<content type="boolean"/>
</parameter>
</parameters>
<actions>
<action name="start" timeout="90s" />
<action name="stop" timeout="90s" />
<action name="monitor" timeout="30s" interval="30s" depth="0" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>
</resource-agent>
END
}
#######################################################################
REQUIRE_IMAGE_PULL=0
# Print brief usage information for this agent on stdout.
docker_usage()
{
cat <<END
usage: $0 {start|stop|monitor|validate-all|meta-data}
Expects to have a fully populated OCF RA-compliant environment set.
END
}
# Run the user-supplied monitor_cmd inside the container and map the
# result onto OCF return codes.
# Returns: OCF_SUCCESS when monitor_cmd is unset or exits 0;
#          OCF_ERR_GENERIC on any other non-zero exit; exits the agent
#          with OCF_ERR_ARGS on rc 127 (command not found inside the
#          container) since that is unrecoverable.
monitor_cmd_exec()
{
local rc=$OCF_SUCCESS
local out
if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
return $rc
fi
# Prefer 'docker exec' when available; otherwise fall back to nsenter
# targeting the container's PID with its mount/uts/ipc/net/pid namespaces.
if docker exec --help >/dev/null 2>&1; then
out=$(docker exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
rc=$?
else
out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(docker inspect --type=container --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1)
rc=$?
fi
if [ $rc -eq 127 ]; then
# 127 == shell could not find the command inside the container
ocf_log err "monitor cmd failed (rc=$rc), output: $out"
ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
# there is no recovering from this, exit immediately
exit $OCF_ERR_ARGS
elif [ $rc -ne 0 ]; then
ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out"
rc=$OCF_ERR_GENERIC
else
ocf_log debug "monitor cmd passed: exit code = $rc"
fi
return $rc
}
# Test whether $CONTAINER exists (in any state) according to docker.
# Returns OCF_SUCCESS (0) when it exists, 1 when docker reports
# "No such container", and exits the agent with OCF_ERR_GENERIC on any
# other inspect failure.
container_exists()
{
local err
# '2>&1 >/dev/null' deliberately captures only stderr into $err while
# discarding the inspect JSON on stdout (the order matters here).
err=$(docker inspect --type=container $CONTAINER 2>&1 >/dev/null)
if [ $? -ne $OCF_SUCCESS ]; then
case $err in
*"No such container"*)
# Return failure instead of exiting if container does not exist
return 1
;;
*)
# Exit if error running command
ocf_exit_reason "$err"
exit $OCF_ERR_GENERIC
;;
esac
fi
return $OCF_SUCCESS
}
# Remove the (inactive) container unless 'reuse' is enabled or the
# container does not exist. Returns 0 when nothing had to be done,
# otherwise the status of 'docker rm'.
remove_container()
{
if ocf_is_true "$OCF_RESKEY_reuse"; then
# never remove the container if we have reuse enabled.
return 0
fi
container_exists
if [ $? -ne 0 ]; then
# don't attempt to remove a container that doesn't exist
return 0
fi
ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
ocf_run docker rm $CONTAINER
}
# Basic state probe: verify the docker binary is present, the daemon
# answers 'docker version', and $CONTAINER exists with
# State.Running == true.
# Returns OCF_SUCCESS, OCF_NOT_RUNNING, OCF_ERR_INSTALLED or
# OCF_ERR_GENERIC accordingly.
docker_simple_status()
{
local val
if [ ! -x "$(command -v docker)" ]; then
- ocf_log err "docker is not installed on this host"
+ ocf_exit_reason "docker is not installed on this host"
return $OCF_ERR_INSTALLED
fi
# let's first check if the daemon is up and running.
VERSION_OUT=$(docker version)
version_ret=$?
if [ $version_ret -eq 1 ]; then
ocf_exit_reason "Docker service is in error state while checking for ${CONTAINER}, based on image, ${OCF_RESKEY_image}: ${VERSION_OUT}"
return $OCF_ERR_GENERIC
fi
container_exists
if [ $? -ne 0 ]; then
return $OCF_NOT_RUNNING
fi
# retrieve the 'Running' attribute for the container
val=$(docker inspect --type=container --format {{.State.Running}} $CONTAINER 2>/dev/null)
if [ $? -ne 0 ]; then
#not running as a result of container not being found
return $OCF_NOT_RUNNING
fi
if ocf_is_true "$val"; then
# container exists and is running
return $OCF_SUCCESS
fi
return $OCF_NOT_RUNNING
}
# Query docker's builtin healthcheck (docker 1.12+) when
# query_docker_health is enabled; no-op (returns 0) otherwise.
# Returns OCF_SUCCESS only when the reported status is "healthy";
# "starting" is waited out in a 1s poll loop (the surrounding monitor
# timeout is expected to preempt an endless wait).
docker_health_status()
{
if ocf_is_true "$OCF_RESKEY_query_docker_health"; then
local val
container_exists
if [ $? -ne 0 ]; then
return $OCF_NOT_RUNNING
fi
# retrieve the 'Health' attribute for the container
# This is a bash-style do-while loop to wait until instance is started.
# if starting takes longer than monitor timeout then upstream will make this fail.
while
val=$(docker inspect --type=container --format {{.State.Health.Status}} $CONTAINER 2>/dev/null)
if [ $? -ne 0 ]; then
#not healthy as a result of container not being found
return $OCF_NOT_RUNNING
fi
test "$val" = "starting"
do
sleep 1
done
if [ "$val" = "healthy" ]; then
# container exists and is healthy
return $OCF_SUCCESS
fi
return $OCF_NOT_RUNNING
fi
return 0
}
# Monitor action: run the basic running-state probe, then the optional
# docker healthcheck, then the optional monitor_cmd. The first
# non-success result is returned; otherwise the monitor_cmd result is
# the final verdict.
docker_monitor()
{
local status
docker_simple_status
status=$?
[ $status -eq 0 ] || return $status
docker_health_status
status=$?
[ $status -eq 0 ] || return $status
monitor_cmd_exec
}
# Ensure every directory in the comma-separated OCF_RESKEY_mount_points
# list exists ('mkdir -p'), so bind-mount targets are present before
# the container starts.
docker_create_mounts() {
oldIFS="$IFS"
IFS=","
# split on commas only (whitespace inside a path is preserved)
set -- $OCF_RESKEY_mount_points
IFS="$oldIFS"
for directory in "$@"; do
mkdir -p "$directory"
done
}
# Start action: create required mount points, pull the image when
# image_exists flagged it, then either restart an existing container
# (reuse mode) or 'docker run' a fresh one. Blocks until
# monitor_cmd_exec passes; a container that dies meanwhile fails the
# action. The cluster's start timeout is expected to preempt the wait
# loop if the monitor never passes.
docker_start()
{
docker_create_mounts
local run_opts="-d --name=${CONTAINER}"
# check to see if the container has already started
docker_simple_status
if [ $? -eq $OCF_SUCCESS ]; then
return $OCF_SUCCESS
fi
if [ -n "$OCF_RESKEY_run_opts" ]; then
run_opts="$run_opts $OCF_RESKEY_run_opts"
fi
if [ $REQUIRE_IMAGE_PULL -eq 1 ]; then
ocf_log notice "Beginning pull of image, ${OCF_RESKEY_image}"
docker pull "${OCF_RESKEY_image}"
if [ $? -ne 0 ]; then
ocf_exit_reason "failed to pull image ${OCF_RESKEY_image}"
return $OCF_ERR_GENERIC
fi
fi
if ocf_is_true "$OCF_RESKEY_reuse" && container_exists; then
ocf_log info "starting existing container $CONTAINER."
ocf_run docker start $CONTAINER
else
# make sure any previous container matching our container name is cleaned up first.
# we already know at this point it wouldn't be running
remove_container
ocf_log info "running container $CONTAINER for the first time"
ocf_run docker run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd
fi
# $? is the status of whichever 'docker start'/'docker run' ran above
if [ $? -ne 0 ]; then
ocf_exit_reason "docker failed to launch container"
return $OCF_ERR_GENERIC
fi
# wait for monitor to pass before declaring that the container is started
while true; do
docker_simple_status
if [ $? -ne $OCF_SUCCESS ]; then
ocf_exit_reason "Newly created docker container exited after start"
return $OCF_ERR_GENERIC
fi
monitor_cmd_exec
if [ $? -eq $OCF_SUCCESS ]; then
ocf_log notice "Container $CONTAINER started successfully"
return $OCF_SUCCESS
fi
ocf_exit_reason "waiting on monitor_cmd to pass after start"
sleep 1
done
}
# Stop action: gracefully stop (or 'docker kill' when force_kill is
# set) and then remove the container. The graceful-stop grace period
# is derived from the cluster stop timeout (milliseconds) minus a 10s
# safety margin, floored at 10 seconds.
docker_stop()
{
local timeout=60
docker_simple_status
ret=$?
if [ $ret -eq $OCF_NOT_RUNNING ]; then
remove_container
return $OCF_SUCCESS
elif [ $ret -eq $OCF_ERR_GENERIC ]; then
return $OCF_ERR_GENERIC
fi
if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000) -10 ))
if [ $timeout -lt 10 ]; then
timeout=10
fi
fi
if ocf_is_true "$OCF_RESKEY_force_kill"; then
ocf_run docker kill $CONTAINER
else
ocf_log debug "waiting $timeout second[s] before killing container"
ocf_run docker stop -t=$timeout $CONTAINER
fi
if [ $? -ne 0 ]; then
ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
return $OCF_ERR_GENERIC
fi
remove_container
if [ $? -ne 0 ]; then
ocf_exit_reason "Failed to remove stopped container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
return $OCF_ERR_GENERIC
fi
return $OCF_SUCCESS
}
# Check whether OCF_RESKEY_image is present in the local image store,
# accounting for an optional registry/repository prefix and an optional
# tag (defaulting to "latest"). When allow_pull is enabled, a missing
# image is accepted and REQUIRE_IMAGE_PULL is set so docker_start pulls
# it later. Returns 0 when usable, 1 otherwise.
image_exists()
{
# if no tag was specified, use default "latest"
local COLON_FOUND=0
local SLASH_FOUND=0
local SERVER_NAME=""
local IMAGE_NAME="${OCF_RESKEY_image}"
local IMAGE_TAG="latest"
SLASH_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o '/' | grep -c .)"
if [ ${SLASH_FOUND} -ge 1 ]; then
# split "server/.../name" into the server part and the bare name
SERVER_NAME="$(echo ${IMAGE_NAME} | cut -d / -f 1-${SLASH_FOUND})"
IMAGE_NAME="$(echo ${IMAGE_NAME} | awk -F'/' '{print $NF}')"
fi
COLON_FOUND="$(echo "${IMAGE_NAME}" | grep -o ':' | grep -c .)"
if [ ${COLON_FOUND} -ge 1 ]; then
# split "name:tag" (last colon wins for the tag)
IMAGE_TAG="$(echo ${IMAGE_NAME} | awk -F':' '{print $NF}')"
IMAGE_NAME="$(echo ${IMAGE_NAME} | cut -d : -f 1-${COLON_FOUND})"
fi
# IMAGE_NAME might be following formats:
# - image
# - repository:port/image
# - docker.io/image (some distro will display "docker.io/" as prefix)
docker images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$"
if [ $? -eq 0 ]; then
# image found
return 0
fi
if ocf_is_true "$OCF_RESKEY_allow_pull"; then
REQUIRE_IMAGE_PULL=1
ocf_log notice "Image (${OCF_RESKEY_image}) does not exist locally but will be pulled during start"
return 0
fi
# image not found.
return 1
}
# validate-all action: require the docker binary and the 'image'
# parameter, check the monitor fallback tooling, and ensure the image
# is available (locally or via pull). Exits with OCF_ERR_CONFIGURED on
# any unmet requirement.
docker_validate()
{
check_binary docker
if [ -z "$OCF_RESKEY_image" ]; then
ocf_exit_reason "'image' option is required"
exit $OCF_ERR_CONFIGURED
fi
if [ -n "$OCF_RESKEY_monitor_cmd" ]; then
# nsenter is only required as a fallback when 'docker exec' is
# unavailable. The previous test '[ ! $? ]' was always false
# ('!' applied to the non-empty string "$?"), so the nsenter
# check never ran; compare the exit status properly instead.
if ! docker exec --help >/dev/null 2>&1; then
ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified"
check_binary nsenter
fi
fi
image_exists
if [ $? -ne 0 ]; then
ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found."
exit $OCF_ERR_CONFIGURED
fi
return $OCF_SUCCESS
}
# TODO :
# When a user starts plural clones in a node in globally-unique, a user cannot appoint plural name parameters.
# When a user appoints reuse, the resource agent cannot connect plural clones with a container.
if ocf_is_true "$OCF_RESKEY_CRM_meta_globally_unique"; then
if [ -n "$OCF_RESKEY_name" ]; then
# An explicit 'name' cannot be shared by multiple clone/master
# instances on one node: container names must be unique.
if [ -n "$OCF_RESKEY_CRM_meta_clone_node_max" ] && [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ]
then
ocf_exit_reason "Cannot make plural clones from the same name parameter."
exit $OCF_ERR_CONFIGURED
fi
if [ -n "$OCF_RESKEY_CRM_meta_master_node_max" ] && [ "$OCF_RESKEY_CRM_meta_master_node_max" -ne 1 ]
then
ocf_exit_reason "Cannot make plural master from the same name parameter."
exit $OCF_ERR_CONFIGURED
fi
fi
# globally-unique instance names contain ':'; map it to '-' for the
# container name
: ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`}
else
: ${OCF_RESKEY_name=${OCF_RESOURCE_INSTANCE}}
fi
if [ -n "$OCF_RESKEY_container" ]; then
# we'll keep the container attribute around for a bit in order not to break
# any existing deployments. The 'name' attribute is preferred now though.
CONTAINER=$OCF_RESKEY_container
ocf_log warn "The 'container' attribute is deprecated"
else
CONTAINER=$OCF_RESKEY_name
fi
# Dispatch the requested OCF action.
case $__OCF_ACTION in
meta-data) meta_data
exit $OCF_SUCCESS;;
start)
docker_validate
docker_start;;
stop) docker_stop;;
monitor) docker_monitor;;
validate-all) docker_validate;;
usage|help) docker_usage
exit $OCF_SUCCESS
;;
*) docker_usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
rc=$?
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
exit $rc
diff --git a/heartbeat/podman b/heartbeat/podman
index 6be24ac79..53867bff2 100755
--- a/heartbeat/podman
+++ b/heartbeat/podman
@@ -1,628 +1,628 @@
#!/bin/sh
#
# The podman HA resource agent creates and launches a podman container
# based off a supplied podman image. Containers managed by this agent
# are both created and removed upon the agent's start and stop actions.
#
# Copyright (c) 2014 David Vossel <davidvossel@gmail.com>
# Michele Baldessari <michele@acksyn.org>
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
# Parameter defaults
OCF_RESKEY_reuse_default="0"
: ${OCF_RESKEY_reuse=${OCF_RESKEY_reuse_default}}
#######################################################################
meta_data()
{
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="podman" version="1.0">
<version>1.0</version>
<longdesc lang="en">
The podman HA resource agent creates and launches a podman container
based off a supplied podman image. Containers managed by this agent
are both created and removed upon the agent's start and stop actions.
</longdesc>
<shortdesc lang="en">Podman container resource agent.</shortdesc>
<parameters>
<parameter name="image" required="1" unique="0">
<longdesc lang="en">
The podman image to base this container off of.
</longdesc>
<shortdesc lang="en">podman image</shortdesc>
<content type="string"/>
</parameter>
<parameter name="name" required="0" unique="0">
<longdesc lang="en">
The name to give the created container. By default this will
be that resource's instance name.
</longdesc>
<shortdesc lang="en">podman container name</shortdesc>
<content type="string"/>
</parameter>
<parameter name="allow_pull" unique="0">
<longdesc lang="en">
Allow the image to be pulled from the configured podman registry when
the image does not exist locally. NOTE, this can drastically increase
the time required to start the container if the image repository is
pulled over the network.
</longdesc>
<shortdesc lang="en">Allow pulling non-local images</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="run_opts" required="0" unique="0">
<longdesc lang="en">
Add options to be appended to the 'podman run' command which is used
when creating the container during the start action. This option allows
users to do things such as setting a custom entry point and injecting
environment variables into the newly created container. Note the '-d'
option is supplied regardless of this value to force containers to run
in the background.
NOTE: Do not explicitly specify the --name argument in the run_opts. This
agent will set --name using either the resource's instance or the name
provided in the 'name' argument of this agent.
</longdesc>
<shortdesc lang="en">run options</shortdesc>
<content type="string"/>
</parameter>
<parameter name="run_cmd" required="0" unique="0">
<longdesc lang="en">
Specify a command to launch within the container once
it has initialized.
</longdesc>
<shortdesc lang="en">run command</shortdesc>
<content type="string"/>
</parameter>
<parameter name="mount_points" required="0" unique="0">
<longdesc lang="en">
A comma separated list of directories that the container is expecting to use.
The agent will ensure they exist by running 'mkdir -p'
</longdesc>
<shortdesc lang="en">Required mount points</shortdesc>
<content type="string"/>
</parameter>
<parameter name="monitor_cmd" required="0" unique="0">
<longdesc lang="en">
Specify the full path of a command to launch within the container to check
the health of the container. This command must return 0 to indicate that
the container is healthy. A non-zero return code will indicate that the
container has failed and should be recovered.
Note: Using this method for monitoring processes inside a container
is not recommended, as containerd tries to track processes running
inside the container and does not deal well with many short-lived
processes being spawned. Ensure that your container monitors its
own processes and terminates on fatal error rather than invoking
a command from the outside.
</longdesc>
<shortdesc lang="en">monitor command</shortdesc>
<content type="string"/>
</parameter>
<parameter name="force_kill" required="0" unique="0">
<longdesc lang="en">
Kill a container immediately rather than waiting for it to gracefully
shutdown
</longdesc>
<shortdesc lang="en">force kill</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="reuse" required="0" unique="0">
<longdesc lang="en">
Allow the container to be reused once it is stopped. By default,
containers get removed once they are stopped. Enable this option
to have the particular one persist when this happens.
</longdesc>
<shortdesc lang="en">reuse container</shortdesc>
<content type="boolean" default="${OCF_RESKEY_reuse_default}"/>
</parameter>
<parameter name="drop_in_dependency" required="0" unique="0">
<longdesc lang="en">
Use transient drop-in files to add extra dependencies to the systemd
scopes associated to the container. During reboot, this prevents systemd
to stop the container before pacemaker.
</longdesc>
<shortdesc lang="en">drop-in dependency</shortdesc>
<content type="boolean"/>
</parameter>
</parameters>
<actions>
<action name="start" timeout="90s" />
<action name="stop" timeout="90s" />
<action name="monitor" timeout="30s" interval="30s" depth="0" />
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" />
</actions>
</resource-agent>
END
}
#######################################################################
REQUIRE_IMAGE_PULL=0
# Print brief usage information for this agent on stdout.
podman_usage()
{
cat <<END
usage: $0 {start|stop|monitor|validate-all|meta-data}
Expects to have a fully populated OCF RA-compliant environment set.
END
}
# Run OCF_RESKEY_monitor_cmd inside the container via 'podman exec'
# and translate podman's exit codes into OCF return codes. monitor_cmd
# is always non-empty here: it is defaulted to /bin/true at the bottom
# of this script.
monitor_cmd_exec()
{
local rc=$OCF_SUCCESS
local out
out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
rc=$?
# 125: no container with name or ID ${CONTAINER} found
# 126: container state improper (not running)
# 127: any other error
# 255: podman 2+: container not running
case "$rc" in
125|126|255)
rc=$OCF_NOT_RUNNING
;;
0)
ocf_log debug "monitor cmd passed: exit code = $rc"
;;
*)
ocf_exit_reason "monitor cmd failed (rc=$rc), output: $out"
rc=$OCF_ERR_GENERIC
;;
esac
return $rc
}
# Probe container existence with 'podman exec' rather than
# 'podman inspect' (inspect has performance issues under IO load; see
# podman_container_id). Only exit code 125 ("no container with name or
# ID ... found") means the container does not exist; any other status,
# including "not running", counts as existing.
container_exists()
{
local rc
local out
out=$(podman exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
rc=$?
# 125: no container with name or ID ${CONTAINER} found
if [ $rc -ne 125 ]; then
return 0
fi
return 1
}
# Remove the stopped container unless 'reuse' is enabled or it does
# not exist. Works around rhbz#1841485 by force-removing when lingering
# Exec sessions keep a plain 'podman rm' from succeeding.
remove_container()
{
local rc
local execids
if ocf_is_true "$OCF_RESKEY_reuse"; then
# never remove the container if we have reuse enabled.
return 0
fi
container_exists
if [ $? -ne 0 ]; then
# don't attempt to remove a container that doesn't exist
return 0
fi
ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
# -v also removes anonymous volumes created with the container
ocf_run podman rm -v $CONTAINER
rc=$?
if [ $rc -ne 0 ]; then
# due to a podman bug (rhbz#1841485), sometimes a stopped
# container can still be associated with Exec sessions, in
# which case the "podman rm" has to be forced
execids=$(podman inspect $CONTAINER --format '{{len .ExecIDs}}')
if [ "$execids" -ne "0" ]; then
ocf_log warn "Inactive container ${CONTAINER} has lingering exec sessions. Force-remove it."
ocf_run podman rm -f $CONTAINER
rc=$?
fi
fi
return $rc
}
# Coarse liveness probe built on 'podman exec': anything other than
# OCF_SUCCESS from monitor_cmd_exec is reported as OCF_NOT_RUNNING,
# with no distinction between stopped and non-existent.
podman_simple_status()
{
local status
monitor_cmd_exec
status=$?
[ $status -eq $OCF_SUCCESS ] && return $OCF_SUCCESS
return $OCF_NOT_RUNNING
}
# Monitor action: delegate entirely to monitor_cmd_exec, because
# 'podman exec' is less prone to performance issues under IO load than
# 'podman inspect'. Probes only need to report running vs not running;
# container_exists is available when stopped must be distinguished from
# non-existent.
podman_monitor()
{
monitor_cmd_exec
}
# Ensure every directory in the comma-separated OCF_RESKEY_mount_points
# list exists ('mkdir -p'), so bind-mount targets are present before
# the container starts.
podman_create_mounts() {
oldIFS="$IFS"
IFS=","
# split on commas only (whitespace inside a path is preserved)
set -- $OCF_RESKEY_mount_points
IFS="$oldIFS"
for directory in "$@"; do
mkdir -p "$directory"
done
}
# Retrieve the container ID by doing a "podman ps" rather than
# a "podman inspect", because the latter has performance issues
# under IO load.
# We could have run "podman start $CONTAINER" to get the ID back
# but if the container is stopped, the command will return a
# name instead of a container ID. This would break us.
# Prints the full (no-trunc) ID of the first ps entry whose name
# matches $CONTAINER, or nothing when no match is found.
podman_container_id()
{
podman ps --no-trunc --format '{{.ID}} {{.Names}}' | grep -F -w -m1 "$CONTAINER" | cut -d' ' -f1
}
# Create transient systemd drop-ins for the libpod and conmon scopes of
# container $cid so systemd orders them Before=pacemaker.service,
# preventing systemd from stopping the container out from under
# pacemaker during reboot.
# Arguments: $1 - container ID (required)
# Returns: OCF_ERR_GENERIC when no ID was supplied; otherwise the
#          status of the drop-in creation / daemon-reload.
create_transient_drop_in_dependency()
{
local cid=$1
local rc=$OCF_SUCCESS
if [ -z "$cid" ]; then
- ocf_log error "Container ID not found for \"$CONTAINER\". Not creating drop-in dependency"
+ ocf_exit_reason "Container ID not found for \"$CONTAINER\". Not creating drop-in dependency"
return $OCF_ERR_GENERIC
fi
ocf_log info "Creating drop-in dependency for \"$CONTAINER\" ($cid)"
for scope in "libpod-$cid.scope.d" "libpod-conmon-$cid.scope.d"; do
if [ $rc -eq $OCF_SUCCESS ] && [ ! -d /run/systemd/transient/"$scope" ]; then
mkdir -p /run/systemd/transient/"$scope" && \
printf "[Unit]\nBefore=pacemaker.service" > /run/systemd/transient/"$scope"/dep.conf && \
chmod ago+r /run/systemd/transient/"$scope" /run/systemd/transient/"$scope"/dep.conf
rc=$?
fi
done
if [ $rc -ne $OCF_SUCCESS ]; then
ocf_log error "Could not create drop-in dependency for \"$CONTAINER\" ($cid)"
else
# drop-ins only take effect after a daemon reload
systemctl daemon-reload
rc=$?
if [ $rc -ne $OCF_SUCCESS ]; then
ocf_log error "Could not refresh service definition after creating drop-in for \"$CONTAINER\""
fi
fi
return $rc
}
# First-time 'podman run' with workarounds for two known podman issues:
# - exit 125 from internal storage corruption: purge the storage-layer
#   entry and retry once (unless the 125 came from an "unknown flag",
#   which is a genuine configuration error);
# - exit 127 "cgroup ... scope not found" (rhbz#1972209, podman 3.0.x
#   race): recreate the container until the error clears or the start
#   timeout preempts us.
# Arguments: $1 - run options, $2 - image, $3 - command
# Returns the final 'podman run' exit status.
run_new_container()
{
local opts=$1
local image=$2
local cmd=$3
local rc
ocf_log info "running container $CONTAINER for the first time"
out=$(podman run $opts $image $cmd 2>&1)
rc=$?
if [ -n "$out" ]; then
# collapse whitespace so the output fits a single log line
out="$(echo "$out" | tr -s ' \t\r\n' ' ')"
if [ $rc -eq 0 ]; then
ocf_log info "$out"
else
ocf_log err "$out"
fi
fi
if [ $rc -eq 125 ]; then
# If an internal podman error occurred, it might be because
# the internal storage layer still references an old container
# with the same name, even though podman itself thinks there
# is no such container. If so, purge the storage layer to try
# to clean the corruption and try again.
if echo "$out" | grep -q "unknown.*flag"; then
ocf_exit_reason "$out"
return $rc
fi
ocf_log warn "Internal podman error while creating new container $CONTAINER. Retrying."
ocf_run podman rm --storage $CONTAINER
ocf_run podman run $opts $image $cmd
rc=$?
elif [ $rc -eq 127 ]; then
# rhbz#1972209: podman 3.0.x seems to be hit by a race
# where the cgroup is not yet set up properly when the OCI
# runtime configures the container. If that happens, recreate
# the container as long as we get the same error code or
# until start timeout preempts us.
while [ $rc -eq 127 ] && (echo "$out" | grep -q "cgroup.*scope not found") ; do
ocf_log warn "Internal podman error while assigning cgroup. Retrying."
# Arbitrary sleep to prevent consuming all CPU while looping
sleep 1
podman rm -f "$CONTAINER"
out=$(podman run $opts $image $cmd 2>&1)
rc=$?
done
# Log the created container ID if it succeeded
if [ $rc -eq 0 ]; then
ocf_log info "$out"
fi
fi
return $rc
}
# Start action: prepare mount points, optionally pull the image, then
# start an existing container (reuse mode) or run a new one; optionally
# install the systemd drop-in dependency, and finally wait for the
# monitor to pass before declaring success.
podman_start()
{
local cid
local rc
podman_create_mounts
local run_opts="-d --name=${CONTAINER}"
# check to see if the container has already started
podman_simple_status
if [ $? -eq $OCF_SUCCESS ]; then
return $OCF_SUCCESS
fi
if [ -n "$OCF_RESKEY_run_opts" ]; then
run_opts="$run_opts $OCF_RESKEY_run_opts"
fi
if [ $REQUIRE_IMAGE_PULL -eq 1 ]; then
ocf_log notice "Beginning pull of image, ${OCF_RESKEY_image}"
podman pull "${OCF_RESKEY_image}"
if [ $? -ne 0 ]; then
ocf_exit_reason "failed to pull image ${OCF_RESKEY_image}"
return $OCF_ERR_GENERIC
fi
fi
if ocf_is_true "$OCF_RESKEY_reuse" && container_exists; then
ocf_log info "starting existing container $CONTAINER."
ocf_run podman start $CONTAINER
else
# make sure any previous container matching our container name is cleaned up first.
# we already know at this point it wouldn't be running
remove_container
run_new_container "$run_opts" $OCF_RESKEY_image "$OCF_RESKEY_run_cmd"
if [ $? -eq 125 ]; then
return $OCF_ERR_GENERIC
fi
fi
# NOTE(review): after the if/fi above, $? is the status of the last
# command of the branch taken; in the else branch that is the inner
# 'if' compound, which yields 0 for any non-125 failure of
# run_new_container, so such failures are only caught later by the
# monitor loop below — confirm whether this is intended.
rc=$?
# if the container was stopped or didn't exist before, systemd
# removed the libpod* scopes. So always try to recreate the drop-ins
if [ $rc -eq 0 ] && ocf_is_true "$OCF_RESKEY_drop_in_dependency"; then
cid=$(podman_container_id)
create_transient_drop_in_dependency "$cid"
rc=$?
fi
if [ $rc -ne 0 ]; then
ocf_exit_reason "podman failed to launch container (rc: $rc)"
return $OCF_ERR_GENERIC
fi
# wait for monitor to pass before declaring that the container is started
while true; do
podman_simple_status
if [ $? -ne $OCF_SUCCESS ]; then
ocf_exit_reason "Newly created podman container exited after start"
return $OCF_ERR_GENERIC
fi
monitor_cmd_exec
if [ $? -eq $OCF_SUCCESS ]; then
ocf_log notice "Container $CONTAINER started successfully"
return $OCF_SUCCESS
fi
ocf_exit_reason "waiting on monitor_cmd to pass after start"
sleep 1
done
}
# Stop action: stop (or 'podman kill' when force_kill is set) and then
# remove the container. The grace period is the cluster stop timeout
# (milliseconds) minus a 10s safety margin, floored at 10 seconds.
# Tolerates podman's spurious rc=125 when the conmon process died but
# the container already reached the "stopped" state.
podman_stop()
{
local timeout=60
local rc
podman_simple_status
if [ $? -eq $OCF_NOT_RUNNING ]; then
remove_container
return $OCF_SUCCESS
fi
if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000) -10 ))
if [ $timeout -lt 10 ]; then
timeout=10
fi
fi
if ocf_is_true "$OCF_RESKEY_force_kill"; then
ocf_run podman kill $CONTAINER
rc=$?
else
ocf_log debug "waiting $timeout second[s] before killing container"
ocf_run podman stop -t=$timeout $CONTAINER
rc=$?
# on stop, systemd will automatically delete any transient
# drop-in conf that has been created earlier
fi
if [ $rc -ne 0 ]; then
# If the stop failed, it could be because the controlling conmon
# process died unexpectedly. If so, a generic error code is returned
# but the associated container exit code is -1. If that's the case,
# assume there's no failure and continue with the rm as usual.
if [ $rc -eq 125 ] && \
podman inspect --format '{{.State.Status}}:{{.State.ExitCode}}' $CONTAINER | grep -wq "stopped:-1"; then
ocf_log warn "Container ${CONTAINER} had an unexpected stop outcome. Trying to remove it anyway."
else
ocf_exit_reason "Failed to stop container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
return $OCF_ERR_GENERIC
fi
fi
remove_container
if [ $? -ne 0 ]; then
ocf_exit_reason "Failed to remove stopped container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
return $OCF_ERR_GENERIC
fi
return $OCF_SUCCESS
}
# Check that the configured image is available in the local store; when
# allow_pull is enabled, a missing image is accepted and flagged for
# pulling at start time via REQUIRE_IMAGE_PULL.
# Returns 0 when usable, 1 otherwise.
image_exists()
{
if podman image exists "${OCF_RESKEY_image}"; then
# image found
return 0
fi
if ocf_is_true "$OCF_RESKEY_allow_pull"; then
REQUIRE_IMAGE_PULL=1
ocf_log notice "Image (${OCF_RESKEY_image}) does not exist locally but will be pulled during start"
return 0
fi
# image not found.
return 1
}
# validate-all action: require the podman binary and the 'image'
# parameter, and ensure the image is available (locally or via pull).
# Exits with OCF_ERR_CONFIGURED on any unmet requirement.
podman_validate()
{
check_binary podman
if [ -z "$OCF_RESKEY_image" ]; then
ocf_exit_reason "'image' option is required"
exit $OCF_ERR_CONFIGURED
fi
image_exists
if [ $? -ne 0 ]; then
ocf_exit_reason "base image, ${OCF_RESKEY_image}, could not be found."
exit $OCF_ERR_CONFIGURED
fi
return $OCF_SUCCESS
}
# TODO :
# When a user starts plural clones in a node in globally-unique, a user cannot appoint plural name parameters.
# When a user appoints reuse, the resource agent cannot connect plural clones with a container.
if ocf_is_true "$OCF_RESKEY_CRM_meta_globally_unique"; then
if [ -n "$OCF_RESKEY_name" ]; then
# An explicit 'name' cannot be shared by multiple clone/master
# instances on one node: container names must be unique.
if [ -n "$OCF_RESKEY_CRM_meta_clone_node_max" ] && [ "$OCF_RESKEY_CRM_meta_clone_node_max" -ne 1 ]
then
ocf_exit_reason "Cannot make plural clones from the same name parameter."
exit $OCF_ERR_CONFIGURED
fi
if [ -n "$OCF_RESKEY_CRM_meta_master_node_max" ] && [ "$OCF_RESKEY_CRM_meta_master_node_max" -ne 1 ]
then
ocf_exit_reason "Cannot make plural master from the same name parameter."
exit $OCF_ERR_CONFIGURED
fi
fi
# globally-unique instance names contain ':'; map it to '-' for the
# container name
: ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`}
else
: ${OCF_RESKEY_name=${OCF_RESOURCE_INSTANCE}}
fi
CONTAINER=$OCF_RESKEY_name
# Note: we currently monitor podman containers with the "podman exec"
# command, so make sure that invocation is always valid by enforcing the
# exec command to be non-empty
: ${OCF_RESKEY_monitor_cmd:=/bin/true}
# When OCF_RESKEY_drop_in_dependency is not populated, we
# look at another file-based way of enabling the option.
# Otherwise, consider it disabled.
if [ -z "$OCF_RESKEY_drop_in_dependency" ]; then
if [ -f "/etc/sysconfig/podman_drop_in" ] || \
[ -f "/etc/default/podman_drop_in" ]; then
OCF_RESKEY_drop_in_dependency=yes
fi
fi
# Dispatch the requested OCF action.
case $__OCF_ACTION in
meta-data) meta_data
exit $OCF_SUCCESS;;
start)
podman_validate
podman_start;;
stop) podman_stop;;
monitor) podman_monitor;;
validate-all) podman_validate;;
usage|help) podman_usage
exit $OCF_SUCCESS
;;
*) podman_usage
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
rc=$?
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
exit $rc
diff --git a/heartbeat/zabbixserver b/heartbeat/zabbixserver
index b4b5b7c2b..cb9e023a2 100755
--- a/heartbeat/zabbixserver
+++ b/heartbeat/zabbixserver
@@ -1,315 +1,315 @@
#!/bin/sh
#
#
# zabbixserver OCF RA for zabbix_server daemon
#
# Copyright (c) 2012 Krzysztof Gajdemski <songo@debian.org.pl>
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Further, this software is distributed without any warranty that it is
# free of the rightful claim of any third person regarding infringement
# or the like. Any license provided herein, whether implied or
# otherwise, applies only to this software file. Patent licenses, if
# any, provided herein do not apply to combinations of this program with
# other software, or any other product whatsoever.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
#######################################################################
#
# Parameter defaults
#
OCF_RESKEY_binary_default="zabbix_server"
OCF_RESKEY_pid_default="/var/run/zabbix-server/zabbix_server.pid"
OCF_RESKEY_config_default=""

# apply the defaults only where the user left the parameter unset
OCF_RESKEY_binary="${OCF_RESKEY_binary-$OCF_RESKEY_binary_default}"
OCF_RESKEY_pid="${OCF_RESKEY_pid-$OCF_RESKEY_pid_default}"
OCF_RESKEY_config="${OCF_RESKEY_config-$OCF_RESKEY_config_default}"

# seconds to sleep between polls while waiting for start/stop cleanup
sleepint=1
#
# Functions
#
#
# meta-data action: print the OCF resource agent metadata XML
# (parameters and supported actions) on stdout. The here-document is
# unquoted so the *_default values are interpolated into the XML.
#
zabbixserver_meta_data() {
cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="zabbixserver" version="0.0.1">
<version>1.0</version>
<longdesc lang="en">
This is a Zabbix server Resource Agent for zabbix_server monitoring
daemon. See: http://www.zabbix.com/
</longdesc>
<shortdesc lang="en">Zabbix server resource agent</shortdesc>
<parameters>
<parameter name="binary" unique="0" required="0">
<longdesc lang="en">
Location of the zabbix_server binary.
</longdesc>
<shortdesc lang="en">Zabbix server binary</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter>
<parameter name="pid" unique="1" required="0">
<longdesc lang="en">
Path to zabbix_server pidfile. As it's created by daemon itself
it must be the same as specified in the Zabbix configuration file
with parameter 'PidFile='.
</longdesc>
<shortdesc lang="en">Path to pidfile</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter>
<parameter name="config" unique="1" required="0">
<longdesc lang="en">
Path to zabbix_server configuration file. Assumed server default
if not specified.
</longdesc>
<shortdesc lang="en">Path to configuration file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" />
</parameter>
</parameters>
<actions>
<action name="start" timeout="20s" />
<action name="stop" timeout="20s" />
<action name="monitor" timeout="20s" interval="10s" depth="0"/>
<action name="validate-all" timeout="20s" />
<action name="meta-data" timeout="5s" />
</actions>
</resource-agent>
END
}
#######################################################################
#
# usage action: print a short usage synopsis on stdout.
#
zabbixserver_usage() {
	printf '%s\n' \
		"usage: $0 {start|stop|monitor|validate-all|meta-data}" \
		"Expects to have a fully populated OCF RA-compliant environment set."
}
#
# Print on stdout the PID stored on the first line of the given
# pidfile. Returns 1 when the pidfile does not exist or its first
# line contains no PID, 0 otherwise.
#
getpid() {
    local pid
    # quote "$1": the path may be empty or contain spaces
    [ -f "$1" ] || return 1
    # print the first line only, and only when it holds a digit
    pid=$(sed -n '1 { /[0-9]/p }' "$1")
    # an existing but PID-less pidfile must also count as "not found",
    # matching the contract above (the old code returned 0 here)
    [ -n "$pid" ] || return 1
    printf '%s\n' "$pid"
    return 0
}
#
# Verify that the server configuration file, when one was specified,
# actually exists. During probes a missing file is only logged (the
# file may live on shared storage not yet mounted here); otherwise it
# is a hard error. Returns 0 when OK, 1 on error.
#
# NOTE: the source contained a leftover diff hunk here; the patched
# line (ocf_exit_reason) is kept, the pre-image ocf_log line dropped.
#
check_config() {
    # check only when it is specified by user
    if [ -n "$1" ] && [ ! -f "$1" ]; then
        if ocf_is_probe; then
            ocf_log info "Can't read configuration file $1 during probe"
        else
            ocf_exit_reason "Can't read configuration file $1"
            return 1
        fi
    fi
    return 0
}
#
# Launch the Zabbix server daemon, appending "--config <file>" when
# the user supplied a configuration file. Returns ocf_run's status.
#
startserver() {
    local cmd

    cmd=$OCF_RESKEY_binary
    if [ -n "$OCF_RESKEY_config" ]; then
        cmd="$cmd --config $OCF_RESKEY_config"
    fi

    ocf_log debug "Starting server using command: $cmd"
    # $cmd is deliberately unquoted so the options word-split into
    # separate arguments
    ocf_run $cmd
}
#
# Check whether the process with the given PID exists.
# Returns 0 when it is alive (or at least visible), non-zero otherwise.
#
process_status() {
    local pid
    pid=$1
    # signal 0 only tests for existence, it delivers nothing; quote
    # "$pid" so an empty value cannot collapse into a bare 'kill -s 0'
    ocf_run -q kill -s 0 "$pid" 2> /dev/null 1>&2
}
#
# start action: launch the daemon and wait until the monitor confirms
# it is running. Returns OCF_SUCCESS when running (or already
# running), OCF_ERR_GENERIC when the daemon could not be launched;
# exits on an unexpected monitor status.
#
# NOTE: the source contained a leftover diff hunk here; the patched
# line (ocf_exit_reason) is kept, the pre-image ocf_log line dropped.
#
zabbixserver_start() {
    local rc

    # check the resource status
    zabbixserver_monitor
    rc=$?
    case "$rc" in
        $OCF_SUCCESS)
            ocf_log info "Resource is already running"
            return $OCF_SUCCESS
            ;;
        $OCF_NOT_RUNNING)
            ;;
        *)
            exit $OCF_ERR_GENERIC
            ;;
    esac

    # remove stale pidfile if it exists
    if [ -f "$OCF_RESKEY_pid" ]; then
        ocf_log info "Removing stale pidfile"
        rm "$OCF_RESKEY_pid"
    fi

    startserver
    if [ $? -ne 0 ]; then
        ocf_exit_reason "Can't start Zabbix server"
        return $OCF_ERR_GENERIC
    fi

    # Poll until the daemon has written its pidfile and the monitor
    # reports success; the cluster-side action timeout bounds this loop.
    while ! zabbixserver_monitor; do
        ocf_log debug "Resource has not started yet, waiting"
        sleep $sleepint
    done

    return $OCF_SUCCESS
}
#
# stop action: TERM the parent process and wait until both the
# process and the monitor agree it is gone, then remove a leftover
# pidfile. Returns OCF_SUCCESS or OCF_ERR_GENERIC; exits on an
# unexpected monitor status.
#
# NOTE: the source contained leftover diff hunks here; the patched
# lines (ocf_exit_reason) are kept, the pre-image ocf_log lines dropped.
#
zabbixserver_stop() {
    local pid
    local rc

    # check the resource status
    zabbixserver_monitor
    rc=$?
    case "$rc" in
        $OCF_SUCCESS)
            ;;
        $OCF_NOT_RUNNING)
            ocf_log info "Resource is already stopped"
            return $OCF_SUCCESS
            ;;
        *)
            exit $OCF_ERR_GENERIC
            ;;
    esac

    pid=$(getpid "$OCF_RESKEY_pid")
    if [ $? -ne 0 ]; then
        ocf_exit_reason "Can't find process PID"
        return $OCF_ERR_GENERIC
    fi

    # kill the process (SIGTERM, so it may shut down cleanly)
    ocf_run -q kill "$pid"
    if [ $? -ne 0 ]; then
        ocf_exit_reason "Can't stop process (PID $pid)"
        return $OCF_ERR_GENERIC
    fi

    # Wait until the parent process terminates.
    # NOTE: The parent may be still waiting for its children. A regular
    # monitor function will not detect this condition because the
    # pidfile may be removed just now.
    while process_status "$pid"; do
        ocf_log debug "Waiting for process to terminate..."
        sleep $sleepint
    done

    # wait if it stops really
    while zabbixserver_monitor; do
        ocf_log debug "Resource has not stopped yet, waiting"
        sleep $sleepint
    done

    # remove stale pidfile if it exists
    if [ -f "$OCF_RESKEY_pid" ]; then
        ocf_log debug "Pidfile still exists, removing"
        rm "$OCF_RESKEY_pid"
    fi

    return $OCF_SUCCESS
}
#
# monitor action: report OCF_SUCCESS when the process named in the
# pidfile is alive, OCF_NOT_RUNNING otherwise.
#
zabbixserver_monitor() {
    local pid

    # running means: a PID could be read AND that process exists
    if pid=$(getpid $OCF_RESKEY_pid) && process_status $pid; then
        ocf_log debug "Resource is running"
        return $OCF_SUCCESS
    fi

    ocf_log info "Resource is not running"
    return $OCF_NOT_RUNNING
}
#
# validate-all action: the configuration file (when given) must be
# readable and the pidfile's directory must exist or be creatable.
#
zabbixserver_validate_all() {
    if ! check_config $OCF_RESKEY_config; then
        return $OCF_ERR_INSTALLED
    fi
    if ! ocf_mkstatedir root 755 $(dirname $OCF_RESKEY_pid); then
        return $OCF_ERR_INSTALLED
    fi
    return $OCF_SUCCESS
}
#
# main
#
# No parameters are strictly required; defaults cover everything.
OCF_REQUIRED_PARAMS=""
OCF_REQUIRED_BINARIES="$OCF_RESKEY_binary"
# "$@" (not $*) preserves the action argument's word boundaries
ocf_rarun "$@"
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Wed, Feb 26, 2:11 PM (18 h, 58 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1465421
Default Alt Text
(44 KB)
Attached To
Mode
rR Resource Agents
Attached
Detach File
Event Timeline
Log In to Comment