diff --git a/extra/resources/docker-wrapper b/extra/resources/docker-wrapper index b953e7482e..4b0b87bdd9 100755 --- a/extra/resources/docker-wrapper +++ b/extra/resources/docker-wrapper @@ -1,504 +1,536 @@ #!/bin/bash # # Copyright (c) 2015 David Vossel # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### +CONF_PREFIX="pcmk_docker" + meta_data() { cat < 1.0 Docker technology wrapper for pacemaker remote. docker wrapper - + Docker image to run resources within docker image - + Give resources within container access to cluster resources such as the CIB and the ability to manage cluster attributes. + +NOTE: Do not confuse this with the docker run command's +'--priviledged' option which gives a container permission +to access system devices. 
To toggle the docker run option, + set --privileged=true as part of the ${CONF_PREFIX}_run_opts +arguments. The ${CONF_PREFIX}_privileged option only pertains +to whether or not the container has access to the cluster's +CIB or not. Some multistate resources need to be able to write +values to the cib, which would require enabling ${CONF_PREFIX}_privileged is privileged - + Add options to be appended to the 'docker run' command which is used when creating the container during the start action. This option allows users to do things such as setting a custom entry point and injecting environment variables into the newly created container. Note the '-d' option is supplied regardless of this value to force containers to run in the background. NOTE: Do not explicitly specify the --name argument in the run_opts. This agent will set --name using the resource's instance name run options + + +Allow the container to be reused after stopping the container. By default +containers are removed after stop. With the reuse option containers +will persist after the container stops. 
+ +reuse container + + + END } ####################################################################### CLIENT="/usr/libexec/pacemaker/lrmd_internal_ctl" DOCKER_AGENT="/usr/lib/ocf/resource.d/heartbeat/docker" KEY_VAL_STR="" PROVIDER=$OCF_RESKEY_CRM_meta_provider CLASS=$OCF_RESKEY_CRM_meta_class TYPE=$OCF_RESKEY_CRM_meta_type CONTAINER=$OCF_RESKEY_CRM_meta_isolation_instance if [ -z "$CONTAINER" ]; then CONTAINER=$OCF_RESOURCE_INSTANCE fi RSC_STATE_DIR="${HA_RSCTMP}/docker-wrapper/${CONTAINER}-data/" RSC_STATE_FILE="$RSC_STATE_DIR/$OCF_RESOURCE_INSTANCE.state" CONNECTION_FAILURE=0 - -DOCKER_CLIENT="/usr/bin/docker" +HOST_LOG_DIR="${HA_RSCTMP}/docker-wrapper/${CONTAINER}-logs" +HOST_LOG_FILE="$HOST_LOG_DIR/pacemaker.log" +GUEST_LOG_DIR="/var/log/pcmk" +GUEST_LOG_FILE="$GUEST_LOG_DIR/pacemaker.log" pcmk_docker_wrapper_usage() { cat < $RSC_STATE_FILE fi } clear_state_file() { if [ -f "$RSC_STATE_FILE" ]; then rm -f $RSC_STATE_FILE fi } clear_state_dir() { [ -d "$RSC_STATE_DIR" ] || return 0 rm -rf $RSC_STATE_DIR } num_active_resources() { local count [ -d "$RSC_STATE_DIR" ] || return 0 count="$(ls $RSC_STATE_DIR | wc -w)" if [ $? -ne 0 ] || [ -z "$count" ]; then return 0 fi return $count } random_port() { local port=$(python -c 'import socket; s=socket.socket(); s.bind(("localhost", 0)); print(s.getsockname()[1]); s.close()') if [ $? -eq 0 ] && [ -n "$port" ]; then echo "$port" fi } get_active_port() { PORT="$(docker port $CONTAINER 3121 | awk -F: '{ print $2 }')" } # separate docker args from ocf resource args. 
separate_args() { local env key value # write out arguments to key value string for ocf agent while read -r line; do key="$(echo $line | awk -F= '{print $1}' | sed 's/^OCF_RESKEY_//g')" val="$(echo $line | awk -F= '{print $2}')" KEY_VAL_STR="$KEY_VAL_STR -k '$key' -v '$val'" - done < <(printenv | grep "^OCF.*" | grep -v "^OCF_RESKEY_pcmk_docker_.*") + done < <(printenv | grep "^OCF.*" | grep -v "^OCF_RESKEY_${CONF_PREFIX}_.*") # sanitize args for DOCKER agent's consumption while read -r line; do env="$(echo $line | awk -F= '{print $1}')" val="$(echo $line | awk -F= '{print $2}')" - key="$(echo "$env" | sed 's/^OCF_RESKEY_pcmk_docker/OCF_RESKEY/g')" + key="$(echo "$env" | sed "s/^OCF_RESKEY_${CONF_PREFIX}/OCF_RESKEY/g")" export $key="$val" - done < <(printenv | grep "^OCF_RESKEY_pcmk_docker_.*") + done < <(printenv | grep "^OCF_RESKEY_${CONF_PREFIX}_.*") - if ocf_is_true $OCF_RESKEY_pcmk_docker_privileged ; then + if ocf_is_true $OCF_RESKEY_privileged ; then export OCF_RESKEY_run_cmd="/usr/sbin/pacemaker_remoted" # on start set random port to run_opts # write port to state file... or potentially get from ps? maybe docker info or inspect as well? else export OCF_RESKEY_run_cmd="/usr/libexec/pacemaker/lrmd" fi export OCF_RESKEY_name="$CONTAINER" } monitor_container() { local rc $DOCKER_AGENT monitor rc=$? if [ $rc -ne $OCF_SUCCESS ]; then clear_state_dir return $rc fi poke_remote rc=$? if [ $rc -ne $OCF_SUCCESS ]; then # container is up without an active daemon. this is bad ocf_log err "Container, $CONTAINER, is active without a responsive pacemaker_remote instance" CONNECTION_FAILURE=1 return $OCF_ERR_GENERIC fi CONNECTION_FAILURE=0 return $rc } pcmk_docker_wrapper_monitor() { local rc monitor_container rc=$? if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi client_action "monitor" rc=$? 
if [ $rc -eq $OCF_SUCCESS ] || [ $rc -eq $OCF_RUNNING_MASTER ]; then write_state_file else clear_state_file fi return $rc } pcmk_docker_wrapper_generic_action() { local rc monitor_container rc=$? if [ $? -ne $OCF_SUCCESS ]; then return $rc fi client_action "$1" } client_action() { local action=$1 local agent_type="-T $TYPE -C $CLASS" local rc=0 if [ -n "$PROVIDER" ]; then agent_type="$agent_type -P $PROVIDER" fi - if ocf_is_true $OCF_RESKEY_pcmk_docker_privileged ; then + if ocf_is_true $OCF_RESKEY_privileged ; then if [ -z "$PORT" ]; then get_active_port fi + export PCMK_logfile=$HOST_LOG_FILE ocf_log info "$CLIENT -c 'exec' -S '127.0.0.1' -p '$PORT' -a '$action' -r '$OCF_RESOURCE_INSTANCE' -n '$CONTAINER' '$agent_type' $KEY_VAL_STR " eval $CLIENT -c 'exec' -S '127.0.0.1' -p '$PORT' -a '$action' -r '$OCF_RESOURCE_INSTANCE' -n '$CONTAINER' '$agent_type' $KEY_VAL_STR else + export PCMK_logfile=$GUEST_LOG_FILE ocf_log info "$CLIENT -c \"exec\" -a $action -r \"$OCF_RESOURCE_INSTANCE\" $agent_type $KEY_VAL_STR" - echo "$CLIENT -c \"exec\" -a $action -r \"$OCF_RESOURCE_INSTANCE\" $agent_type $KEY_VAL_STR " | nsenter --target $(docker inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1 + echo "$CLIENT -c \"exec\" -a $action -r \"$OCF_RESOURCE_INSTANCE\" $agent_type $KEY_VAL_STR " | nsenter --target $(docker inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid fi rc=$? ocf_log debug "Client action $action with result $rc" return $rc } poke_remote() { # verifies daemon in container is active - if ocf_is_true $OCF_RESKEY_pcmk_docker_privileged ; then + if ocf_is_true $OCF_RESKEY_privileged ; then get_active_port ocf_log info "Attempting to contect $CONTAINER on port $PORT" $CLIENT -c "poke" -S "127.0.0.1" -p $PORT -n $CONTAINER fi # no op for non privileged containers since we handed the # client monitor action as the monitor_cmd for the docker agent } start_container() { local rc monitor_container rc=$? 
if [ $rc -eq $OCF_SUCCESS ]; then return $rc fi - export OCF_RESKEY_run_opts="-e PCMK_debug=yes -e PCMK_logfile=/var/log/pacemaker.log $OCF_RESKEY_run_opts" - if ocf_is_true $OCF_RESKEY_pcmk_docker_privileged ; then + mkdir -p $HOST_LOG_DIR + export OCF_RESKEY_run_opts="-e PCMK_logfile=$GUEST_LOG_FILE $OCF_RESKEY_run_opts" + export OCF_RESKEY_run_opts="-v $HOST_LOG_DIR:$GUEST_LOG_DIR $OCF_RESKEY_run_opts" + if ocf_is_true $OCF_RESKEY_privileged ; then if ! [ -f "/etc/pacemaker/authkey" ]; then # generate an authkey if it doesn't exist. mkdir -p /etc/pacemaker/ dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1 > /dev/null 2>&1 chmod 600 /etc/pacemaker/authkey fi PORT=$(random_port) if [ -z "$PORT" ]; then ocf_exit_reason "Unable to assign random port for pacemaker remote" return $OCF_ERR_GENERIC fi export OCF_RESKEY_run_opts="-p 127.0.0.1:${PORT}:3121 $OCF_RESKEY_run_opts" export OCF_RESKEY_run_opts="-v /etc/pacemaker/authkey:/etc/pacemaker/authkey $OCF_RESKEY_run_opts" ocf_log debug "using privileged mode: run_opts=$OCF_RESKEY_run_opts" else export OCF_RESKEY_monitor_cmd="$CLIENT -c poke" fi $DOCKER_AGENT start rc=$? if [ $rc -ne $OCF_SUCCESS ]; then docker ps > /dev/null 2>&1 if [ $? -ne 0 ]; then ocf_exit_reason "docker daemon is inactive." fi return $rc fi monitor_container } pcmk_docker_wrapper_start() { local rc start_container rc=$? if [ $rc -ne $OCF_SUCCESS ]; then return $rc fi client_action "start" rc=$? if [ $? -ne "$OCF_SUCCESS" ]; then ocf_exit_reason "Failed to start agent within container" return $rc fi pcmk_docker_wrapper_monitor - return $? + rc=$? + if [ $rc -eq $OCF_SUCCESS ]; then + ocf_log notice "$OCF_RESOURCE_INSTANCE started successfully. Container's logfile can be found at $HOST_LOG_FILE" + fi + + return $rc } stop_container() { local rc local count num_active_resources count=$? if [ $count -ne 0 ]; then ocf_log err "Failed to stop agent within container. 
Killing container $CONTAINER with $count active resources" fi $DOCKER_AGENT "stop" rc=$? if [ $rc -ne $OCF_SUCCESS ]; then ocf_exit_reason "Docker container failed to stop" return $rc fi clear_state_dir return $rc } stop_resource() { local rc client_action "stop" rc=$? if [ $? -ne "$OCF_SUCCESS" ]; then export OCF_RESKEY_force_stop="true" kill_now=1 else clear_state_file fi } pcmk_docker_wrapper_stop() { local rc local kill_now=0 local all_stopped=0 pcmk_docker_wrapper_monitor rc=$? if [ $rc -eq $OCF_NOT_RUNNING ]; then rc=$OCF_SUCCESS num_active_resources if [ $? -eq 0 ]; then # stop container if no more resources are running ocf_log info "Gracefully stopping container $CONTAINER because no resources are left running." stop_container rc=$? fi return $rc fi # if we can't talk to the remote daemon but the container is # active, we have to force kill the container. if [ $CONNECTION_FAILURE -eq 1 ]; then export OCF_RESKEY_force_kill="true" stop_container return $? fi # If we've gotten this far, the container is up, and we # need to gracefully stop a resource within the container. client_action "stop" rc=$? if [ $? -ne "$OCF_SUCCESS" ]; then export OCF_RESKEY_force_stop="true" # force kill the container if we fail to stop a resource. stop_container rc=$? else clear_state_file num_active_resources if [ $? -eq 0 ]; then # stop container if no more resources are running ocf_log info "Gracefully stopping container $CONTAINER because last resource has stopped" stop_container rc=$? fi fi return $rc } pcmk_docker_wrapper_validate() { check_binary docker if [ -z "$CLASS" ] || [ -z "$TYPE" ]; then ocf_exit_reason "Update pacemaker to a version that supports container wrappers." return $OCF_ERR_CONFIGURED fi if ! [ -f "$DOCKER_AGENT" ]; then ocf_exit_reason "Requires $DOCKER_AGENT to be installed. update the resource-agents package" return $OCF_ERR_INSTALLED fi $DOCKER_AGENT validate-all return $? 
} case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; usage|help) pcmk_docker_wrapper_usage exit $OCF_SUCCESS ;; esac separate_args pcmk_docker_wrapper_validate rc=$? if [ $rc -ne 0 ]; then case $__OCF_ACTION in stop) exit $OCF_SUCCESS;; monitor) exit $OCF_NOT_RUNNING;; *) exit $rc;; esac fi case $__OCF_ACTION in start) pcmk_docker_wrapper_start;; stop) pcmk_docker_wrapper_stop;; monitor|status) pcmk_docker_wrapper_monitor;; reload|promote|demote|notify) pcmk_docker_wrapper_generic_action $__OCF_ACTION;; validate-all) pcmk_docker_wrapper_validate;; *) pcmk_docker_wrapper_usage exit $OCF_ERR_UNIMPLEMENTED ;; esac rc=$? ocf_log debug "Docker-wrapper ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" exit $rc diff --git a/lrmd/remote_ctl.c b/lrmd/remote_ctl.c index 32151d744f..c5787239d7 100644 --- a/lrmd/remote_ctl.c +++ b/lrmd/remote_ctl.c @@ -1,526 +1,526 @@ /* * Copyright (c) 2015 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include extern GHashTable *proxy_table; void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); /* *INDENT-OFF* */ static struct crm_option long_options[] = { {"help", 0, 0, '?'}, {"verbose", 0, 0, 'V', "\t\tPrint out logs and events to screen"}, {"quiet", 0, 0, 'Q', "\t\tSuppress all output to screen"}, {"tls", 1, 0, 'S', "\t\tSet tls host to contact"}, {"tls-port", 1, 0, 'p', "\t\tUse custom tls port"}, {"node", 1, 0, 'n', "\tNode name to use for ipc proxy"}, {"api-call", 1, 0, 'c', "\tDirectly relates to lrmd api functions"}, {"-spacer-", 1, 0, '-', "\nParameters for api-call option"}, {"action", 1, 0, 'a'}, {"rsc-id", 1, 0, 'r'}, {"provider", 1, 0, 'P'}, {"class", 1, 0, 'C'}, {"type", 1, 0, 'T'}, {"timeout", 1, 0, 't'}, {"param-key", 1, 0, 'k'}, {"param-val", 1, 0, 'v'}, {"-spacer-", 1, 0, '-'}, {0, 0, 0, 0} }; /* *INDENT-ON* */ static int wait_poke = 0; static int exec_call_id = 0; static gboolean client_start(gpointer user_data); static void try_connect(void); static struct { int verbose; int quiet; int print; int interval; int timeout; int port; const char *node_name; const char *api_call; const char *rsc_id; const char *provider; const char *class; const char *type; const char *action; const char *listen; const char *tls_host; lrmd_key_value_t *params; } options; GMainLoop *mainloop = NULL; lrmd_t *lrmd_conn = NULL; static void client_exit(int rc) { lrmd_api_delete(lrmd_conn); if (proxy_table) { g_hash_table_destroy(proxy_table); proxy_table = NULL; } exit(rc); } static void client_shutdown(int nsig) { lrmd_api_delete(lrmd_conn); lrmd_conn = NULL; } static void 
read_events(lrmd_event_data_t * event) { if (wait_poke && event->type == lrmd_event_poke) { client_exit(PCMK_OCF_OK); } if ((event->call_id == exec_call_id) && (event->type == lrmd_event_exec_complete)) { if (event->output) { crm_info("%s", event->output); } if (event->exit_reason) { fprintf(stderr, "%s%s\n", PCMK_OCF_REASON_PREFIX, event->exit_reason); } client_exit(event->rc); } } static gboolean timeout_err(gpointer data) { crm_err("timed out in remote_client\n"); client_exit(PCMK_OCF_TIMEOUT); return FALSE; } static void connection_events(lrmd_event_data_t * event) { int rc = event->connection_rc; if (event->type != lrmd_event_connect) { /* ignore */ return; } if (!rc) { client_start(NULL); return; } else { sleep(1); try_connect(); } } static void try_connect(void) { int tries = 10; static int num_tries = 0; int rc = 0; lrmd_conn->cmds->set_callback(lrmd_conn, connection_events); for (; num_tries < tries; num_tries++) { rc = lrmd_conn->cmds->connect_async(lrmd_conn, "lrmd", 10000); if (!rc) { num_tries++; return; /* we'll hear back in async callback */ } sleep(1); } crm_err("Failed to connect to pacemaker remote.\n"); client_exit(PCMK_OCF_UNKNOWN_ERROR); } static gboolean client_start(gpointer user_data) { int rc = 0; if (!lrmd_conn->cmds->is_connected(lrmd_conn)) { try_connect(); /* async connect, this funciton will get called back into. 
*/ return 0; } lrmd_conn->cmds->set_callback(lrmd_conn, read_events); if (safe_str_eq(options.api_call, "ipc_debug")) { /* Do nothing, leave connection up just for debugging ipc proxy */ return 0; } if (options.timeout) { g_timeout_add(options.timeout, timeout_err, NULL); } if (safe_str_eq(options.api_call, "metadata")) { char *output = NULL; rc = lrmd_conn->cmds->get_metadata(lrmd_conn, options.class, options.provider, options.type, &output, 0); if (rc == pcmk_ok) { printf("%s", output); free(output); client_exit(PCMK_OCF_OK); } client_exit(PCMK_OCF_UNKNOWN_ERROR); } else if (safe_str_eq(options.api_call, "poke")) { rc = lrmd_conn->cmds->poke_connection(lrmd_conn); if (rc != pcmk_ok) { client_exit(PCMK_OCF_UNKNOWN_ERROR); } wait_poke = 1; } else { lrmd_rsc_info_t *rsc_info = NULL; rsc_info = lrmd_conn->cmds->get_rsc_info(lrmd_conn, options.rsc_id, 0); if (rsc_info == NULL) { rc = lrmd_conn->cmds->register_rsc(lrmd_conn, options.rsc_id, options.class, options.provider, options.type, 0); if (rc != 0){ crm_err("failed to register resource %s with pacemaker_remote. rc: %d\n", options.rsc_id, rc); client_exit(1); } } lrmd_free_rsc_info(rsc_info); rc = lrmd_conn->cmds->exec(lrmd_conn, options.rsc_id, options.action, NULL, options.interval, options.timeout, 0, 0, options.params); if (rc > 0) { exec_call_id = rc; } else { crm_err("execution of rsc %s failed. 
rc = %d\n", options.rsc_id, rc); client_exit(PCMK_OCF_UNKNOWN_ERROR); } } return 0; } static int remote_proxy_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { /* Async responses from cib and friends back to clients via pacemaker_remoted */ xmlNode *xml = NULL; remote_proxy_t *proxy = userdata; uint32_t flags; xml = string2xml(buffer); if (xml == NULL) { crm_warn("Received a NULL msg from IPC service."); return 1; } flags = crm_ipc_buffer_flags(proxy->ipc); if (flags & crm_ipc_proxied_relay_response) { crm_trace("Passing response back to %.8s on %s: %.200s - request id: %d", proxy->session_id, proxy->node_name, buffer, proxy->last_request_id); remote_proxy_relay_response(lrmd_conn, proxy->session_id, xml, proxy->last_request_id); proxy->last_request_id = 0; } else { crm_trace("Passing event back to %.8s on %s: %.200s", proxy->session_id, proxy->node_name, buffer); remote_proxy_relay_event(lrmd_conn, proxy->session_id, xml); } free_xml(xml); return 1; } static void remote_proxy_disconnected(void *userdata) { remote_proxy_t *proxy = userdata; crm_trace("destroying %p", userdata); proxy->source = NULL; proxy->ipc = NULL; remote_proxy_notify_destroy(lrmd_conn, proxy->session_id); g_hash_table_remove(proxy_table, proxy->session_id); } static remote_proxy_t * remote_proxy_new(const char *node_name, const char *session_id, const char *channel) { static struct ipc_client_callbacks proxy_callbacks = { .dispatch = remote_proxy_dispatch_internal, .destroy = remote_proxy_disconnected }; remote_proxy_t *proxy = calloc(1, sizeof(remote_proxy_t)); proxy->node_name = strdup(node_name); proxy->session_id = strdup(session_id); if (safe_str_eq(channel, CRM_SYSTEM_CRMD)) { proxy->is_local = TRUE; } else { proxy->source = mainloop_add_ipc_client(channel, G_PRIORITY_LOW, 0, proxy, &proxy_callbacks); proxy->ipc = mainloop_get_ipc_client(proxy->source); if (proxy->source == NULL) { remote_proxy_free(proxy); return NULL; } } crm_trace("created proxy session ID %s", 
proxy->session_id); g_hash_table_insert(proxy_table, proxy->session_id, proxy); return proxy; } static void remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) { const char *op = crm_element_value(msg, F_LRMD_IPC_OP); const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION); int msg_id = 0; /* sessions are raw ipc connections to IPC, * all we do is proxy requests/responses exactly * like they are given to us at the ipc level. */ CRM_CHECK(op != NULL, return); CRM_CHECK(session != NULL, return); crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); /* This is msg from remote ipc client going to real ipc server */ if (safe_str_eq(op, "new")) { const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); CRM_CHECK(channel != NULL, return); if (remote_proxy_new(options.node_name, session, channel) == NULL) { remote_proxy_notify_destroy(lrmd, session); } crm_info("new remote proxy client established to %s, session id %s", channel, session); } else if (safe_str_eq(op, "destroy")) { remote_proxy_end_session(session); } else if (safe_str_eq(op, "request")) { int flags = 0; xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); const char *name = crm_element_value(msg, F_LRMD_IPC_CLIENT); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); CRM_CHECK(request != NULL, return); if (proxy == NULL) { /* proxy connection no longer exists */ remote_proxy_notify_destroy(lrmd, session); return; } else if ((proxy->is_local == FALSE) && (crm_ipc_connected(proxy->ipc) == FALSE)) { remote_proxy_end_session(session); return; } proxy->last_request_id = 0; crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags); crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote"); #if ENABLE_ACL CRM_ASSERT(options.node_name); crm_acl_get_set_user(request, F_LRMD_IPC_USER, options.node_name); #endif if (is_set(flags, crm_ipc_proxied)) { int rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); if(rc < 0) { xmlNode *op_reply = create_xml_node(NULL, 
"nack"); crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); /* Send a n'ack so the caller doesn't block */ crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); crm_xml_add_int(op_reply, "rc", rc); remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); proxy->last_request_id = msg_id; } } } else { crm_err("Unknown proxy operation: %s", op); } } int main(int argc, char **argv) { int option_index = 0; int argerr = 0; int flag; char *key = NULL; char *val = NULL; gboolean use_tls = FALSE; crm_trigger_t *trig; crm_set_options(NULL, "mode [options]", long_options, "Inject commands into the lrmd and watch for events\n"); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case '?': crm_help(flag, EX_OK); break; case 'V': options.verbose = 1; break; case 'Q': options.quiet = 1; options.verbose = 0; break; case 'n': options.node_name = optarg; break; case 'c': options.api_call = optarg; break; case 'a': options.action = optarg; break; case 'r': options.rsc_id = optarg; break; case 'P': options.provider = optarg; break; case 'C': options.class = optarg; break; case 'T': options.type = optarg; break; case 't': if(optarg) { options.timeout = atoi(optarg); } break; case 'k': key = optarg; if (key && val) { options.params = lrmd_key_value_add(options.params, key, val); key = val = NULL; } break; case 'v': val = optarg; if (key && val) { options.params = lrmd_key_value_add(options.params, key, val); key = val = NULL; } break; case 'S': options.tls_host = optarg; use_tls = TRUE; break; case 'p': if(optarg) { options.port = atoi(optarg); } use_tls = TRUE; break; default: ++argerr; break; } } if (argerr) { crm_help('?', EX_USAGE); } if 
(optind > argc) { ++argerr; } - crm_log_init("remote_client", LOG_INFO, TRUE, options.verbose ? TRUE : FALSE, argc, argv, FALSE); + crm_log_init("remote_client", LOG_INFO, FALSE, options.verbose ? TRUE : FALSE, argc, argv, FALSE); /* if we can't perform an api_call or listen for events, * there is nothing to do */ if (!options.api_call ) { crm_err("Nothing to be done. Please specify 'api-call'\n"); return PCMK_OCF_UNKNOWN_ERROR; } if (!options.timeout ) { options.timeout = 20000; } if (use_tls) { if (options.node_name == NULL) { crm_err("\"node\" option required when tls is in use.\n"); return PCMK_OCF_UNKNOWN_ERROR; } proxy_table = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, remote_proxy_free); lrmd_conn = lrmd_remote_api_new(NULL, options.tls_host ? options.tls_host : "localhost", options.port); lrmd_internal_set_proxy_callback(lrmd_conn, NULL, remote_proxy_cb); } else { lrmd_conn = lrmd_api_new(); } trig = mainloop_add_trigger(G_PRIORITY_HIGH, client_start, NULL); mainloop_set_trigger(trig); mainloop_add_signal(SIGTERM, client_shutdown); mainloop = g_main_new(FALSE); g_main_run(mainloop); client_exit(0); return 0; }