diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h index 2d345528a4..35bd229cce 100644 --- a/crmd/crmd_utils.h +++ b/crmd/crmd_utils.h @@ -1,123 +1,124 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRMD_UTILS__H # define CRMD_UTILS__H # include # include # include /* For CIB_OP_MODIFY */ # define CLIENT_EXIT_WAIT 30 # define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # define fsa_cib_delete(section, data, options, call_id, user_name) \ if(fsa_cib_conn != NULL) { \ call_id = cib_internal_op( \ fsa_cib_conn, CIB_OP_DELETE, NULL, section, data, \ NULL, options, user_name); \ \ } else { \ crm_err("No CIB connection available"); \ } # define fsa_cib_update(section, data, options, call_id, user_name) \ if(fsa_cib_conn != NULL) { \ call_id = cib_internal_op( \ fsa_cib_conn, CIB_OP_MODIFY, NULL, section, data, \ NULL, options, user_name); \ \ } else { \ crm_err("No CIB connection available"); \ } # define fsa_cib_anon_update(section, data, options) \ if(fsa_cib_conn != NULL) { \ fsa_cib_conn->cmds->modify( \ fsa_cib_conn, section, data, options); \ \ } else { \ crm_err("No CIB connection available"); \ } extern gboolean fsa_has_quorum; extern int last_peer_update; extern int last_resource_update; enum node_update_flags { node_update_none = 0x0000, node_update_quick = 0x0001, node_update_cluster = 0x0010, node_update_peer = 0x0020, node_update_join = 0x0040, node_update_expected = 0x0100, }; gboolean crm_timer_stop(fsa_timer_t * timer); gboolean crm_timer_start(fsa_timer_t * timer); gboolean crm_timer_popped(gpointer data); gboolean is_timer_started(fsa_timer_t * timer); xmlNode *create_node_state(const char *uname, const char *in_cluster, const char *is_peer, const char *join_state, const char *exp_state, gboolean clear_shutdown, const char *src); int crmd_exit(int rc); gboolean stop_subsystem(struct crm_subsystem_s *centry, gboolean force_quit); gboolean start_subsystem(struct crm_subsystem_s *centry); void fsa_dump_actions(long long action, const char *text); void fsa_dump_inputs(int log_level, const char *text, long long input_register); gboolean update_dc(xmlNode * msg); void erase_node_from_join(const char *node); xmlNode *do_update_node_cib(crm_node_t * node, int flags, xmlNode * parent, const char *source); void populate_cib_nodes(enum node_update_flags flags, const char *source); void crm_update_quorum(gboolean quorum, gboolean force_update); void erase_status_tag(const char *uname, const char *tag, int options); void update_attrd(const char *host, const char *name, const char *value, const char *user_name, gboolean is_remote_node); int crmd_join_phase_count(enum crm_join_phase phase); void crmd_join_phase_log(int level); const char *get_timer_desc(fsa_timer_t * timer); gboolean too_many_st_failures(void); +void reset_st_fail_count(const char * target); # define 
fsa_register_cib_callback(id, flag, data, fn) do { \ fsa_cib_conn->cmds->register_callback( \ fsa_cib_conn, id, 10 * (1 + crm_active_peers()), \ flag, data, #fn, fn); \ } while(0) # define start_transition(state) do { \ switch(state) { \ case S_TRANSITION_ENGINE: \ register_fsa_action(A_TE_CANCEL); \ break; \ case S_POLICY_ENGINE: \ case S_IDLE: \ register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); \ break; \ default: \ crm_debug("NOT starting a new transition in state %s", \ fsa_state2string(fsa_state)); \ break; \ } \ } while(0) #endif diff --git a/crmd/lrm.c b/crmd/lrm.c index 1f514eba9e..fe2d02b70d 100644 --- a/crmd/lrm.c +++ b/crmd/lrm.c @@ -1,2086 +1,2086 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op); static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name); static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request); void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static void lrm_connection_destroy(void) { if (is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("LRM Connection failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("LRM Connection disconnected"); } } static char * make_stop_id(const char *rsc, int call_id) { char *op_id = NULL; op_id = calloc(1, strlen(rsc) + 34); if (op_id != NULL) { snprintf(op_id, strlen(rsc) + 34, "%s:%d", rsc, call_id); } return op_id; } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static 
void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL); return; } if (safe_str_eq(op->op_type, RSC_NOTIFY)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = calloc(1, sizeof(rsc_history_t)); entry->id = strdup(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = strdup(rsc->type); entry->rsc.class = strdup(rsc->class); if (rsc->provider) { entry->rsc.provider = strdup(rsc->provider); } else { entry->rsc.provider = NULL; } } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_LRM_OP_CANCELLED) { if (op->interval > 0) { GList *gIter, *gIterNext; crm_trace("Removing cancelled recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIterNext) { lrmd_event_data_t *existing = gIter->data; gIterNext = gIter->next; if (safe_str_eq(op->rsc_id, existing->rsc_id) && safe_str_eq(op->op_type, existing->op_type) && op->interval == existing->interval) { lrmd_free_event(existing); entry->recurring_op_list = g_list_delete_link(entry->recurring_op_list, gIter); } } return; } else { crm_trace("Skipping %s_%s_%d rc=%d, status=%d", op->rsc_id, op->op_type, op->interval, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* We must store failed monitors here * - otherwise the block below will cause them to be forgotten when a stop happens */ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && (safe_str_eq(CRMD_ACTION_START, op->op_type) || safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval > 0) { crm_trace("Adding recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { GList *gIter = entry->recurring_op_list; crm_trace("Dropping %d recurring ops because of: %s_%s_%d", g_list_length(gIter), op->rsc_id, op->op_type, op->interval); for (; gIter != NULL; gIter = gIter->next) { lrmd_free_event(gIter->data); } g_list_free(entry->recurring_op_list); entry->recurring_op_list = NULL; } } void lrm_op_callback(lrmd_event_data_t * op) { const char *nodename = NULL; lrm_state_t *lrm_state = NULL; CRM_CHECK(op != NULL, return); /* determine the node name for this connection. */ nodename = op->remote_nodename ?
op->remote_nodename : fsa_our_uname; if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) { /* if this is the local lrmd ipc connection, set the right bits in the * crmd when the connection goes down */ lrm_connection_destroy(); return; } else if (op->type != lrmd_event_exec_complete) { /* we only need to process execution results */ return; } lrm_state = lrm_state_find(nodename); CRM_ASSERT(lrm_state != NULL); process_lrm_event(lrm_state, op); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local lrmd connections. Remote connections are handled as * resources within the pengine. Connecting and disconnecting from remote lrmd instances * is handled differently than the local connection. */ lrm_state_t *lrm_state = NULL; lrm_state = lrm_state_find_or_create(fsa_our_uname); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } clear_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Disconnecting from the LRM"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state); crm_notice("Disconnected from the LRM"); } if (action & A_LRM_CONNECT) { int ret = pcmk_ok; crm_debug("Connecting to the LRM"); ret = lrm_state_ipc_connect(lrm_state); if (ret != pcmk_ok) { if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to sign on to the LRM %d" " (%d max) times", lrm_state->num_lrm_register_fails, MAX_LRM_REG_FAILS); crm_timer_start(wait_timer); crmd_fsa_stall(FALSE); return; } } if (ret != pcmk_ok) { crm_err("Failed to sign on to the LRM %d" " (max) times", lrm_state->num_lrm_register_fails); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } set_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("LRM connection established"); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static void ghash_print_pending(gpointer key, gpointer value, gpointer user_data) { const char *stop_id = key; int *log_level = user_data; struct recurring_op_s *pending = value; do_crm_log(*log_level, "Pending action: %s (%s)", stop_id, pending->op_key); } static void ghash_print_pending_for_rsc(gpointer key, gpointer value, gpointer user_data) { const char *stop_id = key; char *rsc = user_data; struct recurring_op_s *pending = value; if (safe_str_eq(rsc, pending->rsc_id)) { crm_notice("%sction %s (%s) incomplete at shutdown", pending->interval == 0 ? "A" : "Recurring a", stop_id, pending->op_key); } } static void ghash_count_pending(gpointer key, gpointer value, gpointer user_data) { int *counter = user_data; struct recurring_op_s *pending = value; if (pending->interval > 0) { /* Ignore recurring actions in the shutdown calculations */ return; } (*counter)++; } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; rsc_history_t *entry = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (is_set(fsa_input_register, R_SHUTDOWN)) { when = "shutdown...
waiting"; } if (lrm_state->pending_ops) { if (lrm_state_is_connected(lrm_state) == TRUE) { /* Only log/complain about non-recurring actions */ g_hash_table_foreach_remove(lrm_state->pending_ops, stop_recurring_actions, lrm_state); } g_hash_table_foreach(lrm_state->pending_ops, ghash_count_pending, &counter); } if (counter > 0) { do_crm_log(log_level, "%d pending LRM operations at %s", counter, when); if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_foreach(lrm_state->pending_ops, ghash_print_pending, &log_level); } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (cur_state == S_TERMINATE || is_set(fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; crm_trace("Found %s active", entry->id); if (lrm_state->pending_ops) { g_hash_table_foreach(lrm_state->pending_ops, ghash_print_pending_for_rsc, entry->id); } } if (counter) { crm_err("%d resources were active at %s.", counter, when); } return rc; } static char * get_rsc_metadata(const char *type, const char *class, const char *provider) { int rc = 0; char *metadata = NULL; /* Always use a local connection for this operation */ lrm_state_t *lrm_state = lrm_state_find(fsa_our_uname); CRM_CHECK(type != NULL, return NULL); CRM_CHECK(class != NULL, return NULL); CRM_CHECK(lrm_state != NULL, return NULL); if (provider == NULL) { provider = "heartbeat"; } crm_trace("Retrieving metadata for %s::%s:%s", type, class, provider); rc = lrm_state_get_metadata(lrm_state, class, provider, type, &metadata, 0); if (metadata) { /* copy the metadata because the LRM likes using * g_alloc instead of cl_malloc */ char *m_copy = strdup(metadata); g_free(metadata); metadata = m_copy; } else { crm_warn("No metadata found for %s::%s:%s: %s (%d)", type, class, provider, pcmk_strerror(rc), rc); } return metadata; } typedef struct reload_data_s { char *key; char *metadata; time_t last_query; gboolean can_reload; GListPtr restart_list; } reload_data_t; static void g_hash_destroy_reload(gpointer data) { reload_data_t *reload = data; free(reload->key); free(reload->metadata); g_list_free_full(reload->restart_list, free); free(reload); } GHashTable *reload_hash = NULL; static GListPtr get_rsc_restart_list(lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int len = 0; char *key = NULL; char *copy = NULL; const char *value = NULL; const char *provider = NULL; xmlNode *param = NULL; xmlNode *params = NULL; xmlNode *actions = NULL; xmlNode *metadata = NULL; time_t now = time(NULL); reload_data_t *reload = NULL; if (reload_hash == NULL) { reload_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_reload); } provider = rsc->provider; if (provider == NULL) { provider = "heartbeat"; } len = strlen(rsc->type) + strlen(rsc->class) + strlen(provider) + 4; /* coverity[returned_null] Ignore */ key = malloc(len); snprintf(key, len, "%s::%s:%s", rsc->type, rsc->class, provider); reload = g_hash_table_lookup(reload_hash, key); if (reload && ((now - 9) > reload->last_query) && safe_str_eq(op->op_type, RSC_START)) { reload = NULL; /* re-query */ } if (reload == NULL) { xmlNode *action = NULL; reload = calloc(1, sizeof(reload_data_t)); g_hash_table_replace(reload_hash, key, reload); reload->last_query = now;
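/* Descriptive note (no behaviour change): reload_hash was created above
 * with g_hash_destroy_reload() as its value destructor and no key
 * destructor, so ownership of 'key' passes to the entry via reload->key
 * below; clearing the local pointer keeps the shared cleanup path's
 * free(key) from double-freeing it. */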
reload->key = key; key = NULL; reload->metadata = get_rsc_metadata(rsc->type, rsc->class, provider); if(reload->metadata == NULL) { goto cleanup; } metadata = string2xml(reload->metadata); if (metadata == NULL) { crm_err("Metadata for %s::%s:%s is not valid XML", rsc->type, rsc->class, provider); goto cleanup; } actions = find_xml_node(metadata, "actions", TRUE); for (action = __xml_first_child(actions); action != NULL; action = __xml_next(action)) { if (crm_str_eq((const char *)action->name, "action", TRUE)) { value = crm_element_value(action, "name"); if (safe_str_eq("reload", value)) { reload->can_reload = TRUE; break; } } } if (reload->can_reload == FALSE) { goto cleanup; } params = find_xml_node(metadata, "parameters", TRUE); for (param = __xml_first_child(params); param != NULL; param = __xml_next(param)) { if (crm_str_eq((const char *)param->name, "parameter", TRUE)) { value = crm_element_value(param, "unique"); if (crm_is_true(value)) { value = crm_element_value(param, "name"); if (value == NULL) { crm_err("%s: NULL param", reload->key); continue; } crm_debug("Attr %s is not reloadable", value); copy = strdup(value); CRM_CHECK(copy != NULL, continue); reload->restart_list = g_list_append(reload->restart_list, copy); } } } } cleanup: free(key); free_xml(metadata); return reload->restart_list; } static void append_restart_list(lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, xmlNode * update, const char *version) { int len = 0; char *list = NULL; char *digest = NULL; const char *value = NULL; xmlNode *restart = NULL; GListPtr restart_list = NULL; GListPtr lpc = NULL; if (op->interval > 0) { /* monitors are not reloadable */ return; } else if (op->params == NULL) { crm_debug("%s has no parameters", ID(update)); return; } else if (rsc == NULL) { return; } else if (crm_str_eq(CRMD_ACTION_STOP, op->op_type, TRUE)) { /* Stopped resources don't need to be reloaded */ return; } else if (compare_version("1.0.8", version) > 0) { /* Caller version does not support reloads */ return; } restart_list = get_rsc_restart_list(rsc, op); if (restart_list == NULL) { /* Resource does not support reloads */ return; } restart = create_xml_node(NULL, XML_TAG_PARAMS); for (lpc = restart_list; lpc != NULL; lpc = lpc->next) { const char *param = (const char *)lpc->data; int start = len; CRM_CHECK(param != NULL, continue); value = g_hash_table_lookup(op->params, param); if (value != NULL) { crm_xml_add(restart, param, value); } len += strlen(param) + 2; list = realloc(list, len + 1); sprintf(list + start, " %s ", param); } digest = calculate_operation_digest(restart, version); crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); crm_trace("%s: %s, %s", rsc->id, digest, list); crm_log_xml_trace(restart, "restart digest source"); free_xml(restart); free(digest); free(list); } static gboolean build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *src) { int target_rc = 0; xmlNode *xml_op = NULL; const char *caller_version = CRM_FEATURE_SET; if (op == NULL) { return FALSE; } else if (AM_I_DC) { } else if (fsa_our_dc_version != NULL) { caller_version = fsa_our_dc_version; } else if (op->params == NULL) { caller_version = fsa_our_dc_version; } else { /* there is a small risk in formerly mixed clusters that * it will be sub-optimal.
* however with our upgrade policy, the update we send * should still be completely supported anyway */ caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); crm_debug("Falling back to operation originator version: %s", caller_version); } target_rc = rsc_op_expected_rc(op); xml_op = create_operation_update(parent, op, caller_version, target_rc, src, LOG_DEBUG); if (xml_op) { append_restart_list(rsc, op, xml_op, caller_version); } return TRUE; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval, entry->last->rc); if (entry->last->rc == PCMK_EXECRA_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) { return FALSE; } else if (entry->last->rc == PCMK_EXECRA_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) { /* a stricter check is too complex... * leave that to the PE */ return FALSE; } else if (entry->last->rc == PCMK_EXECRA_NOT_RUNNING) { return FALSE; } else if (entry->last->interval == 0 && entry->last->rc == PCMK_EXECRA_NOT_CONFIGURED) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.class); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); build_operation_update(xml_rsc, &(entry->rsc), entry->last, __FUNCTION__); build_operation_update(xml_rsc, &(entry->rsc), entry->failed, __FUNCTION__); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { build_operation_update(xml_rsc, &(entry->rsc), gIter->data, __FUNCTION__); } } return FALSE; } xmlNode * do_lrm_query_internal(lrm_state_t * lrm_state, gboolean is_replace) { xmlNode *xml_result = NULL; xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; const char *uuid = NULL; if (safe_str_eq(lrm_state->node_name, fsa_our_uname)) { crm_node_t *peer = crm_get_peer(0, lrm_state->node_name); xml_state = do_update_node_cib(peer, node_update_cluster|node_update_peer, NULL, __FUNCTION__); /* The next two lines shouldn't be necessary for newer DCs */ crm_xml_add(xml_state, XML_NODE_JOIN_STATE, CRMD_JOINSTATE_MEMBER); crm_xml_add(xml_state, XML_NODE_EXPECTED, CRMD_JOINSTATE_MEMBER); uuid = fsa_our_uuid; } else { xml_state = create_xml_node(NULL, XML_CIB_TAG_STATE); crm_xml_add(xml_state, XML_NODE_IS_REMOTE, "true"); crm_xml_add(xml_state, XML_ATTR_ID, lrm_state->node_name); crm_xml_add(xml_state, XML_ATTR_UNAME, lrm_state->node_name); uuid = lrm_state->node_name; } xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, uuid); rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); xml_result = create_cib_fragment(xml_state, XML_CIB_TAG_STATUS); crm_log_xml_trace(xml_state, "Current state of the LRM"); free_xml(xml_state); 
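/* Descriptive note: create_cib_fragment() above evidently copies
 * xml_state into the fragment (otherwise freeing it here would leave
 * xml_result dangling), so releasing it before returning is safe. */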
return xml_result; } xmlNode * do_lrm_query(gboolean is_replace, const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (!lrm_state) { crm_err("Could not query lrm state for lrmd node %s", node_name); return NULL; } return do_lrm_query_internal(lrm_state, is_replace); } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, from_host, rsc_id, rc == pcmk_ok ? "" : " not"); op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); CRM_ASSERT(op != NULL); if (rc == pcmk_ok) { op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_EXECRA_OK; } else { op->op_status = PCMK_LRM_OP_ERROR; op->rc = PCMK_EXECRA_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { /* this isn't expected - trigger a new transition */ time_t now = time(NULL); char *now_s = crm_itoa(now); crm_debug("Triggering a refresh after %s deleted %s from the LRM", from_sys, rsc_id); update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE, NULL); free(now_s); } } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (safe_str_eq(event->rsc, op->rsc)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; struct recurring_op_s *pending = value; if (safe_str_eq(rsc, pending->rsc_id)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } /* * Remove the rsc from the CIB * * Avoids refreshing the entire LRM section of this host */ #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name) { char *rsc_xpath = NULL; int max = 0; int rc = pcmk_ok; CRM_CHECK(rsc_id != NULL, return -ENXIO); max = strlen(rsc_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + 1; rsc_xpath = calloc(1, max); snprintf(rsc_xpath, max, rsc_template, lrm_state->node_name, rsc_id); rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath, NULL, NULL, call_options | cib_xpath, user_name); free(rsc_xpath); return rc; } static void delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, GHashTableIter * rsc_gIter, int rc, const char *user_name) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = strdup(rsc_id); if (rsc_gIter) g_hash_table_iter_remove(rsc_gIter); else g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); crm_debug("sync: Sending delete op for %s", rsc_id_copy); delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name); g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; 
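/* Descriptive note: the sweep below has lrm_remove_deleted_rsc() notify
 * the original requester for each pending deletion that matches
 * event.rsc; the callback returns TRUE for matches, which also removes
 * the entry from the deletion_ops table. */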
g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } /* * Remove the op from the CIB * * Avoids refreshing the entire LRM section of this host */ #define op_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s']" #define op_call_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s' and @"XML_LRM_ATTR_CALLID"='%d']" static void delete_op_entry(lrm_state_t * lrm_state, lrmd_event_data_t * op, const char *rsc_id, const char *key, int call_id) { xmlNode *xml_top = NULL; if (op != NULL) { xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); if (op->interval > 0) { char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ crm_xml_add(xml_top, XML_ATTR_ID, op_id); free(op_id); } crm_debug("async: Sending delete op for %s_%s_%d (call=%d)", op->rsc_id, op->op_type, op->interval, op->call_id); fsa_cib_conn->cmds->delete(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); } else if (rsc_id != NULL && key != NULL) { int max = 0; char *op_xpath = NULL; if (call_id > 0) { max = strlen(op_call_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + strlen(key) + 10; op_xpath = calloc(1, max); snprintf(op_xpath, max, op_call_template, lrm_state->node_name, rsc_id, key, call_id); } else { max = strlen(op_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + strlen(key) + 1; op_xpath = calloc(1, max); snprintf(op_xpath, max, op_template, lrm_state->node_name, rsc_id, key); } crm_debug("sync: Sending delete op for %s (call=%d)", rsc_id, call_id); fsa_cib_conn->cmds->delete(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath); free(op_xpath); } else { crm_err("Not enough information to delete op entry: rsc=%p key=%p", rsc_id, key); return; } crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } void lrm_clear_last_failure(const char *rsc_id, const char *node_name) { char *attr = NULL; GHashTableIter iter; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; rsc_history_t *entry = NULL; attr = generate_op_key(rsc_id, "last_failure", 0); /* This clears last failure for every lrm state that has this rsc.*/ for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (node_name != NULL) { if (strcmp(node_name, lrm_state->node_name) != 0) { /* filter by node_name if node_name is present */ continue; } } delete_op_entry(lrm_state, NULL, rsc_id, attr, 0); if (!lrm_state->resource_history) { continue; } g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { if (safe_str_eq(rsc_id, entry->id)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } free(attr); - + g_list_free(lrm_state_list); } static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; struct recurring_op_s *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { key = make_stop_id(rsc_id, op); } pending = g_hash_table_lookup(lrm_state->pending_ops, key); if (pending) { if (remove && pending->remove == FALSE) { pending->remove = TRUE; crm_debug("Scheduling %s for 
removal", key); } if (pending->cancelled) { crm_debug("Operation %s already cancelled", key); return TRUE; } pending->cancelled = TRUE; } else { crm_info("No pending op found for %s", key); return TRUE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); } else { crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ return FALSE; } return TRUE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { struct cancel_data *data = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (safe_str_eq(op->op_key, data->key)) { data->done = TRUE; if (cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove) == FALSE) { return TRUE; } } return FALSE; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data); return data.done; } static lrmd_rsc_info_t * get_lrm_resource(lrm_state_t * lrm_state, xmlNode * resource, xmlNode * op_msg, gboolean do_create) { lrmd_rsc_info_t *rsc = NULL; const char *id = ID(resource); const char *type = crm_element_value(resource, XML_ATTR_TYPE); const char *class = crm_element_value(resource, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(resource, XML_AGENT_ATTR_PROVIDER); const char *long_id = crm_element_value(resource, XML_ATTR_ID_LONG); crm_trace("Retrieving %s from the LRM.", id); CRM_CHECK(id != NULL, return NULL); rsc = lrm_state_get_rsc_info(lrm_state, id, 0); if (!rsc && long_id) { rsc = lrm_state_get_rsc_info(lrm_state, long_id, 0); } if (!rsc && do_create) { CRM_CHECK(class != NULL, return NULL); CRM_CHECK(type != NULL, return NULL); crm_trace("Adding rsc %s before operation", id); lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); rsc = lrm_state_get_rsc_info(lrm_state, id, 0); if (!rsc) { fsa_data_t *msg_data = NULL; crm_err("Could not add resource %s to LRM", id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } } return rsc; } static void delete_resource(lrm_state_t * lrm_state, const char *id, lrmd_rsc_info_t * rsc, GHashTableIter * gIter, const char *sys, const char *host, const char *user, ha_msg_input_t * request) { int rc = pcmk_ok; crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? 
user : "internal", host); if (rsc) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource '%s' deleted", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); op = calloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = strdup(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d", id, sys, user ? user : "internal", host, rc); } delete_rsc_entry(lrm_state, request, id, gIter, rc, user); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean done = FALSE; gboolean create_rsc = TRUE; lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = NULL; gboolean is_remote_node = FALSE; if (input->xml != NULL) { /* Remote node operations are routed here to their remote connections */ target_node = crm_element_value(input->xml, XML_LRM_ATTR_TARGET); } if (target_node == NULL) { target_node = fsa_our_uname; } else if (safe_str_neq(target_node, fsa_our_uname)) { is_remote_node = TRUE; } lrm_state = lrm_state_find(target_node); if (lrm_state == NULL && is_remote_node) { crm_err("No lrmd connection for remote node %s found on cluster node %s. Cannot process request.", target_node, fsa_our_uname); return; } CRM_ASSERT(lrm_state != NULL); #if ENABLE_ACL user_name = crm_element_value(input->msg, F_CRM_USER); crm_trace("LRM command from user '%s'", user_name); #endif crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } crm_trace("LRM command from: %s", from_sys); if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { operation = CRMD_ACTION_DELETE; } else if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { operation = CRM_OP_LRM_REFRESH; } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { rsc_history_t *entry = NULL; lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* The lrmd cannot fail a resource: it does not understand the * concept of success or failure in relation to a resource; it simply * executes operations and reports the results. We determine what a failure is. * Because of this, if we want to fail a resource we have to fake what we * understand a failure to look like. * * To do this we create a fake lrmd operation event for the resource * we want to fail. We then pass that event to the lrmd client callback * so it will be processed as if it actually came from the lrmd. 
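 *
 * In outline (only the existing calls below are involved):
 * construct_op() builds the synthetic event, its op_status is forced
 * to PCMK_LRM_OP_DONE with rc PCMK_EXECRA_UNKNOWN_ERROR, and
 * process_lrm_event() then handles it exactly like a real lrmd result.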
*/ op = construct_op(lrm_state, input->xml, ID(xml_rsc), "asyncmon"); free((char *)op->user_data); op->user_data = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry && entry->last) { op->call_id = entry->last->call_id + 1; if (op->call_id < 0) { op->call_id = 1; } } op->interval = 0; op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_EXECRA_UNKNOWN_ERROR; CRM_ASSERT(op != NULL); #if ENABLE_ACL if (user_name && is_privileged(user_name) == FALSE) { crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } #endif rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc); if (rsc) { crm_info("Failing resource %s...", rsc->id); process_lrm_event(lrm_state, op); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_EXECRA_OK; lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(input->msg, "bad input"); } send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { int rc = pcmk_ok; xmlNode *fragment = do_lrm_query_internal(lrm_state, TRUE); fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); crm_info("Forced a local LRM refresh: call=%d", rc); if(strcmp(CRM_SYSTEM_CRMD, from_sys) != 0) { xmlNode *reply = create_request( CRM_OP_INVOKE_LRM, fragment, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } free_xml(fragment); } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { xmlNode *data = do_lrm_query_internal(lrm_state, FALSE); xmlNode *reply = create_reply(input->msg, data); if (relay_message(reply, TRUE) == FALSE) { crm_err("Unable to route reply"); crm_log_xml_err(reply, "reply"); } free_xml(reply); free_xml(data); } else if (safe_str_eq(operation, CRM_OP_PROBED)) { update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node); } else if (safe_str_eq(crm_op, CRM_OP_REPROBE)) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_notice("Forcing the status of all resources to be redetected"); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host, user_name, NULL); } /* Now delete the copy in the CIB */ erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); /* And finally, _delete_ the value in attrd * Setting it to FALSE results in the PE sending us back here again */ update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); if(strcmp(CRM_SYSTEM_CRMD, from_sys) != 0) { xmlNode *reply = create_request( CRM_OP_INVOKE_LRM, NULL, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } } else if (operation 
!= NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *params = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* only the first 16 chars are used by the LRM */ params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { create_rsc = FALSE; } rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc); if (rsc == NULL && create_rsc) { crm_err("Invalid resource definition"); crm_log_xml_warn(input->msg, "bad input"); } else if (rsc == NULL) { lrmd_event_data_t *op = NULL; crm_notice("Not creating resource for a %s event: %s", operation, ID(input->xml)); delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name); op = construct_op(lrm_state, input->xml, ID(xml_rsc), operation); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_EXECRA_OK; CRM_ASSERT(op != NULL); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); } else if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { lrmd_event_data_t *op = NULL; char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; const char *op_interval = NULL; CRM_CHECK(params != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL); op_interval = crm_element_value(params, meta_key); free(meta_key); meta_key = crm_meta_name(XML_LRM_ATTR_TASK); op_task = crm_element_value(params, meta_key); free(meta_key); meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); call_id = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); CRM_CHECK(op_interval != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); op = construct_op(lrm_state, input->xml, rsc->id, op_task); CRM_ASSERT(op != NULL); op_key = generate_op_key(rsc->id, op_task, crm_parse_int(op_interval, "0")); crm_debug("PE requested op %s (call=%s) be cancelled", op_key, call_id ? call_id : "NA"); call = crm_parse_int(call_id, "0"); if (call == 0) { /* the normal case when the PE cancels a recurring op */ done = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { /* the normal case when the PE cancels an orphan op */ done = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } if (done == FALSE) { crm_debug("Nothing known about operation %d for %s", call, op_key); delete_op_entry(lrm_state, NULL, rsc->id, op_key, call); /* needed?? surely not otherwise the cancel_op_(_key) wouldn't * have failed in the first place */ g_hash_table_remove(lrm_state->pending_ops, op_key); } op->rc = PCMK_EXECRA_OK; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(from_host, from_sys, rsc, op, rsc->id); free(op_key); lrmd_free_event(op); } else if (rsc != NULL && safe_str_eq(operation, CRMD_ACTION_DELETE)) { #if ENABLE_ACL int cib_rc = delete_rsc_status(lrm_state, rsc->id, cib_dryrun | cib_sync_call, user_name); if (cib_rc != pcmk_ok) { lrmd_event_data_t *op = NULL; crm_err ("Attempted deletion of resource status '%s' from CIB for %s (user=%s) on %s failed: (rc=%d) %s", rsc->id, from_sys, user_name ? 
user_name : "unknown", from_host, cib_rc, pcmk_strerror(cib_rc)); op = construct_op(lrm_state, input->xml, rsc->id, operation); op->op_status = PCMK_LRM_OP_ERROR; if (cib_rc == -EACCES) { op->rc = PCMK_EXECRA_INSUFFICIENT_PRIV; } else { op->rc = PCMK_EXECRA_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } #endif delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, user_name, input); } else if (rsc != NULL) { do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg); } lrmd_free_rsc_info(rsc); } else { crm_err("Operation was neither a lrm_query, nor a rsc op. %s", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; const char *op_interval = NULL; GHashTable *params = NULL; const char *transition = NULL; CRM_LOG_ASSERT(rsc_id != NULL); op = calloc(1, sizeof(lrmd_event_data_t)); op->type = lrmd_event_exec_complete; op->op_type = strdup(operation); op->op_status = PCMK_LRM_OP_PENDING; op->rc = -1; op->rsc_id = strdup(rsc_id); op->interval = 0; op->timeout = 0; op->start_delay = 0; if (rsc_op == NULL) { CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. */ op->params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_op_target_rc"); op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); op_interval = crm_meta_value(params, XML_LRM_ATTR_INTERVAL); op->interval = crm_parse_int(op_interval, "0"); op->timeout = crm_parse_int(op_timeout, "0"); op->start_delay = crm_parse_int(op_delay, "0"); if (safe_str_neq(operation, RSC_STOP)) { op->params = params; } else { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->interval < 0) { op->interval = 0; } if (op->timeout <= 0) { op->timeout = op->interval; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = strdup(transition); if (op->interval != 0) { if (safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and Stop actions cannot have an interval: %d", op->interval); op->interval = 0; } } crm_trace("Constructed %s op for %s: interval=%d", operation, rsc_id, op->interval); return op; } void send_direct_ack(const char 
*to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; xmlNode *fragment; crm_node_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { CRM_LOG_ASSERT(rsc_id != NULL); op->rsc_id = strdup(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = crm_get_peer(0, fsa_our_uname); update = do_update_node_cib(peer, node_update_none, NULL, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, __FUNCTION__); fragment = create_cib_fragment(update, XML_CIB_TAG_STATUS); reply = create_request(CRM_OP_INVOKE_LRM, fragment, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_trace(update, "ACK Update"); crm_debug("ACK'ing resource op %s_%s_%d from %s: %s", op->rsc_id, op->op_type, op->interval, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(fragment); free_xml(update); free_xml(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } set_bit(fsa_input_register, R_SENT_RSC_STOP); g_list_free(lrm_state_list); return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { struct stop_recurring_action_s *event = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval != 0 && safe_str_eq(op->rsc_id, event->rsc->id)) { if (cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE) == FALSE) { return TRUE; } } return FALSE; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; gboolean remove = FALSE; if (op->interval != 0) { remove = cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request) { int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; lrmd_key_value_t *params = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; CRM_CHECK(rsc != NULL, return); if (msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); if (transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(lrm_state, msg, rsc->id, operation); /* stop the monitor before stopping the resource */ if (crm_str_eq(operation, CRMD_ACTION_STOP, TRUE) || crm_str_eq(operation, CRMD_ACTION_DEMOTE, TRUE) || crm_str_eq(operation, CRMD_ACTION_PROMOTE, TRUE) || crm_str_eq(operation, CRMD_ACTION_MIGRATE, TRUE)) { struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->pending_ops, 
stop_recurring_action_by_rsc, &data); } /* now do the op */ crm_info("Performing key=%s op=%s_%s_%d", transition, rsc->id, operation, op->interval); if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE && fsa_state != S_TRANSITION_ENGINE) { if (safe_str_neq(operation, "fail") && safe_str_neq(operation, CRMD_ACTION_STOP)) { crm_info("Discarding attempt to perform action %s on %s" " in state %s", operation, rsc->id, fsa_state2string(fsa_state)); op->rc = 99; op->op_status = PCMK_LRM_OP_ERROR; send_direct_ack(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } } op_id = generate_op_key(rsc->id, op->op_type, op->interval); if (op->interval > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } if (op->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { params = lrmd_key_value_add(params, key, value); } } call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data, op->interval, op->timeout, op->start_delay, params); if (call_id <= 0) { crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); struct recurring_op_s *pending = NULL; pending = calloc(1, sizeof(struct recurring_op_s)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval = op->interval; pending->op_type = strdup(operation); pending->op_key = strdup(op_id); pending->rsc_id = strdup(rsc->id); g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); if (op->interval > 0 && op->start_delay > START_DELAY_THRESHOLD) { char *uuid = NULL; int dummy = 0, target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); free(uuid); op->rc = target_rc; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(NULL, NULL, rsc, op, rsc->id); } } free(op_id); lrmd_free_event(op); return; } int last_resource_update = 0; static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); } if (call_id == last_resource_update) { last_resource_update = 0; trigger_fsa(fsa_source); } } static int do_update_resource(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { /* */ int rc = pcmk_ok; xmlNode *update, *iter = NULL; int call_opt = cib_quorum_override; const char *uuid = NULL; CRM_CHECK(op != NULL, return 0); if (fsa_state == S_ELECTION || fsa_state == S_PENDING) { crm_info("Sending update to local CIB in state: %s", fsa_state2string(fsa_state)); call_opt |= cib_scope_local; } iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); if (safe_str_eq(lrm_state->node_name, fsa_our_uname)) { set_uuid(iter, XML_ATTR_UUID, lrm_state->node_name); uuid = fsa_our_uuid; } else { /* remote nodes uuid and uname are equal */ crm_xml_add(iter, XML_ATTR_UUID, lrm_state->node_name); uuid = 
lrm_state->node_name; crm_xml_add(iter, XML_NODE_IS_REMOTE, "true"); } crm_xml_add(iter, XML_ATTR_UNAME, lrm_state->node_name); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, __FUNCTION__); if (rsc) { crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->class); crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); CRM_CHECK(rsc->type != NULL, crm_err("Resource %s has no value for type", op->rsc_id)); CRM_CHECK(rsc->class != NULL, crm_err("Resource %s has no value for class", op->rsc_id)); } else { crm_warn("Resource %s no longer exists in the lrmd", op->rsc_id); goto cleanup; } /* make it an asynchronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down anyway? * (probably because of an internal error) * * Worst case: * we get shot for having resources "running" when they really weren't * * the alternative however means blocking here for too long, which * isn't acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL); if (rc > 0) { last_resource_update = rc; } /* the return code is a call number, not an error code */ crm_trace("Sent resource state update message: %d for %s=%d on %s", rc, op->op_type, op->interval, op->rsc_id); fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; int log_level = LOG_ERR; gboolean removed = FALSE; lrmd_rsc_info_t *rsc = NULL; struct recurring_op_s *pending = NULL; CRM_CHECK(op != NULL, return FALSE); CRM_CHECK(op->rsc_id != NULL, return FALSE); op_id = make_stop_id(op->rsc_id, op->call_id); pending = g_hash_table_lookup(lrm_state->pending_ops, op_id); op_key = generate_op_key(op->rsc_id, op->op_type, op->interval); rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); switch (op->op_status) { case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_PENDING: case PCMK_LRM_OP_NOTSUPPORTED: break; case PCMK_LRM_OP_CANCELLED: log_level = LOG_INFO; break; case PCMK_LRM_OP_DONE: log_level = LOG_NOTICE; break; case PCMK_LRM_OP_TIMEOUT: log_level = LOG_DEBUG_3; crm_err("LRM operation %s (%d) %s (timeout=%dms)", op_key, op->call_id, services_lrm_status_str(op->op_status), op->timeout); break; default: crm_err("Mapping unknown status (%d) to ERROR", op->op_status); op->op_status = PCMK_LRM_OP_ERROR; } if (op->op_status == PCMK_LRM_OP_ERROR && (op->rc == PCMK_EXECRA_RUNNING_MASTER || op->rc == PCMK_EXECRA_NOT_RUNNING)) { /* Leave it up to the TE/PE to decide if this is an error */ op->op_status = PCMK_LRM_OP_DONE; log_level = LOG_INFO; } if (op->op_status != PCMK_LRM_OP_CANCELLED) { if (safe_str_eq(op->op_type, RSC_NOTIFY)) { /* Keep notify ops out of the CIB */ send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { update_id = do_update_resource(lrm_state, rsc, op); } } else if (op->interval == 0) { /* This will 
occur when "crm resource cleanup" is called while actions are in-flight */ crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else if (pending == NULL) { /* Operations that are cancelled may safely be removed * from the pending op list before the lrmd completion event * is received. Only report non-cancelled ops here. */ if (op->op_status != PCMK_LRM_OP_CANCELLED) { crm_err("Op %s (call=%d): No 'pending' entry", op_key, op->call_id); } } else if (op->user_data == NULL) { crm_err("Op %s (call=%d): No user data", op_key, op->call_id); } else if (pending->remove) { delete_op_entry(lrm_state, op, op->rsc_id, op_key, op->call_id); } else { /* Before a stop is called, no need to direct ack */ crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id); } if ((op->interval == 0) && g_hash_table_remove(lrm_state->pending_ops, op_id)) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s): Confirmed", op_key, op->call_id, op_id); } if (op->op_status == PCMK_LRM_OP_DONE) { do_crm_log(log_level, "LRM operation %s (call=%d, rc=%d, cib-update=%d, confirmed=%s) %s", op_key, op->call_id, op->rc, update_id, removed ? "true" : "false", lrmd_event_rc2str(op->rc)); } else { do_crm_log(log_level, "LRM operation %s (call=%d, status=%d, cib-update=%d, confirmed=%s) %s", op_key, op->call_id, op->op_status, update_id, removed ? "true" : "false", services_lrm_status_str(op->op_status)); } if (op->output) { char *prefix = g_strdup_printf("%s-%s_%s_%d:%d", lrm_state->node_name, op->rsc_id, op->op_type, op->interval, op->call_id); if (op->rc) { crm_log_output(LOG_NOTICE, prefix, op->output); } else { crm_log_output(LOG_DEBUG, prefix, op->output); } g_free(prefix); } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... allow it to continue */ mainloop_set_trigger(fsa_source); update_history_cache(lrm_state, rsc, op); lrmd_free_rsc_info(rsc); free(op_key); free(op_id); return TRUE; } diff --git a/crmd/remote_lrmd_ra.c b/crmd/remote_lrmd_ra.c index 5e51f5e87f..07cd67cafc 100644 --- a/crmd/remote_lrmd_ra.c +++ b/crmd/remote_lrmd_ra.c @@ -1,642 +1,642 @@ /* * Copyright (C) 2013 David Vossel * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #define REMOTE_LRMD_RA "remote" /* The max start timeout before cmd retry */ #define MAX_START_TIMEOUT_MS 10000 int remote_ra_callid = 0; typedef struct remote_ra_cmd_s { /*! the local node the cmd is issued from */ char *owner; /*! the remote node the cmd is executed on */ char *rsc_id; /*! the action to execute */ char *action; /*! 
some string the client wants us to give it back */ char *userdata; /*! start delay in ms */ int start_delay; /*! timer id used for start delay. */ int delay_id; /*! timeout in ms for cmd */ int timeout; int remaining_timeout; /*! recurring interval in ms */ int interval; /*! interval timer id */ int interval_id; int reported_success; int monitor_timeout_id; /*! action parameters */ lrmd_key_value_t *params; /*! executed rc */ int rc; int op_status; int call_id; time_t start_time; gboolean cancel; } remote_ra_cmd_t; typedef struct remote_ra_data_s { crm_trigger_t *work; remote_ra_cmd_t *cur_cmd; GList *cmds; GList *recurring_cmds; } remote_ra_data_t; static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms); static void free_cmd(gpointer user_data) { remote_ra_cmd_t *cmd = user_data; if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->interval_id) { g_source_remove(cmd->interval_id); } if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); } free(cmd->owner); free(cmd->rsc_id); free(cmd->action); free(cmd->userdata); lrmd_key_value_freeall(cmd->params); free(cmd); } static gboolean recurring_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->interval_id = 0; connection_rsc = lrm_state_find(cmd->rsc_id); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd); ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); } return FALSE; } static gboolean start_delay_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->delay_id = 0; connection_rsc = lrm_state_find(cmd->rsc_id); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; mainloop_set_trigger(ra_data->work); } return FALSE; } static void report_remote_ra_result(remote_ra_cmd_t * cmd) { lrmd_event_data_t op = { 0, }; op.type = lrmd_event_exec_complete; op.rsc_id = cmd->rsc_id; op.op_type = cmd->action; op.user_data = cmd->userdata; op.timeout = cmd->timeout; op.interval = cmd->interval; op.rc = cmd->rc; op.op_status = cmd->op_status; if (cmd->params) { lrmd_key_value_t *tmp; op.params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); for (tmp = cmd->params; tmp; tmp = tmp->next) { g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value)); } } op.call_id = cmd->call_id; op.remote_nodename = cmd->owner; lrm_op_callback(&op); if (op.params) { g_hash_table_destroy(op.params); } } static void update_remaining_timeout(remote_ra_cmd_t * cmd) { cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000; } static gboolean retry_start_cmd_cb(gpointer data) { lrm_state_t *lrm_state = data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd = NULL; int rc = -1; if (!ra_data || !ra_data->cur_cmd) { return FALSE; } cmd = ra_data->cur_cmd; if (safe_str_neq(cmd->action, "start")) { return FALSE; } update_remaining_timeout(cmd); if (cmd->remaining_timeout > 0) { rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout); } if (rc != 0) { cmd->rc = PCMK_EXECRA_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; report_remote_ra_result(cmd); if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } ra_data->cur_cmd = NULL; free_cmd(cmd); } else { /* 
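the connect was successfully re-initiated;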
wait for connection event */ } return FALSE; } static gboolean monitor_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; crm_debug("Poke async response timed out for node %s", cmd->rsc_id); cmd->monitor_timeout_id = 0; cmd->op_status = PCMK_LRM_OP_TIMEOUT; cmd->rc = PCMK_EXECRA_UNKNOWN_ERROR; lrm_state = lrm_state_find(cmd->rsc_id); if (lrm_state && lrm_state->remote_ra_data) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (ra_data->cur_cmd == cmd) { ra_data->cur_cmd = NULL; } if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } } report_remote_ra_result(cmd); free_cmd(cmd); return FALSE; } void remote_lrm_op_callback(lrmd_event_data_t * op) { lrm_state_t *lrm_state = NULL; remote_ra_data_t *ra_data = NULL; remote_ra_cmd_t *cmd = NULL; crm_debug("remote connection event - event_type:%s node:%s action:%s rc:%s op_status:%s", lrmd_event_type2str(op->type), op->remote_nodename, op->op_type ? op->op_type : "none", lrmd_event_rc2str(op->rc), services_lrm_status_str(op->op_status)); /* filter all EXEC events up */ if (op->type == lrmd_event_exec_complete) { lrm_op_callback(op); return; } lrm_state = lrm_state_find(op->remote_nodename); if (!lrm_state || !lrm_state->remote_ra_data) { crm_debug("lrm_state info not found for remote lrmd connection event"); return; } ra_data = lrm_state->remote_ra_data; if (!ra_data->cur_cmd) { crm_debug("no event to match"); return; } cmd = ra_data->cur_cmd; /* Start actions and migrate_from actions complete after connection * comes back to us. */ if (op->type == lrmd_event_connect && (safe_str_eq(cmd->action, "start") || safe_str_eq(cmd->action, "migrate_from"))) { if (op->connection_rc < 0) { update_remaining_timeout(cmd); /* There isn't much of a reason to reschedule if the timeout is too small */ if (cmd->remaining_timeout > 3000) { crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout); g_timeout_add(1000, retry_start_cmd_cb, lrm_state); return; } else { crm_trace("can't reschedule start, remaining timeout too small %d", cmd->remaining_timeout); } cmd->op_status = PCMK_LRM_OP_TIMEOUT; cmd->rc = PCMK_EXECRA_UNKNOWN_ERROR; } else { cmd->rc = PCMK_EXECRA_OK; cmd->op_status = PCMK_LRM_OP_DONE; } crm_debug("remote lrmd connect event matched %s action. ", cmd->action); report_remote_ra_result(cmd); if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } ra_data->cur_cmd = NULL; free_cmd(cmd); return; } else if (op->type == lrmd_event_poke && safe_str_eq(cmd->action, "monitor")) { if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); cmd->monitor_timeout_id = 0; } /* Only report success the first time, after that only worry about failures. * For this function, if we get the poke back, it is always a success. Pokes * only fail if the send fails, or the response times out. */ if (!cmd->reported_success) { cmd->rc = PCMK_EXECRA_OK; cmd->op_status = PCMK_LRM_OP_DONE; report_remote_ra_result(cmd); cmd->reported_success = 1; } crm_debug("remote lrmd poke event matched %s action. ", cmd->action); /* success, keep rescheduling if interval is present. 
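The command is parked on the recurring list below; when the interval timer pops, recurring_helper() moves it back onto the work queue.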
*/ if (cmd->interval && (cmd->cancel == FALSE)) { ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd); cmd->interval_id = g_timeout_add(cmd->interval, recurring_helper, cmd); cmd = NULL; /* prevent free */ } if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } ra_data->cur_cmd = NULL; free_cmd(cmd); return; } crm_debug("Event did not match %s action", ra_data->cur_cmd->action); } static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms) { const char *server = NULL; lrmd_key_value_t *tmp = NULL; int port = 0; int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms; for (tmp = cmd->params; tmp; tmp = tmp->next) { if (safe_str_eq(tmp->key, "addr") || safe_str_eq(tmp->key, "server")) { server = tmp->value; } if (safe_str_eq(tmp->key, "port")) { port = atoi(tmp->value); } } return lrm_state_remote_connect_async(lrm_state, server, port, timeout_used); } static gboolean handle_remote_ra_exec(gpointer user_data) { int rc = 0; lrm_state_t *lrm_state = user_data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd; GList *first = NULL; if (ra_data->cur_cmd) { /* still waiting on previous cmd */ return TRUE; } while (ra_data->cmds) { first = ra_data->cmds; cmd = first->data; if (cmd->delay_id) { /* still waiting for start delay timer to trip */ return TRUE; } ra_data->cmds = g_list_remove_link(ra_data->cmds, first); g_list_free_1(first); if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) { xmlNode *status = create_xml_node(NULL, XML_CIB_TAG_STATE); /* clear node status in cib */ crm_xml_add(status, XML_ATTR_ID, lrm_state->node_name); lrm_state_reset_tables(lrm_state); fsa_cib_delete(XML_CIB_TAG_STATUS, status, cib_quorum_override, rc, NULL); crm_info("Forced a remote LRM refresh before connection start: call=%d", rc); crm_log_xml_trace(status, "CLEAR LRM"); - free(status); + free_xml(status); rc = handle_remote_ra_start(lrm_state, cmd, cmd->timeout); if (rc == 0) { /* take care of this later when we get async connection result */ crm_debug("began remote lrmd connect, waiting for connect event."); ra_data->cur_cmd = cmd; return TRUE; } else { crm_debug("connect failed, not expecting to match any connection event later"); cmd->rc = PCMK_EXECRA_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, "monitor")) { if (lrm_state_is_connected(lrm_state) == TRUE) { rc = lrm_state_poke_connection(lrm_state); if (rc < 0) { cmd->rc = PCMK_EXECRA_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; } } else { rc = -1; cmd->op_status = PCMK_LRM_OP_DONE; cmd->rc = PCMK_EXECRA_NOT_RUNNING; } if (rc == 0) { crm_debug("poked remote lrmd at node %s, waiting for async response.", cmd->rsc_id); ra_data->cur_cmd = cmd; cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd); return TRUE; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, "stop")) { lrm_state_disconnect(lrm_state); cmd->rc = PCMK_EXECRA_OK; cmd->op_status = PCMK_LRM_OP_DONE; if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } ra_data->cmds = NULL; ra_data->recurring_cmds = NULL; report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, "migrate_to")) { /* no-op. 
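The connection itself is re-established by the subsequent migrate_from action on the destination node, which is handled like a start above.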
*/ cmd->rc = PCMK_EXECRA_OK; cmd->op_status = PCMK_LRM_OP_DONE; report_remote_ra_result(cmd); } free_cmd(cmd); } return TRUE; } static void remote_ra_data_init(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = NULL; if (lrm_state->remote_ra_data) { return; } ra_data = calloc(1, sizeof(remote_ra_data_t)); ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state); lrm_state->remote_ra_data = ra_data; } void remote_ra_cleanup(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (!ra_data) { return; } if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } mainloop_destroy_trigger(ra_data->work); free(ra_data); lrm_state->remote_ra_data = NULL; } gboolean is_remote_lrmd_ra(const char *agent, const char *provider, const char *id) { if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) { return TRUE; } if (id && lrm_state_find(id)) { return TRUE; } return FALSE; } lrmd_rsc_info_t * remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id) { lrmd_rsc_info_t *info = NULL; if ((lrm_state_find(rsc_id))) { info = calloc(1, sizeof(lrmd_rsc_info_t)); info->id = strdup(rsc_id); info->type = strdup(REMOTE_LRMD_RA); info->class = strdup("ocf"); info->provider = strdup("pacemaker"); } return info; } static gboolean is_remote_ra_supported_action(const char *action) { if (!action) { return FALSE; } else if (strcmp(action, "start") && strcmp(action, "stop") && strcmp(action, "migrate_to") && strcmp(action, "migrate_from") && strcmp(action, "monitor")) { return FALSE; } return TRUE; } static GList * remove_cmd(GList * list, const char *action, int interval) { remote_ra_cmd_t *cmd = NULL; GListPtr gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if (cmd->interval == interval && safe_str_eq(cmd->action, action)) { break; } cmd = NULL; } if (cmd) { list = g_list_remove(list, cmd); free_cmd(cmd); } return list; } int remote_ra_cancel(lrm_state_t * lrm_state, const char *rsc_id, const char *action, int interval) { lrm_state_t *connection_rsc = NULL; remote_ra_data_t *ra_data = NULL; connection_rsc = lrm_state_find(rsc_id); if (!connection_rsc || !connection_rsc->remote_ra_data) { return -EINVAL; } ra_data = connection_rsc->remote_ra_data; ra_data->cmds = remove_cmd(ra_data->cmds, action, interval); ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action, interval); if (ra_data->cur_cmd && (ra_data->cur_cmd->interval == interval) && (safe_str_eq(ra_data->cur_cmd->action, action))) { ra_data->cur_cmd->cancel = TRUE; } return 0; } int remote_ra_exec(lrm_state_t * lrm_state, const char *rsc_id, const char *action, const char *userdata, int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ lrmd_key_value_t * params) { int rc = 0; lrm_state_t *connection_rsc = NULL; remote_ra_cmd_t *cmd = NULL; remote_ra_data_t *ra_data = NULL; if (is_remote_ra_supported_action(action) == FALSE) { rc = -EINVAL; goto exec_done; } connection_rsc = lrm_state_find(rsc_id); if (!connection_rsc) { rc = -EINVAL; goto exec_done; } remote_ra_data_init(connection_rsc); cmd = calloc(1, sizeof(remote_ra_cmd_t)); cmd->owner = strdup(lrm_state->node_name); cmd->rsc_id = strdup(rsc_id); cmd->action = strdup(action); cmd->userdata = strdup(userdata); cmd->interval = interval; cmd->timeout = timeout; cmd->start_delay = start_delay; cmd->params = params; cmd->start_time = 
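/* wall-clock start time; update_remaining_timeout() subtracts it from
 * cmd->timeout when deciding whether a failed connect is worth retrying */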
time(NULL); remote_ra_callid++; if (remote_ra_callid <= 0) { remote_ra_callid = 1; } cmd->call_id = remote_ra_callid; if (cmd->start_delay) { cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } ra_data = connection_rsc->remote_ra_data; ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); return cmd->call_id; exec_done: lrmd_key_value_freeall(params); return rc; } diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c index 4c80431a9f..fffc9729a9 100644 --- a/crmd/te_callbacks.c +++ b/crmd/te_callbacks.c @@ -1,535 +1,549 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include /* For ONLINESTATUS etc */ void te_update_confirm(const char *event, xmlNode * msg); extern char *te_uuid; gboolean shuttingdown = FALSE; crm_graph_t *transition_graph; crm_trigger_t *transition_trigger = NULL; /* #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */ #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']" static const char * get_node_id(xmlNode * rsc_op) { xmlNode *node = rsc_op; while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); return ID(node); } static void process_resource_updates(xmlXPathObject * xpathObj) { /* */ int lpc = 0, max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } } void te_update_diff(const char *event, xmlNode * msg) { int lpc, max; int rc = -1; const char *op = NULL; xmlNode *diff = NULL; xmlXPathObject *xpathObj = NULL; int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; CRM_CHECK(msg != NULL, return); crm_element_value_int(msg, F_CIB_RC, &rc); if (transition_graph == NULL) { crm_trace("No graph"); return; } else if (rc < pcmk_ok) { crm_trace("Filter rc=%d (%s)", rc, pcmk_strerror(rc)); return; } else if (transition_graph->complete == TRUE && fsa_state != S_IDLE && fsa_state != S_TRANSITION_ENGINE && fsa_state != S_POLICY_ENGINE) { crm_trace("Filter state=%s, complete=%d", fsa_state2string(fsa_state), transition_graph->complete); return; } op = crm_element_value(msg, F_CIB_OPERATION); diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); cib_diff_version_details(diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_debug("Processing diff (%s): %d.%d.%d -> %d.%d.%d (%s)", op, diff_del_admin_epoch, diff_del_epoch, diff_del_updates, diff_add_admin_epoch, 
diff_add_epoch, diff_add_updates, fsa_state2string(fsa_state)); log_cib_diff(LOG_DEBUG_2, diff, __FUNCTION__); if (cib_config_changed(NULL, NULL, &diff)) { abort_transition(INFINITY, tg_restart, "Non-status change", diff); goto bail; /* configuration changed */ } /* Tickets Attributes - Added/Updated */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Ticket attribute: update", aborted); goto bail; } freeXpathObject(xpathObj); /* Tickets Attributes - Removed */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Ticket attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); /* Transient Attributes - Added/Updated */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_TRANSIENT_NODEATTRS "//" XML_CIB_TAG_NVPAIR); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *attr = getXpathResult(xpathObj, lpc); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = NULL; if (safe_str_eq(CRM_OP_PROBED, name)) { value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); } if (crm_is_true(value) == FALSE) { abort_transition(INFINITY, tg_restart, "Transient attribute: update", attr); crm_log_xml_trace(attr, "Abort"); goto bail; } } freeXpathObject(xpathObj); /* Transient Attributes - Removed */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_TRANSIENT_NODEATTRS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Transient attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); /* * Check for and fast-track the processing of LRM refreshes * In large clusters this can result in _huge_ speedups * * Unfortunately we can only do so when there are no pending actions * Otherwise we could miss updates we're waiting for and stall * */ xpathObj = NULL; if (transition_graph->pending == 0) { xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RESOURCE); } max = numXpathResults(xpathObj); if (max > 1) { /* Updates by, or in response to, TE actions will never contain updates * for more than one resource at a time */ crm_debug("Detected LRM refresh - %d resources updated: Skipping all resource events", max); crm_log_xml_trace(diff, "lrm-refresh"); abort_transition(INFINITY, tg_restart, "LRM Refresh", NULL); goto bail; } freeXpathObject(xpathObj); /* Process operation updates */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RSC_OP); if (numXpathResults(xpathObj)) { process_resource_updates(xpathObj); } freeXpathObject(xpathObj); /* Detect deleted (as opposed to replaced or added) actions - eg. 
crm_resource -C */ xpathObj = xpath_search(diff, "//" XML_TAG_DIFF_REMOVED "//" XML_LRM_TAG_RSC_OP); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { int path_max = 0; const char *op_id = NULL; char *rsc_op_xpath = NULL; xmlXPathObject *op_match = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); op_id = ID(match); path_max = strlen(rsc_op_template) + strlen(op_id) + 1; rsc_op_xpath = calloc(1, path_max); snprintf(rsc_op_xpath, path_max, rsc_op_template, op_id); op_match = xpath_search(diff, rsc_op_xpath); if (numXpathResults(op_match) == 0) { /* Prevent false positives by matching cancelations too */ const char *node = get_node_id(match); crm_action_t *cancelled = get_cancel_action(op_id, node); if (cancelled == NULL) { crm_debug("No match for deleted action %s (%s on %s)", rsc_op_xpath, op_id, node); abort_transition(INFINITY, tg_restart, "Resource op removal", match); freeXpathObject(op_match); free(rsc_op_xpath); goto bail; } else { crm_debug("Deleted lrm_rsc_op %s on %s was for graph event %d", op_id, node, cancelled->id); } } freeXpathObject(op_match); free(rsc_op_xpath); } bail: freeXpathObject(xpathObj); } gboolean process_te_message(xmlNode * msg, xmlNode * xml_data) { const char *from = crm_element_value(msg, F_ORIG); const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); const char *ref = crm_element_value(msg, F_CRM_REFERENCE); const char *op = crm_element_value(msg, F_CRM_TASK); const char *type = crm_element_value(msg, F_CRM_MSG_TYPE); crm_trace("Processing %s (%s) message", op, ref); crm_log_xml_trace(msg, "ipc"); if (op == NULL) { /* error */ } else if (sys_to == NULL || strcasecmp(sys_to, CRM_SYSTEM_TENGINE) != 0) { crm_trace("Bad sys-to %s", crm_str(sys_to)); return FALSE; } else if (safe_str_eq(op, CRM_OP_INVOKE_LRM) && safe_str_eq(sys_from, CRM_SYSTEM_LRMD) /* && safe_str_eq(type, XML_ATTR_RESPONSE) */ ) { xmlXPathObject *xpathObj = NULL; crm_log_xml_trace(msg, "Processing (N)ACK"); crm_debug("Processing (N)ACK %s from %s", crm_element_value(msg, F_CRM_REFERENCE), from); xpathObj = xpath_search(xml_data, "//" XML_LRM_TAG_RSC_OP); if (numXpathResults(xpathObj)) { process_resource_updates(xpathObj); freeXpathObject(xpathObj); } else { crm_log_xml_err(msg, "Invalid (N)ACK"); freeXpathObject(xpathObj); return FALSE; } } else { crm_err("Unknown command: %s::%s from %s", type, op, sys_from); } crm_trace("finished processing message"); return TRUE; } GHashTable *stonith_failures = NULL; struct st_fail_rec { int count; }; gboolean too_many_st_failures(void) { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *value = NULL; if (stonith_failures == NULL) { return FALSE; } g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { if (value->count > 10) { crm_notice("Too many failures to fence %s (%d), giving up", key, value->count); return TRUE; } } return FALSE; } +void +reset_st_fail_count(const char *target) +{ + struct st_fail_rec *rec = NULL; + + if (stonith_failures) { + rec = g_hash_table_lookup(stonith_failures, target); + } + + if (rec) { + rec->count = 0; + } +} + void tengine_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { char *uuid = NULL; int target_rc = -1; int stonith_id = -1; int transition_id = -1; crm_action_t *action = NULL; struct st_fail_rec *rec = NULL; int call_id = data->call_id; int rc = data->rc; char *userdata = 
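/* userdata carries the transition key recorded when the fencing request
 * was initiated; decode_transition_key() below unpacks it so that stale
 * or foreign STONITH results can be filtered out */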
data->userdata; CRM_CHECK(userdata != NULL, return); crm_notice("Stonith operation %d/%s: %s (%d)", call_id, (char *)userdata, pcmk_strerror(rc), rc); if (AM_I_DC == FALSE) { return; } /* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */ /* op->call_id, op->optype, op->node_name, op->op_result, */ /* (char *)op->node_list, op->private_data); */ /* filter out old STONITH actions */ CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, &target_rc), crm_err("Invalid event detected"); goto bail; ); if (transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid) || transition_graph->id != transition_id) { crm_info("Ignoring STONITH action initiated outside of the current transition"); goto bail; } /* this will mark the event complete if a match is found */ action = get_action(stonith_id, FALSE); if (action == NULL) { crm_err("Stonith action not matched"); goto bail; } stop_te_timer(action->timer); if (stonith_failures == NULL) { stonith_failures = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, free); } if (rc == pcmk_ok) { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); crm_debug("Stonith operation %d for %s passed", call_id, target); if (action->confirmed == FALSE) { action->confirmed = TRUE; if (action->sent_update == FALSE) { send_stonith_update(action, target, uuid); } } rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count = 0; } } else { const char *target = crm_element_value_const(action->xml, XML_LRM_ATTR_TARGET); const char *allow_fail = crm_meta_value(action->params, XML_ATTR_TE_ALLOWFAIL); action->failed = TRUE; if (crm_is_true(allow_fail) == FALSE) { crm_notice("Stonith operation %d for %s failed (%s): aborting transition.", call_id, target, pcmk_strerror(rc)); abort_transition(INFINITY, tg_restart, "Stonith failed", NULL); } rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count++; } else { rec = malloc(sizeof(struct st_fail_rec)); rec->count = 1; g_hash_table_insert(stonith_failures, strdup(target), rec); } } update_graph(transition_graph, action); trigger_graph(); bail: free(userdata); free(uuid); return; } void cib_fencing_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char *)user_data, pcmk_strerror(rc), rc); crm_log_xml_warn(msg, "Failed update"); abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); } free(user_data); } void cib_action_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Update %d FAILED: %s", call_id, pcmk_strerror(rc)); } } void cib_failcount_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Update %d FAILED: %s", call_id, pcmk_strerror(rc)); } } gboolean action_timer_callback(gpointer data) { crm_action_timer_t *timer = NULL; CRM_CHECK(data != NULL, return FALSE); timer = (crm_action_timer_t *) data; stop_te_timer(timer); crm_warn("Timer popped (timeout=%d, abort_level=%d, complete=%s)", timer->timeout, transition_graph->abort_priority, transition_graph->complete ? 
"true" : "false"); CRM_CHECK(timer->action != NULL, return FALSE); if (transition_graph->complete) { crm_warn("Ignoring timeout while not in transition"); } else if (timer->reason == timeout_action_warn) { print_action(LOG_WARNING, "Action missed its timeout: ", timer->action); /* Don't check the FSA state * * We might also be in S_INTEGRATION or some other state waiting for this * action so we can close the transition and continue */ } else { /* fail the action */ gboolean send_update = TRUE; const char *task = crm_element_value(timer->action->xml, XML_LRM_ATTR_TASK); print_action(LOG_ERR, "Aborting transition, action lost: ", timer->action); timer->action->failed = TRUE; timer->action->confirmed = TRUE; abort_transition(INFINITY, tg_restart, "Action lost", NULL); update_graph(transition_graph, timer->action); trigger_graph(); if (timer->action->type != action_type_rsc) { send_update = FALSE; } else if (safe_str_eq(task, "cancel")) { /* we dont need to update the CIB with these */ send_update = FALSE; } if (send_update) { /* cib_action_update(timer->action, PCMK_LRM_OP_PENDING, PCMK_EXECRA_STATUS_UNKNOWN); */ cib_action_update(timer->action, PCMK_LRM_OP_TIMEOUT, PCMK_EXECRA_UNKNOWN_ERROR); } } return FALSE; } diff --git a/crmd/te_events.c b/crmd/te_events.c index 112f056c53..f2b9b629e9 100644 --- a/crmd/te_events.c +++ b/crmd/te_events.c @@ -1,543 +1,550 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include char *failed_stop_offset = NULL; char *failed_start_offset = NULL; int match_graph_event(int action_id, xmlNode * event, const char *event_node, int op_status, int op_rc, int target_rc); gboolean fail_incompletable_actions(crm_graph_t * graph, const char *down_node) { const char *target = NULL; xmlNode *last_action = NULL; GListPtr gIter = NULL; GListPtr gIter2 = NULL; if (graph == NULL || graph->complete) { return FALSE; } gIter = graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) gIter->data; if (synapse->confirmed) { continue; } gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { crm_action_t *action = (crm_action_t *) gIter2->data; if (action->type == action_type_pseudo || action->confirmed) { continue; } else if (action->type == action_type_crm) { const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (safe_str_eq(task, CRM_OP_FENCE)) { continue; } } target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); if (safe_str_eq(target, down_node)) { action->failed = TRUE; synapse->failed = TRUE; last_action = action->xml; stop_te_timer(action->timer); update_graph(graph, action); if (synapse->executed) { crm_notice("Action %d (%s) was pending on %s (offline)", action->id, ID(action->xml), down_node); } else { crm_notice("Action %d (%s) is scheduled for %s (offline)", action->id, ID(action->xml), down_node); } } } } if (last_action != NULL) { crm_warn("Node %s shutdown resulted in un-runnable actions", down_node); abort_transition(INFINITY, tg_restart, "Node failure", last_action); return TRUE; } return FALSE; } static const char * get_uname_from_event(xmlNode * event) { xmlNode *node = event; while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); return crm_element_value(node, XML_ATTR_UNAME); } static gboolean get_is_remote_from_event(xmlNode * event) { xmlNode *node = event; while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return FALSE); return crm_element_value(node, XML_NODE_IS_REMOTE) ? 
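/* the attribute's mere presence marks the node as remote */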
TRUE : FALSE; } static gboolean update_failcount(xmlNode * event, const char *event_node_uuid, int rc, int target_rc, gboolean do_update) { int interval = 0; char *task = NULL; char *rsc_id = NULL; char *attr_name = NULL; const char *value = NULL; const char *id = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); const char *on_uname = get_uname_from_event(event); + const char *origin = crm_element_value(event, XML_ATTR_ORIGIN); if (rc == 99) { /* this is an internal code for "we're busy, try again" */ return FALSE; } else if (rc == target_rc) { return FALSE; } + if (safe_str_eq(origin, "build_active_RAs")) { + crm_debug("No update for %s (rc=%d) on %s: Old failure from lrm status refresh", + id, rc, on_uname); + return FALSE; + } + if (failed_stop_offset == NULL) { failed_stop_offset = strdup(INFINITY_S); } if (failed_start_offset == NULL) { failed_start_offset = strdup(INFINITY_S); } if (on_uname == NULL) { /* uname not in event, check cache */ on_uname = get_uname(event_node_uuid); CRM_CHECK(on_uname != NULL, return TRUE); } CRM_CHECK(parse_op_key(id, &rsc_id, &task, &interval), crm_err("Couldn't parse: %s", ID(event)); goto bail); CRM_CHECK(task != NULL, goto bail); CRM_CHECK(rsc_id != NULL, goto bail); if (do_update || interval > 0) { do_update = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_START)) { do_update = TRUE; value = failed_start_offset; } else if (safe_str_eq(task, CRMD_ACTION_STOP)) { do_update = TRUE; value = failed_stop_offset; } else if (safe_str_eq(task, CRMD_ACTION_PROMOTE)) { do_update = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { do_update = TRUE; } if (value == NULL || safe_str_neq(value, INFINITY_S)) { value = XML_NVPAIR_ATTR_VALUE "++"; } if (do_update) { char *now = crm_itoa(time(NULL)); gboolean is_remote_node = get_is_remote_from_event(event); crm_warn("Updating failcount for %s on %s after failed %s:" " rc=%d (update=%s, time=%s)", rsc_id, on_uname, task, rc, value, now); attr_name = crm_concat("fail-count", rsc_id, '-'); update_attrd(on_uname, attr_name, value, NULL, is_remote_node); free(attr_name); attr_name = crm_concat("last-failure", rsc_id, '-'); update_attrd(on_uname, attr_name, now, NULL, is_remote_node); free(attr_name); free(now); } bail: free(rsc_id); free(task); return TRUE; } static int status_from_rc(crm_action_t * action, int orig_status, int rc, int target_rc) { int status = orig_status; if (target_rc == rc) { crm_trace("Target rc: == %d", rc); if (status != PCMK_LRM_OP_DONE) { crm_trace("Re-mapping op status to" " PCMK_LRM_OP_DONE for rc=%d", rc); status = PCMK_LRM_OP_DONE; } } else { status = PCMK_LRM_OP_ERROR; } /* 99 is the code we use for direct nacks */ if (rc != 99 && status != PCMK_LRM_OP_DONE) { const char *task, *uname; task = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); uname = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); crm_warn("Action %d (%s) on %s failed (target: %d vs. 
rc: %d): %s", action->id, task, uname, target_rc, rc, services_lrm_status_str(status)); } return status; } /* * returns the ID of the action if a match is found * returns -1 if a match was not found * returns -2 if a match was found but the action failed (and was * not allowed to) */ int match_graph_event(int action_id, xmlNode * event, const char *event_node, int op_status, int op_rc, int target_rc) { const char *target = NULL; const char *allow_fail = NULL; const char *this_event = NULL; crm_action_t *action = NULL; action = get_action(action_id, FALSE); if (action == NULL) { return -1; } op_status = status_from_rc(action, op_status, op_rc, target_rc); if (op_status != PCMK_LRM_OP_DONE) { update_failcount(event, event_node, op_rc, target_rc, FALSE); } /* Process OP status */ switch (op_status) { case PCMK_LRM_OP_PENDING: crm_debug("Ignoring pending operation"); return action->id; break; case PCMK_LRM_OP_DONE: break; case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_NOTSUPPORTED: action->failed = TRUE; break; case PCMK_LRM_OP_CANCELLED: /* do nothing?? */ crm_err("Dont know what to do for cancelled ops yet"); break; default: action->failed = TRUE; crm_err("Unsupported action result: %d", op_status); } /* stop this event's timer if it had one */ stop_te_timer(action->timer); action->confirmed = TRUE; update_graph(transition_graph, action); trigger_graph(); if (action->failed) { allow_fail = crm_meta_value(action->params, XML_ATTR_TE_ALLOWFAIL); if (crm_is_true(allow_fail)) { action->failed = FALSE; } } if (action->failed) { abort_transition(action->synapse->priority + 1, tg_restart, "Event failed", event); } this_event = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); crm_info("Action %s (%d) confirmed on %s (rc=%d)", crm_str(this_event), action->id, crm_str(target), op_status); return action->id; } crm_action_t * get_action(int id, gboolean confirmed) { GListPtr gIter = NULL; GListPtr gIter2 = NULL; gIter = transition_graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) gIter->data; gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { crm_action_t *action = (crm_action_t *) gIter2->data; if (action->id == id) { if (confirmed) { stop_te_timer(action->timer); action->confirmed = TRUE; } return action; } } } return NULL; } crm_action_t * get_cancel_action(const char *id, const char *node) { const char *task = NULL; const char *target = NULL; GListPtr gIter = NULL; GListPtr gIter2 = NULL; gIter = transition_graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) gIter->data; gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { crm_action_t *action = (crm_action_t *) gIter2->data; task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (safe_str_neq(CRMD_ACTION_CANCEL, task)) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); if (safe_str_neq(task, id)) { continue; } target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); if (safe_str_neq(target, node)) { continue; } return action; } } return NULL; } crm_action_t * match_down_event(int id, const char *target, const char *filter, bool quiet) { const char *this_action = NULL; const char *this_node = NULL; crm_action_t *match = NULL; GListPtr gIter = NULL; GListPtr gIter2 = NULL; gIter = transition_graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) 
gIter->data; /* lookup event */ gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { crm_action_t *action = (crm_action_t *) gIter2->data; if (id > 0 && action->id == id) { match = action; break; } this_action = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (action->type != action_type_crm) { continue; } else if (safe_str_eq(this_action, CRM_OP_LRM_REFRESH)) { continue; } else if (filter != NULL && safe_str_neq(this_action, filter)) { continue; } this_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); if (this_node == NULL) { crm_log_xml_err(action->xml, "No node uuid"); } if (safe_str_neq(this_node, target)) { crm_debug("Action %d : Node mismatch: %s", action->id, this_node); continue; } match = action; id = action->id; break; } if (match != NULL) { /* stop this event's timer if it had one */ break; } } if (match != NULL) { /* stop this event's timer if it had one */ crm_debug("Match found for action %d: %s on %s", id, crm_element_value(match->xml, XML_LRM_ATTR_TASK_KEY), target); } else if (id > 0) { crm_err("No match for action %d", id); } else if (quiet == FALSE) { crm_warn("No match for shutdown action on %s", target); } return match; } gboolean process_graph_event(xmlNode * event, const char *event_node) { int rc = -1; int status = -1; int callid = -1; int action = -1; int target_rc = -1; int transition_num = -1; char *update_te_uuid = NULL; gboolean stop_early = FALSE; gboolean passed = FALSE; const char *id = NULL; const char *desc = NULL; const char *magic = NULL; CRM_ASSERT(event != NULL); /* */ id = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); crm_element_value_int(event, XML_LRM_ATTR_RC, &rc); crm_element_value_int(event, XML_LRM_ATTR_OPSTATUS, &status); crm_element_value_int(event, XML_LRM_ATTR_CALLID, &callid); magic = crm_element_value(event, XML_ATTR_TRANSITION_KEY); if (magic == NULL) { /* non-change */ return FALSE; } if (decode_transition_key(magic, &update_te_uuid, &transition_num, &action, &target_rc) == FALSE) { crm_err("Invalid event %s.%d detected: %s", id, callid, magic); abort_transition(INFINITY, tg_restart, "Bad event", event); return FALSE; } if (status == PCMK_LRM_OP_PENDING) { goto bail; } if (transition_num == -1) { desc = "initiated outside of the cluster"; abort_transition(INFINITY, tg_restart, "Unexpected event", event); } else if (action < 0 || crm_str_eq(update_te_uuid, te_uuid, TRUE) == FALSE) { desc = "initiated by a different node"; abort_transition(INFINITY, tg_restart, "Foreign event", event); stop_early = TRUE; /* This could be an lrm status refresh */ } else if (transition_graph->id != transition_num) { desc = "arrived really late"; abort_transition(INFINITY, tg_restart, "Old event", event); stop_early = TRUE; /* This could be an lrm status refresh */ } else if (transition_graph->complete) { desc = "arrived late"; abort_transition(INFINITY, tg_restart, "Inactive graph", event); } else if (match_graph_event(action, event, event_node, status, rc, target_rc) < 0) { desc = "unknown"; abort_transition(INFINITY, tg_restart, "Unknown event", event); } else if (rc == target_rc) { passed = TRUE; crm_trace("Processed update to %s: %s", id, magic); } if (passed == FALSE) { if (update_failcount(event, event_node, rc, target_rc, transition_num == -1)) { /* Turns out this wasn't an lrm status refresh update after all */ stop_early = FALSE; desc = "failed"; } crm_info("Detected action (%d.%d) %s.%d=%s: %s", transition_num, action, id, callid, lrmd_event_rc2str(rc), desc); } bail: free(update_te_uuid); return 
stop_early; } diff --git a/crmd/te_utils.c b/crmd/te_utils.c index aa22fa393c..3dcbf471c7 100644 --- a/crmd/te_utils.c +++ b/crmd/te_utils.c @@ -1,407 +1,412 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include crm_trigger_t *stonith_reconnect = NULL; GListPtr stonith_cleanup_list = NULL; static gboolean fail_incompletable_stonith(crm_graph_t * graph) { GListPtr lpc = NULL; const char *task = NULL; xmlNode *last_action = NULL; if (graph == NULL) { return FALSE; } for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { GListPtr lpc2 = NULL; synapse_t *synapse = (synapse_t *) lpc->data; if (synapse->confirmed) { continue; } for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) { crm_action_t *action = (crm_action_t *) lpc2->data; if (action->type != action_type_crm || action->confirmed) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (task && safe_str_eq(task, CRM_OP_FENCE)) { action->failed = TRUE; last_action = action->xml; update_graph(graph, action); crm_notice("Failing action %d (%s): STONITHd terminated", action->id, ID(action->xml)); } } } if (last_action != NULL) { crm_warn("STONITHd failure resulted in un-runnable actions"); abort_transition(INFINITY, tg_restart, "Stonith failure", last_action); return TRUE; } return FALSE; } static void tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e) { if (is_set(fsa_input_register, R_ST_REQUIRED)) { crm_crit("Fencing daemon connection failed"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Fencing daemon disconnected"); } /* cbchan will be garbage at this point, arrange for it to be reset */ stonith_api->state = stonith_disconnected; if (AM_I_DC) { fail_incompletable_stonith(transition_graph); trigger_graph(); } } #if SUPPORT_CMAN # include #endif static void tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event) { static char *client_id = NULL; if (client_id == NULL) { client_id = g_strdup_printf("%s.%d", crm_system_name, getpid()); } if (st_event == NULL) { crm_err("Notify data not found"); return; } if (st_event->result == pcmk_ok && crm_str_eq(st_event->target, fsa_our_uname, TRUE)) { crm_err("We were allegedly just fenced by %s for %s!", st_event->executioner, st_event->origin); register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __FUNCTION__); return; } + if (st_event->result == pcmk_ok && + safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) { + reset_st_fail_count(st_event->target); + } + crm_notice("Peer %s was%s terminated (%s) by %s for %s: %s (ref=%s) by client %s", st_event->target, st_event->result == pcmk_ok ? "" : " not", st_event->operation, st_event->executioner ? 
st_event->executioner : "", st_event->origin, pcmk_strerror(st_event->result), st_event->id, st_event->client_origin ? st_event->client_origin : ""); #if SUPPORT_CMAN if (st_event->result == pcmk_ok && is_cman_cluster()) { int local_rc = 0; char *target_copy = strdup(st_event->target); /* In case fenced hasn't noticed yet * * Any fencing that has been inititated will be completed by way of the fence_pcmk redirect */ local_rc = fenced_external(target_copy); if (local_rc != 0) { crm_err("Could not notify CMAN that '%s' is now fenced: %d", st_event->target, local_rc); } else { crm_notice("Notified CMAN that '%s' is now fenced", st_event->target); } free(target_copy); } #endif if (st_event->result == pcmk_ok) { gboolean we_are_executioner = safe_str_eq(st_event->executioner, fsa_our_uname); crm_trace("target=%s dc=%s", st_event->target, fsa_our_dc); if (fsa_our_dc == NULL || safe_str_eq(fsa_our_dc, st_event->target)) { crm_notice("Target %s our leader %s (recorded: %s)", fsa_our_dc ? "was" : "may have been", st_event->target, fsa_our_dc ? fsa_our_dc : ""); /* Given the CIB resyncing that occurs around elections, * have one node update the CIB now and, if the new DC is different, * have them do so too after the election */ if (we_are_executioner) { const char *uuid = get_uuid(st_event->target); send_stonith_update(NULL, st_event->target, uuid); } stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(st_event->target)); } else if (AM_I_DC && st_event->client_origin && safe_str_neq(st_event->client_origin, client_id)) { const char *uuid = get_uuid(st_event->target); /* If a remote process outside of pacemaker invoked stonith to * fence someone, report the fencing result to the cib * and abort the transition graph. */ crm_info("External fencing operation from %s fenced %s", st_event->client_origin, st_event->target); send_stonith_update(NULL, st_event->target, uuid); abort_transition(INFINITY, tg_restart, "External Fencing Operation", NULL); } } } gboolean te_connect_stonith(gpointer user_data) { int lpc = 0; int rc = pcmk_ok; if (stonith_api == NULL) { stonith_api = stonith_api_new(); } if (stonith_api->state != stonith_disconnected) { crm_trace("Still connected"); return TRUE; } for (lpc = 0; lpc < 30; lpc++) { crm_debug("Attempting connection to fencing daemon..."); sleep(1); rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL); if (rc == pcmk_ok) { break; } if (user_data != NULL) { crm_err("Sign-in failed: triggered a retry"); mainloop_set_trigger(stonith_reconnect); return TRUE; } crm_err("Sign-in failed: pausing and trying again in 2s..."); sleep(1); } CRM_CHECK(rc == pcmk_ok, return TRUE); /* If not, we failed 30 times... 
just get out */ stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT, tengine_stonith_connection_destroy); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE, tengine_stonith_notify); crm_trace("Connected"); return TRUE; } gboolean stop_te_timer(crm_action_timer_t * timer) { const char *timer_desc = "action timer"; if (timer == NULL) { return FALSE; } if (timer->reason == timeout_abort) { timer_desc = "global timer"; crm_trace("Stopping %s", timer_desc); } if (timer->source_id != 0) { crm_trace("Stopping %s", timer_desc); g_source_remove(timer->source_id); timer->source_id = 0; } else { crm_trace("%s was already stopped", timer_desc); return FALSE; } return TRUE; } gboolean te_graph_trigger(gpointer user_data) { enum transition_status graph_rc = -1; if (transition_graph == NULL) { crm_debug("Nothing to do"); return TRUE; } crm_trace("Invoking graph %d in state %s", transition_graph->id, fsa_state2string(fsa_state)); switch (fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: return TRUE; break; default: break; } if (transition_graph->complete == FALSE) { graph_rc = run_graph(transition_graph); print_graph(LOG_DEBUG_3, transition_graph); if (graph_rc == transition_active) { crm_trace("Transition not yet complete"); return TRUE; } else if (graph_rc == transition_pending) { crm_trace("Transition not yet complete - no actions fired"); return TRUE; } if (graph_rc != transition_complete) { crm_warn("Transition failed: %s", transition_status(graph_rc)); print_graph(LOG_NOTICE, transition_graph); } } crm_debug("Transition %d is now complete", transition_graph->id); transition_graph->complete = TRUE; notify_crmd(transition_graph); return TRUE; } void trigger_graph_processing(const char *fn, int line) { mainloop_set_trigger(transition_trigger); crm_trace("%s:%d - Triggered graph processing", fn, line); } void abort_transition_graph(int abort_priority, enum transition_action abort_action, const char *abort_text, xmlNode * reason, const char *fn, int line) { const char *magic = NULL; CRM_CHECK(transition_graph != NULL, return); if (reason) { int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; xmlNode *diff = get_xpath_object("//" F_CIB_UPDATE_RESULT "//diff", reason, LOG_DEBUG_2); magic = crm_element_value(reason, XML_ATTR_TRANSITION_MAGIC); if (diff) { cib_diff_version_details(diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); if (crm_str_eq(TYPE(reason), XML_CIB_TAG_NVPAIR, TRUE)) { crm_info ("%s:%d - Triggered transition abort (complete=%d, tag=%s, id=%s, name=%s, value=%s, magic=%s, cib=%d.%d.%d) : %s", fn, line, transition_graph->complete, TYPE(reason), ID(reason), NAME(reason), VALUE(reason), magic ? magic : "NA", diff_add_admin_epoch, diff_add_epoch, diff_add_updates, abort_text); } else { crm_info ("%s:%d - Triggered transition abort (complete=%d, tag=%s, id=%s, magic=%s, cib=%d.%d.%d) : %s", fn, line, transition_graph->complete, TYPE(reason), ID(reason), magic ? magic : "NA", diff_add_admin_epoch, diff_add_epoch, diff_add_updates, abort_text); } } else { crm_info ("%s:%d - Triggered transition abort (complete=%d, tag=%s, id=%s, magic=%s) : %s", fn, line, transition_graph->complete, TYPE(reason), ID(reason), magic ? 
magic : "NA", abort_text); } } else { crm_info("%s:%d - Triggered transition abort (complete=%d) : %s", fn, line, transition_graph->complete, abort_text); } switch (fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: crm_info("Abort suppressed: state=%s (complete=%d)", fsa_state2string(fsa_state), transition_graph->complete); return; default: break; } if (magic == NULL && reason != NULL) { crm_log_xml_debug(reason, "Cause"); } /* Make sure any queued calculations are discarded ASAP */ free(fsa_pe_ref); fsa_pe_ref = NULL; if (transition_graph->complete) { if (transition_timer->period_ms > 0) { crm_timer_stop(transition_timer); crm_timer_start(transition_timer); } else if (too_many_st_failures() == FALSE) { register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); } else { register_fsa_input(C_FSA_INTERNAL, I_TE_SUCCESS, NULL); } return; } update_abort_priority(transition_graph, abort_priority, abort_action, abort_text); mainloop_set_trigger(transition_trigger); } diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt index a3d862b527..3436bf8152 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt @@ -1,690 +1,706 @@ = Cluster Resources = == What is a Cluster Resource == indexterm:[Resource] The role of a resource agent is to abstract the service it provides and present a consistent view to the cluster, which allows the cluster to be agnostic about the resources it manages. The cluster doesn't need to understand how the resource works because it relies on the resource agent to do the right thing when given a +start+, +stop+ or +monitor+ command. For this reason it is crucial that resource agents are well tested. Typically resource agents come in the form of shell scripts, however they can be written using any technology (such as C, Python or Perl) that the author is comfortable with. [[s-resource-supported]] == Supported Resource Classes == indexterm:[Resource,class] There are six classes of agents supported by Pacemaker: * OCF * LSB * Upstart * Systemd * Fencing * Service * Nagios indexterm:[Resource,Heartbeat] indexterm:[Heartbeat,Resources] Version 1 of Heartbeat came with its own style of resource agents and it is highly likely that many people have written their own agents based on its conventions. footnote:[ See http://wiki.linux-ha.org/HeartbeatResourceAgent for more information ] Although deprecated with the release of Heartbeat v2, they were supported by Pacemaker up until the release of 1.1.8 to enable administrators to continue to use these agents. === Open Cluster Framework === indexterm:[Resource,OCF] indexterm:[OCF,Resources] indexterm:[Open Cluster Framework,Resources] The OCF standard footnote:[ http://www.opencf.org/cgi-bin/viewcvs.cgi/specs/ra/resource-agent-api.txt?rev=HEAD - at least as it relates to resource agents. ] footnote:[ The Pacemaker implementation has been somewhat extended from the OCF Specs, but none of those changes are incompatible with the original OCF specification. ] is basically an extension of the Linux Standard Base conventions for init scripts to: * support parameters, * make them self describing and * extensible OCF specs have strict definitions of the exit codes that actions must return. footnote:[ Included with the cluster is the ocf-tester script, which can be useful in this regard. 
] The cluster follows these specifications exactly, and giving the wrong exit code will cause the cluster to behave in ways you will likely find puzzling and annoying. In particular, the cluster needs to distinguish a completely stopped resource from one which is in some erroneous and indeterminate state. Parameters are passed to the script as environment variables, with the special prefix +OCF_RESKEY_+. So, a parameter which the user thinks of as +ip+ will be passed to the script as +OCF_RESKEY_ip+. The number and purpose of the parameters are completely arbitrary; however, your script should advertise any that it supports using the +meta-data+ command. The OCF class is the most preferred one as it is an industry standard, highly flexible (allowing parameters to be passed to agents in a non-positional manner) and self-describing. For more information, see the http://www.linux-ha.org/wiki/OCF_Resource_Agents[reference] and <>. === Linux Standard Base === indexterm:[Resource,LSB] indexterm:[LSB,Resources] indexterm:[Linux Standard Base,Resources] LSB resource agents are those found in '/etc/init.d'. Generally they are provided by the OS/distribution and, in order to be used with the cluster, they must conform to the LSB Spec. footnote:[ See http://refspecs.linux-foundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html for the LSB Spec (as it relates to init scripts). ] Many distributions claim LSB compliance but ship with broken init scripts. For details on how to check if your init script is LSB-compatible, see <>. The most common problems are: * Not implementing the status operation at all * Not observing the correct exit status codes for start/stop/status actions * Starting a started resource returns an error (this violates the LSB spec) * Stopping a stopped resource returns an error (this violates the LSB spec) === Systemd === indexterm:[Resource,Systemd] indexterm:[Systemd,Resources] Some newer distributions have replaced the old http://en.wikipedia.org/wiki/Init#SysV-style[SYS-V] style of initialization daemons (and scripts) with an alternative called http://www.freedesktop.org/wiki/Software/systemd[Systemd]. Pacemaker is able to manage these services _if they are present_. Instead of +init scripts+, systemd has +unit files+. Generally the services (or unit files) are provided by the OS/distribution but there are some instructions for converting from init scripts at: http://0pointer.de/blog/projects/systemd-for-admins-3.html [NOTE] ====== Remember to make sure the computer is +not+ configured to start any services at boot time that should be controlled by the cluster. ====== === Upstart === indexterm:[Resource,Upstart] indexterm:[Upstart,Resources] Some newer distributions have replaced the old http://en.wikipedia.org/wiki/Init#SysV-style[SYS-V] style of initialization daemons (and scripts) with an alternative called http://upstart.ubuntu.com[Upstart]. Pacemaker is able to manage these services _if they are present_. Instead of +init scripts+, upstart has +jobs+. Generally the services (or jobs) are provided by the OS/distribution. [NOTE] ====== Remember to make sure the computer is +not+ configured to start any services at boot time that should be controlled by the cluster. 
====== === System Services === indexterm:[Resource,System Services] indexterm:[System Service,Resources] Since there are now many "common" types of system services (+systemd+, +upstart+, and +lsb+), Pacemaker supports a special alias which intelligently figures out which one applies to a given cluster node. This is particularly useful when the cluster contains a mix of +systemd+, +upstart+, and +lsb+. In order, Pacemaker will try to find the named service as: . an LSB (SYS-V) init script . a Systemd unit file . an Upstart job === STONITH === indexterm:[Resource,STONITH] indexterm:[STONITH,Resources] There is also an additional class, STONITH, which is used exclusively for fencing related resources. This is discussed later in <>. === Nagios Plugins === indexterm:[Resource,Nagios Plugins] indexterm:[Nagios Plugins,Resources] http://nagiosplugins.org[Nagios plugins] allow us to monitor services on remote hosts. Pacemaker is able to do remote monitoring with the plugins _if they are present_. A use case is to configure them as resources belonging to a resource container (usually a VM), so that the container will be restarted if any of them fails. They can also be configured as ordinary resources, used purely to monitor hosts or services via the network. The supported parameters are the same as the long options of the nagios plugin. [[primitive-resource]] == Resource Properties == These values tell the cluster which script to use for the resource, where to find that script and what standards it conforms to. .Properties of a Primitive Resource [width="95%",cols="1m,6<",options="header",align="center"] |========================================================= |Field |Description |id |Your name for the resource indexterm:[id,Resource] indexterm:[Resource,Property,id] |class |The standard the script conforms to. Allowed values: +ocf+, +service+, +upstart+, +systemd+, +lsb+, +stonith+ indexterm:[class,Resource] indexterm:[Resource,Property,class] |type |The name of the Resource Agent you wish to use. E.g. _IPaddr_ or _Filesystem_ indexterm:[type,Resource] indexterm:[Resource,Property,type] |provider |The OCF spec allows multiple vendors to supply the same ResourceAgent. To use the OCF resource agents supplied with Heartbeat, you should specify +heartbeat+ here. indexterm:[provider,Resource] indexterm:[Resource,Property,provider] |========================================================= Resource definitions can be queried with the `crm_resource` tool. For example [source,C] # crm_resource --resource Email --query-xml might produce: .An example system resource ===== [source,XML] ===== [NOTE] ===== One of the main drawbacks to system service resources (such as LSB, Systemd and Upstart) is that they do not allow any parameters! ===== .An example OCF resource ===== [source,XML] ------- ------- ===== [[s-resource-options]] == Resource Options == Options are used by the cluster to decide how your resource should behave and can be easily set using the `--meta` option of the `crm_resource` command. .Options for a Primitive Resource [width="95%",cols="1m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |priority |+0+ |If not all resources can be active, the cluster will stop lower priority resources in order to keep higher priority ones active. 
indexterm:[priority,Resource Option] indexterm:[Resource,Option,priority] |target-role |+Started+ |What state should the cluster attempt to keep this resource in? Allowed values: * 'Stopped' - Force the resource to be stopped * 'Started' - Allow the resource to be started (In the case of <> resources, they will not be promoted to master) * 'Master' - Allow the resource to be started and, if appropriate, promoted indexterm:[target-role,Resource Option] indexterm:[Resource,Option,target-role] |is-managed |+TRUE+ |Is the cluster allowed to start and stop the resource? Allowed values: +true+, +false+ indexterm:[is-managed,Resource Option] indexterm:[Resource,Option,is-managed] |resource-stickiness |Calculated |How much does the resource prefer to stay where it is? Defaults to the value of +resource-stickiness+ in the +rsc_defaults+ section indexterm:[resource-stickiness,Resource Option] indexterm:[Resource,Option,resource-stickiness] |requires |Calculated |Under what conditions the resource can be started. ('Since 1.1.8') Defaults to +fencing+ unless +stonith-enabled+ is 'false' or +class+ is 'stonith' - under those conditions the default is +quorum+. Possible values: * 'nothing' - can always be started * 'quorum' - The cluster can only start this resource if a majority of the configured nodes are active * 'fencing' - The cluster can only start this resource if a majority of the configured nodes are active _and_ any failed or unknown nodes have been powered off. * 'unfencing' - The cluster can only start this resource if a majority of the configured nodes are active _and_ any failed or unknown nodes have been powered off _and_ only on nodes that have been 'unfenced' indexterm:[requires,Resource Option] indexterm:[Resource,Option,requires] |migration-threshold |+INFINITY+ (disabled) |How many failures may occur for this resource on a node before this node is marked ineligible to host this resource. indexterm:[migration-threshold,Resource Option] indexterm:[Resource,Option,migration-threshold] |failure-timeout |+0+ (disabled) |How many seconds to wait before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. indexterm:[failure-timeout,Resource Option] indexterm:[Resource,Option,failure-timeout] |multiple-active |+stop_start+ |What should the cluster do if it ever finds the resource active on more than one node? Allowed values: * 'block' - mark the resource as unmanaged * 'stop_only' - stop all active instances and leave them that way * 'stop_start' - stop all active instances and start the resource in one location only indexterm:[multiple-active,Resource Option] indexterm:[Resource,Option,multiple-active] +|remote-node +|++ (disabled) +|The name of the remote-node this resource defines. This both enables the resource as a remote-node and defines the unique name used to identify the remote-node. If no other parameters are set, this value will also be assumed as the hostname to connect to at port 3121. +WARNING+ This value cannot overlap with any resource or node IDs. + +|remote-port +|+3121+ +|Configure a custom port to use for the guest connection to pacemaker_remote. + +|remote-addr +|+remote-node+ value used as hostname +|The ip address or hostname to connect to if the remote-node's name is not the hostname of the guest. + +|+remote-connect-timeout+ +|+60s+ +|How long before a pending guest connection will time out. 
+ |========================================================= If you performed the following commands on the previous LSB Email resource [source,C] ------- # crm_resource --meta --resource Email --set-parameter priority --property-value 100 # crm_resource --meta --resource Email --set-parameter multiple-active --property-value block ------- the resulting resource definition would be .An LSB resource with cluster options ===== [source,XML] ------- ------- ===== [[s-resource-defaults]] == Setting Global Defaults for Resource Options == To set a default value for a resource option, simply add it to the +rsc_defaults+ section with `crm_attribute`. Thus, [source,C] # crm_attribute --type rsc_defaults --attr-name is-managed --attr-value false would prevent the cluster from starting or stopping any of the resources in the configuration (unless of course the individual resources were specifically enabled and had +is-managed+ set to +true+). == Instance Attributes == The scripts of some resource classes (LSB not being one of them) can be given parameters which determine how they behave and which instance of a service they control. If your resource agent supports parameters, you can add them with the `crm_resource` command. For instance [source,C] # crm_resource --resource Public-IP --set-parameter ip --property-value 1.2.3.4 would create an entry in the resource like this: .An example OCF resource with instance attributes ===== [source,XML] ------- ------- ===== For an OCF resource, the result would be an environment variable called +OCF_RESKEY_ip+ with a value of +1.2.3.4+. The list of instance attributes supported by an OCF script can be found by calling the resource script with the `meta-data` command. The output contains an XML description of all the supported attributes, their purpose and default values. .Displaying the metadata for the Dummy resource agent template ===== [source,C] ------- # export OCF_ROOT=/usr/lib/ocf # $OCF_ROOT/resource.d/pacemaker/Dummy meta-data ------- [source,XML] ------- 1.0 This is a Dummy Resource Agent. It does absolutely nothing except keep track of whether its running or not. Its purpose in life is for testing and to serve as a template for RA writers. Dummy resource agent Location to store the resource state in. State file Dummy attribute that can be changed to cause a reload Dummy attribute that can be changed to cause a reload ------- ===== == Resource Operations == indexterm:[Resource,Action] === Monitoring Resources for Failure === By default, the cluster will not ensure your resources are still healthy. To instruct the cluster to do this, you need to add a +monitor+ operation to the resource's definition. .An OCF resource with a recurring health check ===== [source,XML] ------- ------- ===== .Properties of an Operation [width="95%",cols="1m,6<",options="header",align="center"] |========================================================= |Field |Description |id |Your name for the action. Must be unique. indexterm:[id,Action Property] indexterm:[Action,Property,id] |name |The action to perform. Common values: +monitor+, +start+, +stop+ indexterm:[name,Action Property] indexterm:[Action,Property,name] |interval |How frequently (in seconds) to perform the operation. Default value: +0+, meaning never. indexterm:[interval,Action Property] indexterm:[Action,Property,interval] |timeout |How long to wait before declaring the action has failed. 
indexterm:[timeout,Action Property] indexterm:[Action,Property,timeout] |on-fail |The action to take if this action ever fails. Allowed values: * 'ignore' - Pretend the resource did not fail * 'block' - Don't perform any further operations on the resource * 'stop' - Stop the resource and do not start it elsewhere * 'restart' - Stop the resource and start it again (possibly on a different node) * 'fence' - STONITH the node on which the resource failed * 'standby' - Move _all_ resources away from the node on which the resource failed The default for the +stop+ operation is +fence+ when STONITH is enabled and +block+ otherwise. All other operations default to +stop+. indexterm:[on-fail,Action Property] indexterm:[Action,Property,on-fail] |enabled |If +false+, the operation is treated as if it does not exist. Allowed values: +true+, +false+ indexterm:[enabled,Action Property] indexterm:[Action,Property,enabled] |========================================================= [[s-operation-defaults]] === Setting Global Defaults for Operations === To set a default value for an operation option, simply add it to the +op_defaults+ section with `crm_attribute`. Thus, [source,C] # crm_attribute --type op_defaults --attr-name timeout --attr-value 20s would default each operation's +timeout+ to 20 seconds. If an operation's definition also includes a value for +timeout+, then that value would be used instead (for that operation only). ==== When Resources Take a Long Time to Start/Stop ==== There are a number of implicit operations that the cluster will always perform - +start+, +stop+ and a non-recurring +monitor+ operation (used at startup to check the resource isn't already active). If one of these is taking too long, then you can create an entry for it and simply specify a new value. .An OCF resource with custom timeouts for its implicit actions ===== [source,XML] ------- ------- ===== ==== Multiple Monitor Operations ==== Provided no two operations (for a single resource) have the same name and interval, you can have as many monitor operations as you like. In this way you can do a superficial health check every minute and progressively more intense ones at longer intervals. To tell the resource agent what kind of check to perform, you need to provide each monitor with a different value for a common parameter. The OCF standard creates a special parameter called +OCF_CHECK_LEVEL+ for this purpose and dictates that it is _"made available to the resource agent without the normal +OCF_RESKEY+ prefix"_. Whatever name you choose, you can specify it by adding an +instance_attributes+ block to the +op+ tag. Note that it is up to each resource agent to look for the parameter and decide how to use it. .An OCF resource with two recurring health checks, performing different levels of checks - specified via +OCF_CHECK_LEVEL+. ===== [source,XML] ------- ------- ===== ==== Disabling a Monitor Operation ==== The easiest way to stop a recurring monitor is to just delete it. However, there can be times when you only want to disable it temporarily. In such cases, simply add +enabled="false"+ to the operation's definition. 
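For reference, the resulting operation entry looks something like this (a minimal sketch; the +id+ and +interval+ values here are illustrative, not taken from the examples below):

[source,XML]
-------
<op id="public-ip-check" name="monitor" interval="60s" enabled="false"/>
-------
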
.Example of an OCF resource with a disabled health check ===== [source,XML] ------- ------- ===== This can be achieved from the command-line by executing [source,C] # cibadmin -M -X '' Once you've done whatever you needed to do, you can then re-enable it with diff --git a/doc/Pacemaker_Remote/en-US/Ch-Example.txt b/doc/Pacemaker_Remote/en-US/Ch-Example.txt index 33b70dfbb8..ca94044945 100644 --- a/doc/Pacemaker_Remote/en-US/Ch-Example.txt +++ b/doc/Pacemaker_Remote/en-US/Ch-Example.txt @@ -1,107 +1,107 @@ = Quick Example = If you already know how to use pacemaker, you'll likely be able to grasp this new concept of remote-nodes by reading through this quick example without having to sort through all the detailed walk-through steps. Here are the key configuration ingredients that make this possible using libvirt and KVM virtual guests. These steps strip everything down to the very basics. == Mile High View of Configuration Steps == * +Put an authkey at /etc/pacemaker/authkey on every cluster-node and virtual machine+. This secures remote communication and authentication. Run this command if you want to generate a somewhat random authkey. [source,C] ---- dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1 ---- * +Install the pacemaker_remote packages on every virtual machine, enable pacemaker_remote on startup, and poke a hole in the firewall for TCP port 3121.+ [source,C] ---- yum install pacemaker-remote resource-agents systemctl enable pacemaker_remote # If you just want to see this work, disable iptables and ip6tables on most distros. # You may have to put selinux in permissive mode as well for the time being. firewall-cmd --add-port 3121/tcp --permanent ---- * +Give each virtual machine a static network address and unique hostname+ * +Tell pacemaker to launch a virtual machine and that the virtual machine is a remote-node capable of running resources by using the "remote-node" meta-attribute.+ with pcs [source,C] ---- # pcs resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="vm-guest1.xml" meta +remote-node=guest1+ ---- raw xml [source,XML] ---- ---- In the example above the meta-attribute 'remote-node=guest1' tells pacemaker that this resource is a remote-node with the hostname 'guest1' that is capable of being integrated into the cluster. The cluster will attempt to contact the virtual machine's pacemaker_remote service at the hostname 'guest1' after it launches. == What those steps just did == Those steps just told pacemaker to launch a virtual machine called vm-guest1 and integrate that virtual machine as a remote-node called 'guest1'. Example crm_mon output after guest1 is integrated into the cluster. [source,C] ---- Last updated: Wed Mar 13 13:52:39 2013 Last change: Wed Mar 13 13:25:17 2013 via crmd on node1 Stack: corosync Current DC: node1 (24815808) - partition with quorum -Version: 1.1.9 +Version: 1.1.10 2 Nodes configured, unknown expected votes 2 Resources configured. Online: [ node1 guest1 ] vm-guest1 (ocf::heartbeat:VirtualDomain): Started node1 ---- Now, you could place a resource, such as a webserver, on guest1. [source,C] ---- # pcs resource create webserver apache params configfile=/etc/httpd/conf/httpd.conf op monitor interval=30s # pcs constraint webserver prefers guest1 ---- Now the crm_mon output would show a webserver launched on the guest1 remote-node. 
[source,C] ---- Last updated: Wed Mar 13 13:52:39 2013 Last change: Wed Mar 13 13:25:17 2013 via crmd on node1 Stack: corosync Current DC: node1 (24815808) - partition with quorum -Version: 1.1.9 +Version: 1.1.10 2 Nodes configured, unknown expected votes 2 Resources configured. Online: [ node1 guest1 ] vm-guest1 (ocf::heartbeat:VirtualDomain): Started node1 webserver (ocf::heartbeat:apache): Started guest1 ---- == Accessing Cluster from Remote-node == It is worth noting that after 'guest1' is integrated into the cluster, all the pacemaker cli tools immediately become available to the remote node. This means things like crm_mon, crm_resource, and crm_attribute will work natively on the remote-node as long as the connection between the remote-node and cluster-node exists. This is particularly important for any master/slave resources executing on the remote-node that need access to crm_master to set the node's transient attributes. diff --git a/doc/Pacemaker_Remote/en-US/Ch-Intro.txt b/doc/Pacemaker_Remote/en-US/Ch-Intro.txt index a9d575a054..a505ea1916 100644 --- a/doc/Pacemaker_Remote/en-US/Ch-Intro.txt +++ b/doc/Pacemaker_Remote/en-US/Ch-Intro.txt @@ -1,55 +1,55 @@ = Extending High Availability Cluster into Virtual Nodes = == Overview == -The recent addition of the +pacemaker_remote+ service supported by +Pacemaker version 1.1.9.1 and greater+ allows nodes not running the cluster stack (pacemaker+corosync) to integrate into the cluster and have the cluster manage their resources just as if they were a real cluster node. This means that pacemaker clusters are now capable of managing both launching virtual environments (KVM/LXC) as well as launching the resources that live withing those virtual environments without requiring the virtual environments to run pacemaker or corosync. +The recent addition of the +pacemaker_remote+ service supported by +Pacemaker version 1.1.10 and greater+ allows nodes not running the cluster stack (pacemaker+corosync) to integrate into the cluster and have the cluster manage their resources just as if they were a real cluster node. This means that pacemaker clusters are now capable of both launching virtual environments (KVM/LXC) and launching the resources that live within those virtual environments, without requiring the virtual environments to run pacemaker or corosync. == Terms == +cluster-node+ - A baremetal hardware node running the High Availability stack (pacemaker + corosync) +remote-node+ - A virtual guest node running the pacemaker_remote service. +pacemaker_remote+ - A service daemon capable of performing remote application management within virtual guests (kvm and lxc) in both pacemaker cluster environments and standalone (non-cluster) environments. This service is an enhanced version of pacemaker's local resource manager daemon (LRMD) that is capable of managing and monitoring LSB, OCF, upstart, and systemd resources on a guest remotely. It also allows for most of pacemaker's cli tools (crm_mon, crm_resource, crm_master, crm_attribute, etc.) to work natively on remote-nodes. +LXC+ - A Linux Container defined by the libvirt-lxc Linux container driver. http://libvirt.org/drvlxc.html == Virtual Machine Use Case == The use of pacemaker_remote in virtual machines solves a deployment scenario that has traditionally been difficult to solve. 
+"I want a pacemaker cluster to manage virtual machine resources, but I also want pacemaker to be able to manage the resources that live within those virtual machines."+ In the past, users desiring this deployment had to make a decision. They would either have to sacrifice the ability to monitor resources residing within virtual guests by running the cluster stack on the baremetal nodes, or run another cluster instance on the virtual guests where they potentially run into corosync scalability issues. There is a third scenario where the virtual guests run the cluster stack and join the same network as the baremetal nodes, but that can quickly hit issues with scalability as well. With the pacemaker_remote service we have a new option. * The baremetal cluster-nodes run the cluster stack (pacemaker+corosync). * The virtual remote-nodes run the pacemaker_remote service (nearly zero configuration required on the virtual machine side) * The cluster stack on the cluster-nodes launch the virtual machines and immediately connect to the pacemaker_remote service, allowing the virtual machines to integrate into the cluster just as if they were a real cluster-node. The key difference here between the virtual machine remote-nodes and the cluster-nodes is that the remote-nodes are not running the cluster stack. This means the remote-nodes will never become the DC, and they do not take part in quorum. On the other hand, this also means that the remote-nodes are not bound to the scalability limits associated with the cluster stack either. +No 16 node corosync member limits+ to deal with. That isn't to say remote-nodes can scale indefinitely, but the expectation is that remote-nodes scale horizontally much further than cluster-nodes. Other than the quorum limitation, these remote-nodes behave just like cluster nodes with respect to resource management. The cluster is fully capable of managing and monitoring resources on each remote-node. You can build constraints against remote-nodes, put them in standby, or whatever else you'd expect to be able to do with normal cluster-nodes. They even show up in the crm_mon output as you would expect cluster-nodes to. To solidify the concept, an example cluster deployment integrating remote-nodes could look like this. * 16 cluster-nodes running corosync+pacemaker stack. * 64 pacemaker managed virtual machine resources running pacemaker_remote configured as remote-nodes. * 64 pacemaker managed webserver and database resources configured to run on the 64 remote-nodes. With this deployment you would have 64 webservers and databases running on 64 virtual machines on 16 hardware nodes, all of which are managed and monitored by the same pacemaker deployment. == Linux Container Use Case == +I want to isolate and limit the system resources (cpu, memory, filesystem) a cluster resource can consume without using virtual machines.+ Using pacemaker_remote with Linux containers (libvirt-lxc) opens up some interesting possibilities for isolating resources in the cluster without the use of a hypervisor. We now have the ability to define a contained environment with cpu and memory utilization limits, assign resources to that contained environment, and manage it all from within pacemaker. The LXC Walk-through section of this document outlines how pacemaker_remote can be used to bring Linux containers into the cluster as remote-nodes capable of executing resources. 
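As a rough preview (a sketch only; the resource name, config file path, and remote-node name are the ones used in the LXC walk-through later in this document), a container can be registered as a remote-node with a single resource definition:

[source,C]
----
# pcs resource create container1 VirtualDomain force_stop="true" hypervisor="lxc:///" config="/root/lxc/lxc1.xml" meta remote-node=lxc1
----

The 'remote-node=lxc1' meta-attribute is what tells pacemaker to treat the container as both a resource and a node capable of running resources.
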
== Expanding the Cluster Stack == === Traditional HA Stack === image::images/pcmk-ha-cluster-stack.png["The Traditional Pacemaker Corosync HA Stack.",width="17cm",height="9cm",align="center"] === Remote-Node Enabled HA Stack === The stack grows one additional layer vertically so we can scale further horizontally. image::images/pcmk-ha-remote-stack.png["Placing Pacemaker Remote into the Traditional HA Stack.",width="20cm",height="10cm",align="center"] diff --git a/doc/Pacemaker_Remote/en-US/Ch-KVM-Tutorial.txt b/doc/Pacemaker_Remote/en-US/Ch-KVM-Tutorial.txt index a57d7d73f1..fe0077524f 100644 --- a/doc/Pacemaker_Remote/en-US/Ch-KVM-Tutorial.txt +++ b/doc/Pacemaker_Remote/en-US/Ch-KVM-Tutorial.txt @@ -1,483 +1,483 @@ = KVM Walk-through = +What this tutorial is:+ This tutorial is an in-depth walk-through of how to get pacemaker to manage a KVM guest instance and integrate that guest into the cluster as a remote-node. +What this tutorial is not:+ This tutorial is not a realistic deployment scenario. The steps shown here are meant to get users familiar with the concept of remote-nodes as quickly as possible. == Step 1: Setup the Host == -This tutorial was created using Fedora 18 on the host and guest nodes. Anything that is capable of running libvirt and pacemaker v1.1.9.1 or greater will do though. An installation guide for installing Fedora 18 can be found here, http://docs.fedoraproject.org/en-US/Fedora/18/html/Installation_Guide/. +This tutorial was created using Fedora 18 on the host and guest nodes. Anything that is capable of running libvirt and pacemaker v1.1.10 or greater will do though. An installation guide for Fedora 18 can be found here: http://docs.fedoraproject.org/en-US/Fedora/18/html/Installation_Guide/. Fedora 18 (or similar distro) host preparation steps. === SElinux and Firewall === In order to simplify this tutorial, we will disable SELinux and the firewall on the host. +WARNING:+ These actions pose a significant security threat to machines exposed to the outside world. [source,C] ---- # setenforce 0 # sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config # systemctl disable iptables.service # systemctl disable ip6tables.service # rm '/etc/systemd/system/basic.target.wants/iptables.service' # rm '/etc/systemd/system/basic.target.wants/ip6tables.service' # systemctl stop iptables.service # systemctl stop ip6tables.service ---- === Install Cluster Software === [source,C] ---- # yum install -y pacemaker corosync pcs resource-agents ---- === Setup Corosync === Running the command below will attempt to detect the network address corosync should bind to. [source,C] ---- # export corosync_addr=`ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/g` ---- Display and verify that the address is correct [source,C] ---- # echo $corosync_addr ---- In many cases the address will be 192.168.1.0 if you are behind a standard home router. Now copy over the example corosync.conf. This code will inject your bind address and enable the vote quorum API, which is required by pacemaker. 
[source,C] ---- # cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf # sed -i.bak "s/.*\tbindnetaddr:.*/bindnetaddr:\ $corosync_addr/g" /etc/corosync/corosync.conf # cat << END >> /etc/corosync/corosync.conf quorum { provider: corosync_votequorum expected_votes: 2 } END ---- === Verify Cluster Software === Start the cluster [source,C] ---- # pcs cluster start ---- Verify corosync membership [source,C] ---- # pcs status corosync Membership information Nodeid Votes Name 1795270848 1 example-host (local) ---- Verify pacemaker status. At first the 'pcs cluster status' output will look like this. [source,C] ---- # pcs status Last updated: Thu Mar 14 12:26:00 2013 Last change: Thu Mar 14 12:25:55 2013 via crmd on example-host Stack: corosync Current DC: - Version: 1.1.9.1 + Version: 1.1.10 1 Nodes configured, unknown expected votes 0 Resources configured. ---- After about a minute you should see your host as a single node in the cluster. [source,C] ---- # pcs status Last updated: Thu Mar 14 12:28:23 2013 Last change: Thu Mar 14 12:25:55 2013 via crmd on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum Version: 1.1.8-9b13ea1 1 Nodes configured, unknown expected votes 0 Resources configured. Online: [ example-host ] ---- Go ahead and stop the cluster for now after verifying everything is in order. [source,C] ---- # pcs cluster stop ---- === Install Virtualization Software === [source,C] ---- # yum install -y kvm libvirt qemu-system qemu-kvm bridge-utils virt-manager # systemctl enable libvirtd.service ---- Reboot the host. == Step 2: Create the KVM guest == I am not going to outline the installation steps required to create a kvm guest. There are plenty of tutorials available elsewhere that do that. I recommend using a Fedora 18 or greater distro as your guest as that is what I am testing this tutorial with. === Setup Guest Network === Run the commands below to set up a static ip address (192.168.122.10) and hostname (guest1). [source,C] ---- export remote_hostname=guest1 export remote_ip=192.168.122.10 export remote_gateway=192.168.122.1 yum remove -y NetworkManager rm -f /etc/hostname cat << END >> /etc/hostname $remote_hostname END hostname $remote_hostname cat << END >> /etc/sysconfig/network HOSTNAME=$remote_hostname GATEWAY=$remote_gateway END sed -i.bak "s/.*BOOTPROTO=.*/BOOTPROTO=none/g" /etc/sysconfig/network-scripts/ifcfg-eth0 cat << END >> /etc/sysconfig/network-scripts/ifcfg-eth0 IPADDR0=$remote_ip PREFIX0=24 GATEWAY0=$remote_gateway DNS1=$remote_gateway END systemctl restart network systemctl enable network.service systemctl enable sshd systemctl start sshd echo "checking connectivity" ping www.google.com ---- To simplify the tutorial we'll go ahead and disable SELinux on the guest. We'll also need to poke a hole through the firewall on port 3121 (the default port for pacemaker_remote) so the host can contact the guest. [source,C] ---- # setenforce 0 # sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config # firewall-cmd --add-port 3121/tcp --permanent ---- If you still encounter connection issues, just disable iptables and ip6tables on the guest like we did on the host to guarantee you'll be able to contact the guest from the host. At this point you should be able to ssh into the guest from the host. === Setup Pacemaker Remote === On the +HOST+ machine run these commands to generate an authkey and copy it to the /etc/pacemaker folder on both the host and guest. 
[source,C] ---- # mkdir /etc/pacemaker # dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1 # scp -r /etc/pacemaker root@192.168.122.10:/etc/ ---- Now on the +GUEST+, install the pacemaker-remote package and enable the daemon to run at startup. In the commands below you will notice the 'pacemaker' and 'pacemaker_remote' packages are being installed. The 'pacemaker' package is not required. The only reason it is being installed for this tutorial is because it contains the 'Dummy' resource agent we will be using later on to test the remote-node. [source,C] ---- # yum install -y pacemaker pacemaker-remote resource-agents # systemctl enable pacemaker_remote.service ---- Now start pacemaker_remote on the guest and verify the start was successful. [source,C] ---- # systemctl start pacemaker_remote.service # systemctl status pacemaker_remote pacemaker_remote.service - Pacemaker Remote Service Loaded: loaded (/usr/lib/systemd/system/pacemaker_remote.service; enabled) Active: active (running) since Thu 2013-03-14 18:24:04 EDT; 2min 8s ago Main PID: 1233 (pacemaker_remot) CGroup: name=systemd:/system/pacemaker_remote.service └─1233 /usr/sbin/pacemaker_remoted Mar 14 18:24:04 guest1 systemd[1]: Starting Pacemaker Remote Service... Mar 14 18:24:04 guest1 systemd[1]: Started Pacemaker Remote Service. Mar 14 18:24:04 guest1 pacemaker_remoted[1233]: notice: lrmd_init_remote_tls_server: Starting a tls listener on port 3121. ---- === Verify Host Connection to Guest === Before moving forward it's worth going ahead and verifying the host can contact the guest on port 3121. Here's a trick you can use. Connect using telnet from the host. The connection will get destroyed, but how it is destroyed tells you whether it worked or not. First add guest1 to the host machine's /etc/hosts file if you haven't already. This is required unless you have DNS set up in a way where guest1's address can be discovered. [source,C] ---- # cat << END >> /etc/hosts 192.168.122.10 guest1 END ---- If running the telnet command on the host results in this output before disconnecting, the connection works. [source,C] ---- # telnet guest1 3121 Trying 192.168.122.10... Connected to guest1. Escape character is '^]'. Connection closed by foreign host. ---- If you see this, the connection is not working. [source,C] ---- # telnet guest1 3121 Trying 192.168.122.10... telnet: connect to address 192.168.122.10: No route to host ---- Once you can successfully connect to the guest from the host, shut down the guest. Pacemaker will be managing the virtual machine from this point forward. == Step 3: Integrate KVM guest into Cluster == Now the fun part: integrating the virtual machine you've just created into the cluster. It is incredibly simple. === Start the Cluster === On the host, start pacemaker. [source,C] ---- # pcs cluster start ---- Wait for the host to become the DC. The output of 'pcs status' should look similar to this after about a minute. [source,C] ---- Last updated: Thu Mar 14 16:41:22 2013 Last change: Thu Mar 14 16:41:08 2013 via crmd on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 1 Nodes configured, unknown expected votes 0 Resources configured. Online: [ example-host ] ---- Now enable the cluster to work without quorum or stonith. This is required just for the sake of getting this tutorial to work with a single cluster-node. 
[source,C] ---- # pcs property set stonith-enabled=false # pcs property set no-quorum-policy=ignore ---- === Integrate KVM Guest as remote-node === If you didn't already do this earlier in the 'Verify Host Connection to Guest' section, add the KVM guest's ip to the host's /etc/hosts file so we can connect by hostname. The command below will do that if you used the same ip address I used earlier. [source,C] ---- # cat << END >> /etc/hosts 192.168.122.10 guest1 END ---- We will use the +VirtualDomain+ resource agent for the management of the virtual machine. This agent requires the virtual machine's xml config to be dumped to a file on disk. To do this, pick out the name of the virtual machine you just created from the output of this list. [source,C] ---- # virsh list --all Id Name State ______________________________________________ - guest1 shut off ---- In my case I named it guest1. Dump the xml to a file somewhere on the host using the following command. [source,C] ---- # virsh dumpxml guest1 > /root/guest1.xml ---- Now just register the resource with pacemaker and you're set! [source,C] ---- # pcs resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1 ---- Once the 'vm-guest1' resource is started you will see 'guest1' appear in the 'pcs status' output as a node. The final 'pcs status' output should look something like this. [source,C] ---- Last updated: Fri Mar 15 09:30:30 2013 Last change: Thu Mar 14 17:21:35 2013 via cibadmin on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 2 Nodes configured, unknown expected votes 2 Resources configured. Online: [ example-host guest1 ] Full list of resources: vm-guest1 (ocf::heartbeat:VirtualDomain): Started example-host ---- === Starting Resources on KVM Guest === The commands below demonstrate how resources can be executed on both the remote-node and the cluster-node. Create a few Dummy resources. Dummy resources are real resource agents used just for testing purposes. They actually execute on the host they are assigned to just like an apache server or database would, except their execution just means a file was created. When the resource is stopped, the file it created is removed. [source,C] ---- # pcs resource create FAKE1 ocf:pacemaker:Dummy # pcs resource create FAKE2 ocf:pacemaker:Dummy # pcs resource create FAKE3 ocf:pacemaker:Dummy # pcs resource create FAKE4 ocf:pacemaker:Dummy # pcs resource create FAKE5 ocf:pacemaker:Dummy ---- Now check your 'pcs status' output. In the resource section you should see something like the following, where some of the resources got started on the cluster-node, and some started on the remote-node. [source,C] ---- Full list of resources: vm-guest1 (ocf::heartbeat:VirtualDomain): Started example-host FAKE1 (ocf::pacemaker:Dummy): Started guest1 FAKE2 (ocf::pacemaker:Dummy): Started guest1 FAKE3 (ocf::pacemaker:Dummy): Started example-host FAKE4 (ocf::pacemaker:Dummy): Started guest1 FAKE5 (ocf::pacemaker:Dummy): Started example-host ---- The remote-node, 'guest1', reacts just like any other node in the cluster. For example, pick out a resource that is running on your cluster-node. For my purposes I am picking FAKE3 from the output above. We can force FAKE3 to run on 'guest1' in the exact same way we would any other node. [source,C] ---- # pcs constraint FAKE3 prefers guest1 ---- Now looking at the bottom of the 'pcs status' output you'll see FAKE3 is on 'guest1'. 
[source,C] ---- Full list of resources: vm-guest1 (ocf::heartbeat:VirtualDomain): Started example-host FAKE1 (ocf::pacemaker:Dummy): Started guest1 FAKE2 (ocf::pacemaker:Dummy): Started guest1 FAKE3 (ocf::pacemaker:Dummy): Started guest1 FAKE4 (ocf::pacemaker:Dummy): Started example-host FAKE5 (ocf::pacemaker:Dummy): Started example-host ---- === Testing Remote-node Recovery and Fencing === Pacemaker's policy engine is smart enough to know fencing remote-nodes associated with a virtual machine means shutting off/rebooting the virtual machine. No special configuration is necessary to make this happen. If you are interested in testing this functionality out, try stopping the guest's pacemaker_remote daemon. This would be the equivalent of abruptly terminating a cluster-node's corosync membership without properly shutting it down. ssh into the guest and run this command. [source,C] ---- # kill -9 `pidof pacemaker_remoted` ---- After a few seconds or so you'll see this in your 'pcs status' output. The 'guest1' node will be shown as offline as it is being recovered. [source,C] ---- Last updated: Fri Mar 15 11:00:31 2013 Last change: Fri Mar 15 09:54:16 2013 via cibadmin on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 2 Nodes configured, unknown expected votes 7 Resources configured. Online: [ example-host ] OFFLINE: [ guest1 ] Full list of resources: vm-guest1 (ocf::heartbeat:VirtualDomain): Started example-host FAKE1 (ocf::pacemaker:Dummy): Stopped FAKE2 (ocf::pacemaker:Dummy): Stopped FAKE3 (ocf::pacemaker:Dummy): Stopped FAKE4 (ocf::pacemaker:Dummy): Started example-host FAKE5 (ocf::pacemaker:Dummy): Started example-host Failed actions: guest1_monitor_30000 (node=example-host, call=3, rc=7, status=complete): not running ---- Once recovery of the guest is complete, you'll see it automatically get re-integrated into the cluster. The final 'pcs status' output should look something like this. [source,C] ---- Last updated: Fri Mar 15 11:03:17 2013 Last change: Fri Mar 15 09:54:16 2013 via cibadmin on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 2 Nodes configured, unknown expected votes 7 Resources configured. Online: [ example-host guest1 ] Full list of resources: vm-guest1 (ocf::heartbeat:VirtualDomain): Started example-host FAKE1 (ocf::pacemaker:Dummy): Started guest1 FAKE2 (ocf::pacemaker:Dummy): Started guest1 FAKE3 (ocf::pacemaker:Dummy): Started guest1 FAKE4 (ocf::pacemaker:Dummy): Started example-host FAKE5 (ocf::pacemaker:Dummy): Started example-host Failed actions: guest1_monitor_30000 (node=example-host, call=3, rc=7, status=complete): not running ---- === Accessing Cluster Tools from Remote-node === Besides just allowing the cluster to manage resources on a remote-node, pacemaker_remote has one other trick. +The pacemaker_remote daemon allows nearly all the pacemaker tools (crm_resource, crm_mon, crm_attribute, crm_master) to work on remote nodes natively.+ Try it: run +crm_mon+ or +pcs status+ on the guest after pacemaker has integrated the remote-node into the cluster. These tools just work. This means resource agents such as master/slave resources which need access to tools like crm_master work seamlessly on the remote-nodes. 
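For instance, once 'guest1' is online you could ssh into it and run commands like these (a rough sketch; output is omitted, and the exact resources shown will depend on your configuration):

[source,C]
----
# crm_mon -1                              # one-shot cluster status, run from the remote-node
# crm_resource --resource FAKE1 --locate  # ask the cluster where FAKE1 is currently active
----

Similarly, an OCF agent executing on the remote-node can call something like 'crm_master -l reboot -v 100' to adjust its own promotion preference, just as it would on a cluster-node.
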
diff --git a/doc/Pacemaker_Remote/en-US/Ch-LXC-Tutorial.txt b/doc/Pacemaker_Remote/en-US/Ch-LXC-Tutorial.txt index bcb1c3cc48..c3459c086a 100644 --- a/doc/Pacemaker_Remote/en-US/Ch-LXC-Tutorial.txt +++ b/doc/Pacemaker_Remote/en-US/Ch-LXC-Tutorial.txt @@ -1,328 +1,328 @@ = Linux Container (LXC) Walk-through = +What this tutorial is:+ This tutorial demonstrates how pacemaker_remote can be used with Linux containers (managed by libvirt-lxc) to run cluster resources in an isolated environment. +What this tutorial is not:+ This tutorial is not a realistic deployment scenario. The steps shown here are meant to introduce users to the concept of managing Linux container environments with Pacemaker. == Step 1: Setup LXC Host == -This tutorial was tested with Fedora 18. Anything that is capable of running libvirt and pacemaker v1.1.9.1 or greater will do though. An installation guide for installing Fedora 18 can be found here, http://docs.fedoraproject.org/en-US/Fedora/18/html/Installation_Guide/. +This tutorial was tested with Fedora 18. Anything that is capable of running libvirt and pacemaker v1.1.10 or greater will do though. An installation guide for Fedora 18 can be found here: http://docs.fedoraproject.org/en-US/Fedora/18/html/Installation_Guide/. Fedora 18 (or similar distro) host preparation steps. === SElinux and Firewall Rules === In order to simplify this tutorial, we will disable SELinux and the firewall on the host. WARNING: These actions pose significant security issues to machines exposed to the outside world. Basically, just don't do this on your production system. [source,C] ---- # setenforce 0 # sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config # firewall-cmd --add-port 3121/tcp --permanent # systemctl disable iptables.service # systemctl disable ip6tables.service # rm '/etc/systemd/system/basic.target.wants/iptables.service' # rm '/etc/systemd/system/basic.target.wants/ip6tables.service' # systemctl stop iptables.service # systemctl stop ip6tables.service ---- === Install Cluster Software on Host === [source,C] ---- # yum install -y pacemaker pacemaker-remote corosync pcs resource-agents ---- === Configure Corosync === Running the command below will attempt to detect the network address corosync should bind to. [source,C] ---- # export corosync_addr=`ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/g` ---- Display and verify that the address is correct [source,C] ---- # echo $corosync_addr ---- In most cases the address will be 192.168.1.0 if you are behind a standard home router. Now copy over the example corosync.conf. This code will inject your bind address and enable the vote quorum API, which is required by pacemaker. [source,C] ---- # cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf # sed -i.bak "s/.*\tbindnetaddr:.*/bindnetaddr:\ $corosync_addr/g" /etc/corosync/corosync.conf # cat << END >> /etc/corosync/corosync.conf quorum { provider: corosync_votequorum expected_votes: 2 } END ---- === Verify Cluster === Start the cluster [source,C] ---- # pcs cluster start ---- Verify corosync membership [source,C] ---- # pcs status corosync Membership information Nodeid Votes Name 1795270848 1 example-host (local) ---- Verify pacemaker status. At first the 'pcs cluster status' output will look like this. 
[source,C] ---- # pcs status Last updated: Thu Mar 14 12:26:00 2013 Last change: Thu Mar 14 12:25:55 2013 via crmd on example-host Stack: corosync Current DC: - Version: 1.1.9.1 + Version: 1.1.10 1 Nodes configured, unknown expected votes 0 Resources configured. ---- After about a minute you should see your host as a single node in the cluster. [source,C] ---- # pcs status Last updated: Thu Mar 14 12:28:23 2013 Last change: Thu Mar 14 12:25:55 2013 via crmd on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum Version: 1.1.8-9b13ea1 1 Nodes configured, unknown expected votes 0 Resources configured. Online: [ example-host ] ---- Go ahead and stop the cluster for now after verifying everything is in order. [source,C] ---- # pcs cluster stop ---- == Step 2: Setup LXC Environment == === Install Libvirt LXC software === [source,C] ---- # yum install -y libvirt libvirt-daemon-lxc wget # systemctl enable libvirtd ---- At this point, restart the host. === Generate Libvirt LXC domains === I've attempted to simplify this tutorial by creating a script to auto-generate the libvirt-lxc xml domain definitions. Download the script to whatever directory you want the containers to live in. In this example I am using the /root/lxc/ directory. [source,C] ---- # mkdir /root/lxc/ # cd /root/lxc/ # wget https://raw.github.com/davidvossel/pcmk-lxc-autogen/master/lxc-autogen # chmod 755 lxc-autogen ---- Now execute the script. [source,C] ---- # ./lxc-autogen ---- After executing the script you will see that a bunch of directories and xml files have been generated. Those xml files are the libvirt-lxc domain definitions, and the directories are used as some special mount points for each container. If you open up one of the xml files you'll be able to see how the cpu, memory, and filesystem resources for the container are defined. You can use the libvirt-lxc driver's documentation found here, http://libvirt.org/drvlxc.html, as a reference to help understand all the parts of the xml file. The lxc-autogen script is not complicated and is worth exploring in order to grasp how the environment is generated. It is worth noting that this environment is dependent on use of libvirt's default network interface. Verify that the output of the commands below looks the same in your environment. The default network address 192.168.122.1 should have been generated automatically when you installed the virtualization software. [source,C] ---- # virsh net-list Name State Autostart Persistent ________________________________________________________ default active yes yes # virsh net-dumpxml default | grep -e "ip address=" ---- === Generate the Authkey === Generate the authkey used to secure connections between the host and the lxc guest pacemaker_remote instances. This is sort of a funny case because the lxc guests and the host will share the same key file in the /etc/pacemaker/ directory. In a different deployment, where the lxc guests do not share the host's /etc/pacemaker directory, this key will have to be copied into each lxc guest. [source,C] ---- # dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1 ---- == Step 3: Integrate LXC guests into Cluster == === Start Cluster === On the host, start pacemaker. [source,C] ---- # pcs cluster start ---- Wait for the host to become the DC. The output of 'pcs status' should look similar to this after about a minute. 
[source,C] ---- Last updated: Thu Mar 14 16:41:22 2013 Last change: Thu Mar 14 16:41:08 2013 via crmd on example-host Stack: corosync Current DC: example-host (1795270848) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 1 Nodes configured, unknown expected votes 0 Resources configured. Online: [ example-host ] ---- Now enable the cluster to work without quorum or stonith. This is required just for the sake of getting this tutorial to work with a single cluster-node. [source,C] ---- # pcs property set stonith-enabled=false # pcs property set no-quorum-policy=ignore ---- === Integrate LXC Guests as remote-nodes === If you ran the 'lxc-autogen' script with default parameters, 3 lxc domain definitions were created as .xml files. If you used the same directory I used for the lxc environment, the config files will be located in /root/lxc. Replace the 'config' parameters in the following pcs commands if yours are different. The pcs commands below each configure a lxc guest as a remote-node in pacemaker. Behind the scenes each lxc guest is launching an instance of pacemaker_remote, allowing pacemaker to integrate the lxc guests as remote-nodes. The meta-attribute 'remote-node=' used in each command is what tells pacemaker that the lxc guest is both a resource and a remote-node capable of running resources. In this case, the 'remote-node' attribute also indicates to pacemaker that it can contact each lxc's pacemaker_remote service by using the remote-node name as the hostname. If you look in the /etc/hosts file you will see entries for each lxc guest. These entries were auto-generated earlier by the 'lxc-autogen' script. [source,C] ---- # pcs resource create container1 VirtualDomain force_stop="true" hypervisor="lxc:///" config="/root/lxc/lxc1.xml" meta remote-node=lxc1 # pcs resource create container2 VirtualDomain force_stop="true" hypervisor="lxc:///" config="/root/lxc/lxc2.xml" meta remote-node=lxc2 # pcs resource create container3 VirtualDomain force_stop="true" hypervisor="lxc:///" config="/root/lxc/lxc3.xml" meta remote-node=lxc3 ---- After creating the container resources, your 'pcs status' should look like this. [source,C] ---- Last updated: Mon Mar 18 17:15:46 2013 Last change: Mon Mar 18 17:15:26 2013 via cibadmin on guest1 Stack: corosync Current DC: example-host (175810752) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 4 Nodes configured, unknown expected votes 6 Resources configured. Online: [ example-host lxc1 lxc2 lxc3 ] Full list of resources: container3 (ocf::heartbeat:VirtualDomain): Started example-host container1 (ocf::heartbeat:VirtualDomain): Started example-host container2 (ocf::heartbeat:VirtualDomain): Started example-host ---- === Starting Resources on LXC Guests === Now that the lxc guests are integrated into the cluster, let's generate some Dummy resources to run on them. Dummy resources are real resource agents used just for testing purposes. They actually execute on the node they are assigned to just like an apache server or database would, except their execution just means a file was created. When the resource is stopped, the file it created is removed. [source,C] ---- # pcs resource create FAKE1 ocf:pacemaker:Dummy # pcs resource create FAKE2 ocf:pacemaker:Dummy # pcs resource create FAKE3 ocf:pacemaker:Dummy # pcs resource create FAKE4 ocf:pacemaker:Dummy # pcs resource create FAKE5 ocf:pacemaker:Dummy ---- After creating the Dummy resources you will see that the resources got distributed among all the nodes. 
The 'pcs status' output should look similar to this. [source,C] ---- Last updated: Mon Mar 18 17:31:54 2013 Last change: Mon Mar 18 17:31:05 2013 via cibadmin on example-host Stack: corosync Current DC: example-host (175810752) - partition WITHOUT quorum -Version: 1.1.9.1 +Version: 1.1.10 4 Nodes configured, unknown expected votes 11 Resources configured. Online: [ example-host lxc1 lxc2 lxc3 ] Full list of resources: container3 (ocf::heartbeat:VirtualDomain): Started example-host container1 (ocf::heartbeat:VirtualDomain): Started example-host container2 (ocf::heartbeat:VirtualDomain): Started example-host FAKE1 (ocf::pacemaker:Dummy): Started lxc1 FAKE2 (ocf::pacemaker:Dummy): Started lxc2 FAKE3 (ocf::pacemaker:Dummy): Started lxc3 FAKE4 (ocf::pacemaker:Dummy): Started lxc1 FAKE5 (ocf::pacemaker:Dummy): Started lxc2 ---- To witness that Dummy agents are running within the lxc guests, browse one of the lxc domain's filesystem folders. Each lxc guest has a custom mount point for the '/var/run/' directory, which is the location the Dummy resources write their state files to. [source,C] ---- # ls lxc1-filesystem/var/run/ Dummy-FAKE4.state Dummy-FAKE.state ---- If you are curious, take a look at lxc1.xml to see how the filesystem is mounted. === Testing LXC Guest Failure === You will be able to see each pacemaker_remoted process running in each lxc guest from the host machine. [source,C] ---- # ps -A | grep -e pacemaker_remote* 9142 pts/2 00:00:00 pacemaker_remot 10148 pts/4 00:00:00 pacemaker_remot 10942 pts/6 00:00:00 pacemaker_remot ---- In order to see how the cluster reacts to a failed lxc guest, try killing one of the pacemaker_remote instances. [source,C] ---- # kill -9 9142 ---- After a few moments the lxc guest that was running that instance of pacemaker_remote will be recovered along with all the resources running within that container. diff --git a/doc/Pacemaker_Remote/en-US/Ch-Options.txt b/doc/Pacemaker_Remote/en-US/Ch-Options.txt index 3ca1800d37..17e8a34faf 100644 --- a/doc/Pacemaker_Remote/en-US/Ch-Options.txt +++ b/doc/Pacemaker_Remote/en-US/Ch-Options.txt @@ -1,51 +1,51 @@ = Configuration Explained = The walk-through examples use some of these options, but don't explain exactly what they mean or do. This section is meant to be the go-to resource for all the options available for configuring remote-nodes. == Resource Options == When configuring a virtual machine or lxc resource to act as a remote-node, these are the metadata options available to both enable the resource as a remote-node and define the connection parameters. .Metadata Options for configuring KVM/LXC resources as remote-nodes [width="95%",cols="1m,1,4<",options="header",align="center"] |========================================================= |Option |Default |Description |+remote-node+ | |The name of the remote-node this resource defines. This both enables the resource as a remote-node and defines the unique name used to identify the remote-node. If no other parameters are set, this value will also be assumed as the hostname to connect to at port 3121. +WARNING+ This value cannot overlap with any resource or node IDs. |+remote-port+ |3121 |Configure a custom port to use for the guest connection to pacemaker_remote. |+remote-addr+ -|node name +|+remote-node+ value used as hostname |The ip address or hostname to connect to if the remote-node's name is not the hostname of the guest. |+remote-connect-timeout+ |60s |How long before a pending guest connection will time out. 
|=========================================================

== Host and Guest Authentication ==

Authentication and encryption of the connection between cluster-nodes (pacemaker) and remote-nodes (pacemaker_remote) is achieved using TLS with PSK encryption/authentication on +tcp port 3121+. This means both the cluster-node and remote-node must share the same private key. By default this +key must be placed at "/etc/pacemaker/authkey" on both cluster-nodes and remote-nodes+.

== Pacemaker and pacemaker_remote Options ==

If you need to change the default port or authkey location for either pacemaker or pacemaker_remote, there are environment variables you can set that affect both of those daemons. These environment variables can be enabled by placing them in the /etc/sysconfig/pacemaker file.

[source,C]
----
#==#==# Pacemaker Remote
# Use a custom directory for finding the authkey.
PCMK_authkey_location=/etc/pacemaker/authkey
#
# Specify a custom port for Pacemaker Remote connections
PCMK_remote_port=3121
----

diff --git a/lib/common/remote.c b/lib/common/remote.c
index fd302cb572..8b00f1660b 100644
--- a/lib/common/remote.c
+++ b/lib/common/remote.c
@@ -1,903 +1,908 @@
/* * Copyright (c) 2008 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include #endif #ifdef HAVE_GNUTLS_GNUTLS_H const int psk_tls_kx_order[] = { GNUTLS_KX_DHE_PSK, GNUTLS_KX_PSK, }; const int anon_tls_kx_order[] = { GNUTLS_KX_ANON_DH, GNUTLS_KX_DHE_RSA, GNUTLS_KX_DHE_DSS, GNUTLS_KX_RSA, 0 }; int crm_initiate_client_tls_handshake(crm_remote_t * remote, int timeout_ms) { int rc = 0; int pollrc = 0; time_t start = time(NULL); do { rc = gnutls_handshake(*remote->tls_session); if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) { pollrc = crm_remote_ready(remote, 1000); if (pollrc < 0) { /* poll returned error, there is no hope */ rc = -1; } } } while (((time(NULL) - start) < (timeout_ms / 1000)) && (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN)); if (rc < 0) { crm_trace("gnutls_handshake() failed with %d", rc); } return rc; } void * crm_create_anon_tls_session(int csock, int type /* GNUTLS_SERVER, GNUTLS_CLIENT */ , void *credentials) { gnutls_session_t *session = gnutls_malloc(sizeof(gnutls_session_t)); gnutls_init(session, type); # ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT /* http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication */ gnutls_priority_set_direct(*session, "NORMAL:+ANON-DH", NULL); /* gnutls_priority_set_direct (*session, "NONE:+VERS-TLS-ALL:+CIPHER-ALL:+MAC-ALL:+SIGN-ALL:+COMP-ALL:+ANON-DH", NULL); */ # else gnutls_set_default_priority(*session);
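/* Without gnutls_priority_set_direct(), fall back to the default priorities
 * and then apply the explicit key-exchange preference list declared above
 * (anon_tls_kx_order), which tries anonymous Diffie-Hellman first. */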
gnutls_kx_set_priority(*session, anon_tls_kx_order); # endif gnutls_transport_set_ptr(*session, (gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); switch (type) { case GNUTLS_SERVER: gnutls_credentials_set(*session, GNUTLS_CRD_ANON, (gnutls_anon_server_credentials_t) credentials); break; case GNUTLS_CLIENT: gnutls_credentials_set(*session, GNUTLS_CRD_ANON, (gnutls_anon_client_credentials_t) credentials); break; } return session; } void * create_psk_tls_session(int csock, int type /* GNUTLS_SERVER, GNUTLS_CLIENT */ , void *credentials) { gnutls_session_t *session = gnutls_malloc(sizeof(gnutls_session_t)); gnutls_init(session, type); # ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT gnutls_priority_set_direct(*session, "NORMAL:+DHE-PSK:+PSK", NULL); # else gnutls_set_default_priority(*session); gnutls_kx_set_priority(*session, psk_tls_kx_order); # endif gnutls_transport_set_ptr(*session, (gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); switch (type) { case GNUTLS_SERVER: gnutls_credentials_set(*session, GNUTLS_CRD_PSK, (gnutls_psk_server_credentials_t) credentials); break; case GNUTLS_CLIENT: gnutls_credentials_set(*session, GNUTLS_CRD_PSK, (gnutls_psk_client_credentials_t) credentials); break; } return session; } static int crm_send_tls(gnutls_session_t * session, const char *buf, size_t len) { const char *unsent = buf; int rc = 0; int total_send; if (buf == NULL) { return -1; } total_send = len; crm_trace("Message size: %d", len); while (TRUE) { rc = gnutls_record_send(*session, unsent, len); if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) { crm_debug("Retry"); } else if (rc < 0) { crm_err("Connection terminated rc = %d", rc); break; } else if (rc < len) { crm_debug("Only sent %d of %d bytes", rc, len); len -= rc; unsent += rc; } else { crm_debug("Sent %d bytes", rc); break; } } return rc < 0 ? rc : total_send; } /*! * \internal * \brief Read bytes off non blocking tls session. * * \param session - tls session to read * \param max_size - max bytes allowed to read for buffer. 0 assumes no limit * * \note only use with NON-Blocking sockets. Should only be used after polling socket. * This function will return once max_size is met, the socket read buffer * is empty, or an error is encountered. * * \retval '\0' terminated buffer on success */ static char * crm_recv_tls(gnutls_session_t * session, size_t max_size, size_t * recv_len, int *disconnected) { char *buf = NULL; int rc = 0; size_t len = 0; size_t chunk_size = max_size ? max_size : 1024; size_t buf_size = 0; size_t read_size = 0; if (session == NULL) { if (disconnected) { *disconnected = 1; } goto done; } buf = calloc(1, chunk_size + 1); buf_size = chunk_size; while (TRUE) { read_size = buf_size - len; /* automatically grow the buffer when needed if max_size is not set. */ if (!max_size && (read_size < (chunk_size / 2))) { buf_size += chunk_size; crm_trace("Grow buffer by %d more bytes. buf is now %d bytes", (int)chunk_size, buf_size); buf = realloc(buf, buf_size + 1); CRM_ASSERT(buf != NULL); read_size = buf_size - len; } rc = gnutls_record_recv(*session, buf + len, read_size); if (rc > 0) { crm_trace("Got %d more bytes.", rc); len += rc; /* always null terminate buffer, the +1 to alloc always allows for this. */ buf[len] = '\0'; } if (max_size && (max_size == read_size)) { crm_trace("Buffer max read size %d met", max_size); goto done; } /* process any errors. 
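 * GNUTLS_E_INTERRUPTED means the record read was interrupted by a signal and
 * can simply be retried. GNUTLS_E_AGAIN on this non-blocking session means
 * the transport has been drained, so the loop exits with whatever was
 * buffered. rc == 0 is a clean EOF from the peer; any other negative rc is a
 * fatal session error, and the caller is flagged as disconnected.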
*/ if (rc == GNUTLS_E_INTERRUPTED) { crm_trace("EINTR encountered, retry tls read"); } else if (rc == GNUTLS_E_AGAIN) { crm_trace("non-blocking, exiting read on rc = %d", rc); goto done; } else if (rc <= 0) { if (rc == 0) { crm_debug("EOF encountered during TLS read"); } else { crm_debug("Error receiving message: %s (%d)", gnutls_strerror(rc), rc); } if (disconnected) { *disconnected = 1; } goto done; } } done: if (recv_len) { *recv_len = len; } if (!len) { free(buf); buf = NULL; } return buf; } #endif static int crm_send_plaintext(int sock, const char *buf, size_t len) { int rc = 0; const char *unsent = buf; int total_send; if (buf == NULL) { return -1; } total_send = len; crm_trace("Message on socket %d: size=%d", sock, len); retry: rc = write(sock, unsent, len); if (rc < 0) { switch (errno) { case EINTR: case EAGAIN: crm_trace("Retry"); goto retry; default: crm_perror(LOG_ERR, "Could only write %d of the remaining %d bytes", rc, (int)len); break; } } else if (rc < len) { crm_trace("Only sent %d of %d remaining bytes", rc, len); len -= rc; unsent += rc; goto retry; } else { crm_trace("Sent %d bytes: %.100s", rc, buf); } return rc < 0 ? rc : total_send; } /*! * \internal * \brief Read bytes off non-blocking socket. * * \param sock - socket to read * \param max_size - max bytes allowed to read for buffer. 0 assumes no limit * * \note only use with NON-Blocking sockets. Should only be used after polling socket. * This function will return once max_size is met, the socket read buffer * is empty, or an error is encountered. * * \retval '\0' terminated buffer on success */ static char * crm_recv_plaintext(int sock, size_t max_size, size_t * recv_len, int *disconnected) { char *buf = NULL; ssize_t rc = 0; ssize_t len = 0; ssize_t chunk_size = max_size ? max_size : 1024; size_t buf_size = 0; size_t read_size = 0; if (sock <= 0) { if (disconnected) { *disconnected = 1; } goto done; } buf = calloc(1, chunk_size + 1); buf_size = chunk_size; while (TRUE) { errno = 0; read_size = buf_size - len; /* automatically grow the buffer when needed if max_size is not set. */ if (!max_size && (read_size < (chunk_size / 2))) { buf_size += chunk_size; crm_trace("Grow buffer by %d more bytes. buf is now %d bytes", (int)chunk_size, buf_size); buf = realloc(buf, buf_size + 1); CRM_ASSERT(buf != NULL); read_size = buf_size - len; } rc = read(sock, buf + len, chunk_size); if (rc > 0) { crm_trace("Got %d more bytes. errno=%d", (int)rc, errno); len += rc; /* always null terminate buffer, the +1 to alloc always allows for this.
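 * Callers can therefore treat the returned buffer as an ordinary C string,
 * e.g. crm_remote_recv() searching it for REMOTE_MSG_TERMINATOR with strstr().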
*/ buf[len] = '\0'; } if (max_size && (max_size == read_size)) { crm_trace("Buffer max read size %d met", max_size); goto done; } if (rc > 0) { continue; } else if (rc == 0) { if (disconnected) { *disconnected = 1; } crm_trace("EOF encountered during read"); goto done; } /* process errors */ if (errno == EINTR) { crm_trace("EINTR encountered, retry socket read."); } else if (errno == EAGAIN) { crm_trace("non-blocking, exiting read on rc = %d", rc); goto done; } else if (errno <= 0) { if (disconnected) { *disconnected = 1; } crm_debug("Error receiving message: %d", (int)rc); goto done; } } done: if (recv_len) { *recv_len = len; } if (!len) { free(buf); buf = NULL; } return buf; } static int crm_remote_send_raw(crm_remote_t * remote, const char *buf, size_t len) { int rc = -ESOCKTNOSUPPORT; if (remote->tcp_socket) { rc = crm_send_plaintext(remote->tcp_socket, buf, len); #ifdef HAVE_GNUTLS_GNUTLS_H } else if (remote->tls_session) { rc = crm_send_tls(remote->tls_session, buf, len); #endif } else { crm_err("Unsupported connection type"); } return rc; } int crm_remote_send(crm_remote_t * remote, xmlNode * msg) { int rc = -1; char *xml_text = NULL; int len = 0; xml_text = dump_xml_unformatted(msg); if (xml_text) { len = strlen(xml_text); } else { crm_err("Invalid XML, cannot send msg"); return -1; } rc = crm_remote_send_raw(remote, xml_text, len); if (rc >= 0) { rc = crm_remote_send_raw(remote, REMOTE_MSG_TERMINATOR, strlen(REMOTE_MSG_TERMINATOR)); } if (rc < 0) { crm_err("Failed to send remote msg, rc = %d", rc); } free(xml_text); return rc; } /*! * \internal * \brief handles the recv buffer and parsing out msgs. * \note new_data is owned by this function once it is passed in. */ xmlNode * crm_remote_parse_buffer(crm_remote_t * remote) { char *buf = NULL; char *start = NULL; char *end = NULL; xmlNode *xml = NULL; if (remote->buffer == NULL) { return NULL; } /* take ownership of the buffer */ buf = remote->buffer; remote->buffer = NULL; /* MSGS are separated by a '\r\n\r\n'. Split a message off the buffer and return it. */ start = buf; end = strstr(start, REMOTE_MSG_TERMINATOR); while (!xml && end) { /* grab the message */ end[0] = '\0'; end += strlen(REMOTE_MSG_TERMINATOR); xml = string2xml(start); if (xml == NULL) { crm_err("Couldn't parse: '%.120s'", start); } start = end; end = strstr(start, REMOTE_MSG_TERMINATOR); } if (xml && start) { /* we have msgs left over, save it until next time */ remote->buffer = strdup(start); free(buf); } else if (!xml) { /* no msg present */ remote->buffer = buf; } return xml; } /*! * \internal * \brief Determine if a remote session has data to read * * \retval 0, timeout occurred. * \retval positive, data is ready to be read * \retval negative, session has ended */ int crm_remote_ready(crm_remote_t * remote, int timeout /* ms */ ) { struct pollfd fds = { 0, }; int sock = 0; int rc = 0; time_t start; if (remote->tcp_socket) { sock = remote->tcp_socket; #ifdef HAVE_GNUTLS_GNUTLS_H } else if (remote->tls_session) { void *sock_ptr = gnutls_transport_get_ptr(*remote->tls_session); sock = GPOINTER_TO_INT(sock_ptr); #endif } else { crm_err("Unsupported connection type"); } if (sock <= 0) { crm_trace("No longer connected"); return -ENOTCONN; } start = time(NULL); errno = 0; do { fds.fd = sock; fds.events = POLLIN; /* If we got an EINTR while polling, and we have a * specific timeout we are trying to honor, attempt * to adjust the timeout to the closest second.
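 * Each retry subtracts the time already spent from the caller's timeout,
 * clamped to a one-second minimum so the poll below never runs with a
 * zero or negative timeout.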
*/ if (errno == EINTR && (timeout > 0)) { timeout = timeout - ((time(NULL) - start) * 1000); if (timeout < 1000) { timeout = 1000; } } rc = poll(&fds, 1, timeout); } while (rc < 0 && errno == EINTR); return rc; } /*! * \internal * \brief Read data off the socket until at least one full message is present or timeout occurs. * \retval TRUE message read * \retval FALSE full message not read */ gboolean crm_remote_recv(crm_remote_t * remote, int total_timeout /*ms */ , int *disconnected) { int ret; size_t request_len = 0; time_t start = time(NULL); char *raw_request = NULL; int remaining_timeout = 0; if (total_timeout == 0) { total_timeout = 10000; } else if (total_timeout < 0) { total_timeout = 60000; } *disconnected = 0; remaining_timeout = total_timeout; while ((remaining_timeout > 0) && !(*disconnected)) { /* read some more off the tls buffer if we still have time left. */ crm_trace("waiting to receive remote msg, starting timeout %d, remaining_timeout %d", total_timeout, remaining_timeout); ret = crm_remote_ready(remote, remaining_timeout); raw_request = NULL; if (ret == 0) { crm_err("poll timed out (%d ms) while waiting to receive msg", remaining_timeout); return FALSE; } else if (ret < 0) { if (errno != EINTR) { crm_debug("poll returned error while waiting for msg, rc: %d, errno: %d", ret, errno); *disconnected = 1; return FALSE; } crm_debug("poll EINTR encountered during poll, retrying"); } else if (remote->tcp_socket) { raw_request = crm_recv_plaintext(remote->tcp_socket, 0, &request_len, disconnected); #ifdef HAVE_GNUTLS_GNUTLS_H } else if (remote->tls_session) { raw_request = crm_recv_tls(remote->tls_session, 0, &request_len, disconnected); #endif } else { crm_err("Unsupported connection type"); } remaining_timeout = remaining_timeout - ((time(NULL) - start) * 1000); if (!raw_request) { crm_debug("Empty msg received after poll"); continue; } if (remote->buffer) { int old_len = strlen(remote->buffer); crm_trace("Expanding recv buffer from %d to %d", old_len, old_len + request_len); remote->buffer = realloc(remote->buffer, old_len + request_len + 1); memcpy(remote->buffer + old_len, raw_request, request_len); *(remote->buffer + old_len + request_len) = '\0'; free(raw_request); } else { remote->buffer = raw_request; } if (strstr(remote->buffer, REMOTE_MSG_TERMINATOR)) { return TRUE; } } return FALSE; } struct tcp_async_cb_data { gboolean success; int sock; void *userdata; void (*callback) (void *userdata, int sock); int timeout; /*ms */ time_t start; }; static gboolean check_connect_finished(gpointer userdata) { struct tcp_async_cb_data *cb_data = userdata; int rc = 0; int sock = cb_data->sock; int error = 0; fd_set rset, wset; socklen_t len = sizeof(error); struct timeval ts = { 0, }; if (cb_data->success == TRUE) { goto dispatch_done; } FD_ZERO(&rset); FD_SET(sock, &rset); wset = rset; crm_trace("fd %d: checking to see if connect finished", sock); rc = select(sock + 1, &rset, &wset, NULL, &ts); if (rc < 0) { rc = errno; if ((errno == EINPROGRESS) || (errno == EAGAIN)) { /* reschedule if there is still time left */ if ((time(NULL) - cb_data->start) < (cb_data->timeout / 1000)) { goto reschedule; } else { rc = -ETIMEDOUT; } } crm_trace("fd %d: select failed %d connect dispatch ", sock, rc); goto dispatch_done; } else if (rc == 0) { if ((time(NULL) - cb_data->start) < (cb_data->timeout / 1000)) { goto reschedule; } crm_debug("fd %d: timeout during select", sock); rc = -ETIMEDOUT; goto dispatch_done; } else { crm_trace("fd %d: select returned success", sock); rc = 0; } /* can we read or write
to the socket now? */ if (FD_ISSET(sock, &rset) || FD_ISSET(sock, &wset)) { if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &error, &len) < 0) { crm_trace("fd %d: call to getsockopt failed", sock); rc = -1; goto dispatch_done; } if (error) { crm_trace("fd %d: error returned from getsockopt: %d", sock, error); rc = -1; goto dispatch_done; } } else { crm_trace("neither read nor write set after select"); rc = -1; goto dispatch_done; } dispatch_done: if (!rc) { crm_trace("fd %d: connected", sock); /* Success, set the return code to the sock to report to the callback */ rc = cb_data->sock; cb_data->sock = 0; } else { close(sock); } - free(cb_data); if (cb_data->callback) { cb_data->callback(cb_data->userdata, rc); } + free(cb_data); return FALSE; reschedule: /* will check again next interval */ return TRUE; } static int internal_tcp_connect_async(int sock, const struct sockaddr *addr, socklen_t addrlen, int timeout /* ms */ , void *userdata, void (*callback) (void *userdata, int sock)) { int rc = 0; int flag = 0; int interval = 500; struct tcp_async_cb_data *cb_data = NULL; if ((flag = fcntl(sock, F_GETFL)) >= 0) { if (fcntl(sock, F_SETFL, flag | O_NONBLOCK) < 0) { crm_err("fcntl() write failed"); return -1; } } rc = connect(sock, addr, addrlen); if (rc < 0 && (errno != EINPROGRESS) && (errno != EAGAIN)) { return -1; } cb_data = calloc(1, sizeof(struct tcp_async_cb_data)); cb_data->userdata = userdata; cb_data->callback = callback; cb_data->sock = sock; cb_data->timeout = timeout; cb_data->start = time(NULL); if (rc == 0) { /* The connect was successful immediately, we still return to mainloop * and let this callback get called later. This avoids the user of this api * having to account for the fact the callback could be invoked within this * function before returning. */ cb_data->success = TRUE; interval = 1; } /* Check connect finished is mostly doing a non-block poll on the socket * to see if we can read/write to it. Once we can, the connect has completed. * This method allows us to connect to the server without blocking mainloop. * * This is a poor man's way of polling to see when the connection finished. * At some point we should figure out a way to use a mainloop fd callback for this. * Something about the way mainloop is currently polling prevents this from working at the * moment though. */ crm_trace("fd %d: scheduling to check if connect finished in %dms", sock, interval); g_timeout_add(interval, check_connect_finished, cb_data); return 0; } static int internal_tcp_connect(int sock, const struct sockaddr *addr, socklen_t addrlen) { int flag = 0; int rc = connect(sock, addr, addrlen); if (rc == 0) { if ((flag = fcntl(sock, F_GETFL)) >= 0) { if (fcntl(sock, F_SETFL, flag | O_NONBLOCK) < 0) { crm_err("fcntl() write failed"); return -1; } } } return rc; } /*! * \internal * \brief tcp connection to server at specified port * \retval negative, failed to connect.
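 * \retval 0, connect initiated asynchronously; the result is reported later
 *         through the supplied callback
 * \retval positive, connected socket descriptor (synchronous mode, no callback)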
*/ int crm_remote_tcp_connect_async(const char *host, int port, int timeout, /*ms */ void *userdata, void (*callback) (void *userdata, int sock)) { - struct addrinfo *res; - struct addrinfo *rp; + struct addrinfo *res = NULL; + struct addrinfo *rp = NULL; struct addrinfo hints; const char *server = host; int ret_ga; - int sock; + int sock = -1; /* getaddrinfo */ memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_CANONNAME; crm_debug("Looking up %s", server); ret_ga = getaddrinfo(server, NULL, &hints, &res); if (ret_ga) { crm_err("getaddrinfo: %s", gai_strerror(ret_ga)); return -1; } if (!res || !res->ai_addr) { crm_err("getaddrinfo failed"); - return -1; + goto async_cleanup; } for (rp = res; rp != NULL; rp = rp->ai_next) { struct sockaddr *addr = rp->ai_addr; if (!addr) { continue; } if (rp->ai_canonname) { server = res->ai_canonname; } crm_debug("Got address %s for %s", server, host); /* create socket */ sock = socket(rp->ai_family, SOCK_STREAM, IPPROTO_TCP); if (sock == -1) { crm_err("Socket creation failed for remote client connection."); continue; } if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)addr; addr_in->sin6_port = htons(port); } else { struct sockaddr_in *addr_in = (struct sockaddr_in *)addr; addr_in->sin_port = htons(port); crm_info("Attempting to connect to remote server at %s:%d", inet_ntoa(addr_in->sin_addr), port); } if (callback) { if (internal_tcp_connect_async (sock, rp->ai_addr, rp->ai_addrlen, timeout, userdata, callback) == 0) { - return 0; /* Success for now, we'll hear back later in the callback */ + sock = 0; + goto async_cleanup; /* Success for now, we'll hear back later in the callback */ } } else { if (internal_tcp_connect(sock, rp->ai_addr, rp->ai_addrlen) == 0) { break; /* Success */ } } close(sock); sock = -1; } - freeaddrinfo(res); +async_cleanup: + + if (res) { + freeaddrinfo(res); + } return sock; } int crm_remote_tcp_connect(const char *host, int port) { return crm_remote_tcp_connect_async(host, port, -1, NULL, NULL); } diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c index 233bd31dae..6192ccf75f 100644 --- a/lib/services/services_linux.c +++ b/lib/services/services_linux.c @@ -1,685 +1,686 @@ /* * Copyright (C) 2010 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include "crm/crm.h" #include "crm/common/mainloop.h" #include "crm/services.h" #include "services_private.h" #if SUPPORT_CIBSECRETS # include "crm/common/cib_secrets.h" #endif static inline void set_fd_opts(int fd, int opts) { int flag; if ((flag = fcntl(fd, F_GETFL)) >= 0) { if (fcntl(fd, F_SETFL, flag | opts) < 0) { crm_err("fcntl() write failed"); } } else { crm_err("fcntl() read failed"); } } static gboolean read_output(int fd, svc_action_t * op) { char *data = NULL; int rc = 0, len = 0; gboolean is_err = FALSE; char buf[500]; static const size_t buf_read_len = sizeof(buf) - 1; crm_trace("%p", op); if (fd < 0) { return FALSE; } if (fd == op->opaque->stderr_fd) { is_err = TRUE; if (op->stderr_data) { len = strlen(op->stderr_data); data = op->stderr_data; } } else if (op->stdout_data) { len = strlen(op->stdout_data); data = op->stdout_data; } do { rc = read(fd, buf, buf_read_len); if (rc > 0) { buf[rc] = 0; data = realloc(data, len + rc + 1); sprintf(data + len, "%s", buf); len += rc; } else if (errno != EINTR) { /* error or EOF * Cleanup happens in pipe_done() */ rc = FALSE; break; } } while (rc == buf_read_len || rc < 0); if (data != NULL && is_err) { op->stderr_data = data; } else if (data != NULL) { op->stdout_data = data; } return rc; } static int dispatch_stdout(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return read_output(op->opaque->stdout_fd, op); } static int dispatch_stderr(gpointer userdata) { svc_action_t *op = (svc_action_t *) userdata; return read_output(op->opaque->stderr_fd, op); } static void pipe_out_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; crm_trace("%p", op); op->opaque->stdout_gsource = NULL; if (op->opaque->stdout_fd > STDOUT_FILENO) { close(op->opaque->stdout_fd); } op->opaque->stdout_fd = -1; } static void pipe_err_done(gpointer user_data) { svc_action_t *op = (svc_action_t *) user_data; op->opaque->stderr_gsource = NULL; if (op->opaque->stderr_fd > STDERR_FILENO) { close(op->opaque->stderr_fd); } op->opaque->stderr_fd = -1; } static struct mainloop_fd_callbacks stdout_callbacks = { .dispatch = dispatch_stdout, .destroy = pipe_out_done, }; static struct mainloop_fd_callbacks stderr_callbacks = { .dispatch = dispatch_stderr, .destroy = pipe_err_done, }; static void set_ocf_env(const char *key, const char *value, gpointer user_data) { if (setenv(key, value, 1) != 0) { crm_perror(LOG_ERR, "setenv failed for key:%s and value:%s", key, value); } } static void set_ocf_env_with_prefix(gpointer key, gpointer value, gpointer user_data) { char buffer[500]; snprintf(buffer, sizeof(buffer), "OCF_RESKEY_%s", (char *)key); set_ocf_env(buffer, value, user_data); } static void add_OCF_env_vars(svc_action_t * op) { if (!op->standard || strcasecmp("ocf", op->standard) != 0) { return; } if (op->params) { g_hash_table_foreach(op->params, set_ocf_env_with_prefix, NULL); } set_ocf_env("OCF_RA_VERSION_MAJOR", "1", NULL); set_ocf_env("OCF_RA_VERSION_MINOR", "0", NULL); set_ocf_env("OCF_ROOT", OCF_ROOT_DIR, NULL); if (op->rsc) { set_ocf_env("OCF_RESOURCE_INSTANCE", op->rsc, NULL); } if (op->agent != NULL) { set_ocf_env("OCF_RESOURCE_TYPE", op->agent, NULL); } /* Notes: this is not added to 
specification yet. Sept 10,2004 */ if (op->provider != NULL) { set_ocf_env("OCF_RESOURCE_PROVIDER", op->provider, NULL); } } gboolean recurring_action_timer(gpointer data) { svc_action_t *op = data; crm_debug("Scheduling another invocation of %s", op->id); /* Clean out the old result */ free(op->stdout_data); op->stdout_data = NULL; free(op->stderr_data); op->stderr_data = NULL; services_action_async(op, NULL); return FALSE; } void operation_finalize(svc_action_t * op) { int recurring = 0; if (op->interval) { if (op->cancel) { op->status = PCMK_LRM_OP_CANCELLED; cancel_recurring_action(op); } else { recurring = 1; op->opaque->repeat_timer = g_timeout_add(op->interval, recurring_action_timer, (void *)op); } } if (op->opaque->callback) { op->opaque->callback(op); } op->pid = 0; if (!recurring) { /* * If this is a recurring action, do not free explicitly. * It will get freed whenever the action gets cancelled. */ services_action_free(op); } } static void operation_finished(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode) { svc_action_t *op = mainloop_child_userdata(p); char *prefix = g_strdup_printf("%s:%d", op->id, op->pid); mainloop_clear_child_userdata(p); op->status = PCMK_LRM_OP_DONE; CRM_ASSERT(op->pid == pid); if (op->opaque->stderr_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ dispatch_stderr(op); } if (op->opaque->stdout_gsource) { /* Make sure we have read everything from the buffer. * Depending on the priority mainloop gives the fd, operation_finished * could occur before all the reads are done. Force the read now.*/ dispatch_stdout(op); } if (signo) { if (mainloop_child_timeout(p)) { crm_warn("%s - timed out after %dms", prefix, op->timeout); op->status = PCMK_LRM_OP_TIMEOUT; op->rc = PCMK_OCF_TIMEOUT; } else { crm_warn("%s - terminated with signal %d", prefix, signo); op->status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_SIGNAL; } } else { op->rc = exitcode; crm_debug("%s - exited with rc=%d", prefix, exitcode); } crm_log_output(LOG_NOTICE, prefix, op->stderr_data); crm_log_output(LOG_DEBUG, prefix, op->stdout_data); g_free(prefix); operation_finalize(op); } gboolean services_os_action_execute(svc_action_t * op, gboolean synchronous) { int rc, lpc; int stdout_fd[2]; int stderr_fd[2]; sigset_t mask; sigset_t old_mask; if (pipe(stdout_fd) < 0) { crm_err("pipe() failed"); } if (pipe(stderr_fd) < 0) { crm_err("pipe() failed"); } if (synchronous) { sigemptyset(&mask); sigaddset(&mask, SIGCHLD); sigemptyset(&old_mask); if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) { crm_perror(LOG_ERR, "sigprocmask() failed"); } } op->pid = fork(); switch (op->pid) { case -1: crm_err("fork() failed"); close(stdout_fd[0]); close(stdout_fd[1]); close(stderr_fd[0]); close(stderr_fd[1]); return FALSE; case 0: /* Child */ /* Man: The call setpgrp() is equivalent to setpgid(0,0) * _and_ compiles on BSD variants too * need to investigate if it works the same too.
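 * (setpgid(0, 0) is the POSIX-specified equivalent and is what is actually
 * called below.)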
*/ setpgid(0, 0); close(stdout_fd[0]); close(stderr_fd[0]); if (STDOUT_FILENO != stdout_fd[1]) { if (dup2(stdout_fd[1], STDOUT_FILENO) != STDOUT_FILENO) { crm_err("dup2() failed (stdout)"); } close(stdout_fd[1]); } if (STDERR_FILENO != stderr_fd[1]) { if (dup2(stderr_fd[1], STDERR_FILENO) != STDERR_FILENO) { crm_err("dup2() failed (stderr)"); } close(stderr_fd[1]); } /* close all descriptors except stdin/out/err and channels to logd */ for (lpc = getdtablesize() - 1; lpc > STDERR_FILENO; lpc--) { close(lpc); } #if SUPPORT_CIBSECRETS if (replace_secret_params(op->rsc, op->params) < 0) { /* replacing secrets failed! */ if (safe_str_eq(op->action,"stop")) { /* don't fail on stop! */ crm_info("proceeding with the stop operation for %s", op->rsc); } else { crm_err("failed to get secrets for %s, " "considering resource not configured", op->rsc); _exit(PCMK_OCF_NOT_CONFIGURED); } } #endif /* Setup environment correctly */ add_OCF_env_vars(op); /* execute the RA */ execvp(op->opaque->exec, op->opaque->args); switch (errno) { /* see execve(2) */ case ENOENT: /* No such file or directory */ case EISDIR: /* Is a directory */ rc = PCMK_OCF_NOT_INSTALLED; #if SUPPORT_NAGIOS if (safe_str_eq(op->standard, "nagios")) { rc = NAGIOS_NOT_INSTALLED; } #endif break; case EACCES: /* permission denied (various errors) */ rc = PCMK_OCF_INSUFFICIENT_PRIV; #if SUPPORT_NAGIOS if (safe_str_eq(op->standard, "nagios")) { rc = NAGIOS_INSUFFICIENT_PRIV; } #endif break; default: rc = PCMK_OCF_UNKNOWN_ERROR; break; } _exit(rc); } /* Only the parent reaches here */ close(stdout_fd[1]); close(stderr_fd[1]); op->opaque->stdout_fd = stdout_fd[0]; set_fd_opts(op->opaque->stdout_fd, O_NONBLOCK); op->opaque->stderr_fd = stderr_fd[0]; set_fd_opts(op->opaque->stderr_fd, O_NONBLOCK); if (synchronous) { int status = 0; int timeout = op->timeout; int sfd = -1; time_t start = -1; struct pollfd fds[3]; int wait_rc = 0; sfd = signalfd(-1, &mask, 0); if (sfd < 0) { crm_perror(LOG_ERR, "signalfd() failed"); } fds[0].fd = op->opaque->stdout_fd; fds[0].events = POLLIN; fds[0].revents = 0; fds[1].fd = op->opaque->stderr_fd; fds[1].events = POLLIN; fds[1].revents = 0; fds[2].fd = sfd; fds[2].events = POLLIN; fds[2].revents = 0; crm_trace("Waiting for %d", op->pid); start = time(NULL); do { int poll_rc = poll(fds, 3, timeout); if (poll_rc > 0) { if (fds[0].revents & POLLIN) { read_output(op->opaque->stdout_fd, op); } if (fds[1].revents & POLLIN) { read_output(op->opaque->stderr_fd, op); } if (fds[2].revents & POLLIN) { struct signalfd_siginfo fdsi; ssize_t s; s = read(sfd, &fdsi, sizeof(struct signalfd_siginfo)); if (s != sizeof(struct signalfd_siginfo)) { crm_perror(LOG_ERR, "Read from signal fd %d failed", sfd); } else if (fdsi.ssi_signo == SIGCHLD) { wait_rc = waitpid(op->pid, &status, WNOHANG); if (wait_rc < 0){ crm_perror(LOG_ERR, "waitpid() for %d failed", op->pid); } else if (wait_rc > 0) { break; } } } } else if (poll_rc == 0) { timeout = 0; break; } else if (poll_rc < 0) { if (errno != EINTR) { crm_perror(LOG_ERR, "poll() failed"); break; } } timeout = op->timeout - (time(NULL) - start) * 1000; } while ((op->timeout < 0 || timeout > 0)); crm_trace("Child done: %d", op->pid); if (wait_rc <= 0) { int killrc = kill(op->pid, SIGKILL); op->rc = PCMK_OCF_UNKNOWN_ERROR; if (op->timeout > 0 && timeout <= 0) { op->status = PCMK_LRM_OP_TIMEOUT; crm_warn("%s:%d - timed out after %dms", op->id, op->pid, op->timeout); } else { op->status = PCMK_LRM_OP_ERROR; } if (killrc && errno != ESRCH) { crm_err("kill(%d, KILL) failed: %d", op->pid, errno); } 
/* * From sigprocmask(2): * It is not possible to block SIGKILL or SIGSTOP. Attempts to do so are silently ignored. * * This makes it safe to skip WNOHANG here */ waitpid(op->pid, &status, 0); } else if (WIFEXITED(status)) { op->status = PCMK_LRM_OP_DONE; op->rc = WEXITSTATUS(status); crm_info("Managed %s process %d exited with rc=%d", op->id, op->pid, op->rc); } else if (WIFSIGNALED(status)) { int signo = WTERMSIG(status); op->status = PCMK_LRM_OP_ERROR; crm_err("Managed %s process %d exited with signal=%d", op->id, op->pid, signo); } #ifdef WCOREDUMP if (WCOREDUMP(status)) { crm_err("Managed %s process %d dumped core", op->id, op->pid); } #endif read_output(op->opaque->stdout_fd, op); read_output(op->opaque->stderr_fd, op); close(op->opaque->stdout_fd); close(op->opaque->stderr_fd); + close(sfd); if (sigismember(&old_mask, SIGCHLD) == 0) { if (sigprocmask(SIG_UNBLOCK, &mask, NULL) < 0) { crm_perror(LOG_ERR, "sigprocmask() to unblocked failed"); } } } else { crm_trace("Async waiting for %d - %s", op->pid, op->opaque->exec); mainloop_child_add(op->pid, op->timeout, op->id, op, operation_finished); op->opaque->stdout_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stdout_fd, op, &stdout_callbacks); op->opaque->stderr_gsource = mainloop_add_fd(op->id, G_PRIORITY_LOW, op->opaque->stderr_fd, op, &stderr_callbacks); } return TRUE; } GList * services_os_get_directory_list(const char *root, gboolean files, gboolean executable) { GList *list = NULL; struct dirent **namelist; int entries = 0, lpc = 0; char buffer[PATH_MAX]; entries = scandir(root, &namelist, NULL, alphasort); if (entries <= 0) { return list; } for (lpc = 0; lpc < entries; lpc++) { struct stat sb; if ('.' == namelist[lpc]->d_name[0]) { free(namelist[lpc]); continue; } snprintf(buffer, sizeof(buffer), "%s/%s", root, namelist[lpc]->d_name); if (stat(buffer, &sb)) { continue; } if (S_ISDIR(sb.st_mode)) { if (files) { free(namelist[lpc]); continue; } } else if (S_ISREG(sb.st_mode)) { if (files == FALSE) { free(namelist[lpc]); continue; } else if (executable && (sb.st_mode & S_IXUSR) == 0 && (sb.st_mode & S_IXGRP) == 0 && (sb.st_mode & S_IXOTH) == 0) { free(namelist[lpc]); continue; } } list = g_list_append(list, strdup(namelist[lpc]->d_name)); free(namelist[lpc]); } free(namelist); return list; } GList * resources_os_list_lsb_agents(void) { return get_directory_list(LSB_ROOT_DIR, TRUE, TRUE); } GList * resources_os_list_ocf_providers(void) { return get_directory_list(OCF_ROOT_DIR "/resource.d", FALSE, TRUE); } GList * resources_os_list_ocf_agents(const char *provider) { GList *gIter = NULL; GList *result = NULL; GList *providers = NULL; if (provider) { char buffer[500]; snprintf(buffer, sizeof(buffer), "%s/resource.d/%s", OCF_ROOT_DIR, provider); return get_directory_list(buffer, TRUE, TRUE); } providers = resources_os_list_ocf_providers(); for (gIter = providers; gIter != NULL; gIter = gIter->next) { GList *tmp1 = result; GList *tmp2 = resources_os_list_ocf_agents(gIter->data); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } g_list_free_full(providers, free); return result; } #if SUPPORT_NAGIOS GList * resources_os_list_nagios_agents(void) { GList *plugin_list = NULL; GList *result = NULL; GList *gIter = NULL; plugin_list = get_directory_list(NAGIOS_PLUGIN_DIR, TRUE, TRUE); /* Make sure both the plugin and its metadata exist */ for (gIter = plugin_list; gIter != NULL; gIter = gIter->next) { const char *plugin = gIter->data; char *metadata = g_strdup_printf(NAGIOS_METADATA_DIR "/%s.xml", plugin); struct stat st; if 
(stat(metadata, &st) == 0) { result = g_list_append(result, strdup(plugin)); } g_free(metadata); } g_list_free_full(plugin_list, free); return result; } #endif diff --git a/tools/cibadmin.c b/tools/cibadmin.c index 886fd9ca77..479c8c2190 100644 --- a/tools/cibadmin.c +++ b/tools/cibadmin.c @@ -1,626 +1,638 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include int exit_code = pcmk_ok; int message_timer_id = -1; int message_timeout_ms = 30; GMainLoop *mainloop = NULL; const char *host = NULL; void usage(const char *cmd, int exit_status); int do_init(void); int do_work(xmlNode * input, int command_options, xmlNode ** output); gboolean admin_message_timeout(gpointer data); void cib_connection_destroy(gpointer user_data); void cibadmin_op_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data); int command_options = 0; const char *cib_action = NULL; typedef struct str_list_s { int num_items; char *value; struct str_list_s *next; } str_list_t; char *obj_type = NULL; char *status = NULL; char *migrate_from = NULL; char *migrate_res = NULL; char *subtype = NULL; char *reset = NULL; int request_id = 0; int operation_status = 0; cib_t *the_cib = NULL; gboolean force_flag = FALSE; +gboolean quiet = FALSE; +int bump_log_num = 0; /* *INDENT-OFF* */ static struct crm_option long_options[] = { {"help", 0, 0, '?', "\tThis text"}, {"version", 0, 0, '$', "\tVersion information" }, {"verbose", 0, 0, 'V', "\tIncrease debug output\n"}, {"-spacer-", 0, 0, '-', "Commands:"}, {"upgrade", 0, 0, 'u', "\tUpgrade the configuration to the latest syntax"}, {"query", 0, 0, 'Q', "\tQuery the contents of the CIB"}, {"erase", 0, 0, 'E', "\tErase the contents of the whole CIB"}, {"bump", 0, 0, 'B', "\tIncrease the CIB's epoch value by 1"}, {"create", 0, 0, 'C', "\tCreate an object in the CIB. Will fail if the object already exists."}, {"modify", 0, 0, 'M', "\tFind the object somewhere in the CIB's XML tree and update it. Fails if the object does not exist unless -c is specified"}, {"patch", 0, 0, 'P', "\tSupply an update in the form of an xml diff (See also: crm_diff)"}, {"replace", 0, 0, 'R', "\tRecursively replace an object in the CIB"}, {"delete", 0, 0, 'D', "\tDelete the first object matching the supplied criteria, Eg.
"}, {"-spacer-", 0, 0, '-', "\n\tThe tagname and all attributes must match in order for the element to be deleted"}, {"delete-all", 0, 0, 'd', "\tWhen used with --xpath, remove all matching objects in the configuration instead of just the first one"}, {"md5-sum", 0, 0, '5', "\tCalculate the on-disk CIB digest"}, {"md5-sum-versioned", 0, 0, '6', "\tCalculate an on-the-wire versioned CIB digest"}, {"sync", 0, 0, 'S', "\t(Advanced) Force a refresh of the CIB to all nodes\n"}, {"make-slave", 0, 0, 'r', NULL, 1}, {"make-master", 0, 0, 'w', NULL, 1}, {"is-master", 0, 0, 'm', NULL, 1}, {"empty", 0, 0, 'a', "\tOutput an empty CIB"}, {"blank", 0, 0, 'a', NULL, 1}, {"-spacer-",1, 0, '-', "\nAdditional options:"}, {"force", 0, 0, 'f'}, {"timeout", 1, 0, 't', "Time (in seconds) to wait before declaring the operation failed"}, {"sync-call", 0, 0, 's', "Wait for call to complete before returning"}, {"local", 0, 0, 'l', "\tCommand takes effect locally. Should only be used for queries"}, {"allow-create",0, 0, 'c', "(Advanced) Allow the target of a --modify,-M operation to be created if they do not exist"}, {"no-children", 0, 0, 'n', "(Advanced) When querying an object, do not return include its children in the result\n"}, {"no-bcast", 0, 0, 'b', NULL, 1}, {"-spacer-", 0, 0, '-', "Data:"}, {"xml-text", 1, 0, 'X', "Retrieve XML from the supplied string"}, {"xml-file", 1, 0, 'x', "Retrieve XML from the named file"}, {"xml-pipe", 0, 0, 'p', "Retrieve XML from stdin\n"}, {"scope", 1, 0, 'o', "Limit the scope of the operation to a specific section of the CIB."}, {"-spacer-", 0, 0, '-', "\tValid values are: nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status"}, {"xpath", 1, 0, 'A', "A valid XPath to use instead of --scope,-o"}, {"node-path", 0, 0, 'e', "When performing XPath queries, return the address of any matches found."}, {"-spacer-", 0, 0, '-', " Eg: /cib/configuration/resources/master[@id='ms_RH1_SCS']/primitive[@id='prm_RH1_SCS']", pcmk_option_paragraph}, {"node", 1, 0, 'N', "(Advanced) Send command to the specified host\n"}, {"-space-", 0, 0, '!', NULL, 1}, {"-spacer-", 0, 0, '-', "\nExamples:\n"}, {"-spacer-", 0, 0, '-', "Query the configuration from the local node:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --query --local", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Query the just the cluster options configuration:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --query --scope crm_config", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Query all 'target-role' settings:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --query --xpath \"//nvpair[@name='target-role']\"", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Remove all 'is-managed' settings:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --delete-all --xpath \"//nvpair[@name='is-managed']\"", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Remove the resource named 'old':", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --delete --xml-text ''", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Remove all resources from the configuration:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --replace --scope resources --xml-text ''", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Replace the complete configuration with the contents of $HOME/pacemaker.xml:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --replace --xml-file $HOME/pacemaker.xml", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Replace the constraints section of the 
configuration with the contents of $HOME/constraints.xml:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --replace --scope constraints --xml-file $HOME/constraints.xml", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Increase the configuration version to prevent old configurations from being loaded accidentally:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --modify --xml-text ''", pcmk_option_example}, {"-spacer-", 0, 0, '-', "Edit the configuration with your favorite $EDITOR:", pcmk_option_paragraph}, {"-spacer-", 0, 0, '-', " cibadmin --query > $HOME/local.xml", pcmk_option_example}, {"-spacer-", 0, 0, '-', " $EDITOR $HOME/local.xml", pcmk_option_example}, {"-spacer-", 0, 0, '-', " cibadmin --replace --xml-file $HOME/local.xml", pcmk_option_example}, {"-spacer-", 0, 0, '-', "SEE ALSO:"}, {"-spacer-", 0, 0, '-', " crm(8), pcs(8), crm_shadow(8)"}, /* Legacy options */ {"host", 1, 0, 'h', NULL, 1}, {"force-quorum", 0, 0, 'f', NULL, 1}, {"obj_type", 1, 0, 'o', NULL, 1}, {F_CRM_DATA, 1, 0, 'X', NULL, 1}, {CIB_OP_ERASE, 0, 0, 'E', NULL, 1}, {CIB_OP_QUERY, 0, 0, 'Q', NULL, 1}, {CIB_OP_CREATE, 0, 0, 'C', NULL, 1}, {CIB_OP_REPLACE, 0, 0, 'R', NULL, 1}, {CIB_OP_UPDATE, 0, 0, 'U', NULL, 1}, {CIB_OP_MODIFY, 0, 0, 'M', NULL, 1}, {CIB_OP_DELETE, 0, 0, 'D', NULL, 1}, {CIB_OP_BUMP, 0, 0, 'B', NULL, 1}, {CIB_OP_SYNC, 0, 0, 'S', NULL, 1}, {CIB_OP_SLAVE, 0, 0, 'r', NULL, 1}, {CIB_OP_MASTER, 0, 0, 'w', NULL, 1}, {CIB_OP_ISMASTER,0, 0, 'm', NULL, 1}, {0, 0, 0, 0} }; /* *INDENT-ON* */ static void print_xml_output(xmlNode * xml) { char *buffer; if (!xml) { return; } else if (xml->type != XML_ELEMENT_NODE) { return; } if (command_options & cib_xpath_address) { const char *id = crm_element_value(xml, XML_ATTR_ID); if (safe_str_eq((const char *)xml->name, "xpath-query")) { xmlNode *child = NULL; for (child = xml->children; child; child = child->next) { print_xml_output(child); } } else { printf("%s\n", id); } } else { buffer = dump_xml_formatted(xml); fprintf(stdout, "%s", crm_str(buffer)); free(buffer); } } int main(int argc, char **argv) { int argerr = 0; int flag; const char *source = NULL; char *admin_input_xml = NULL; char *admin_input_file = NULL; gboolean dangerous_cmd = FALSE; gboolean admin_input_stdin = FALSE; xmlNode *output = NULL; xmlNode *input = NULL; int option_index = 0; - crm_log_init(NULL, LOG_CRIT, FALSE, FALSE, argc, argv, FALSE); + crm_system_name = "cibadmin"; crm_set_options(NULL, "command [options] [data]", long_options, "Provides direct access to the cluster configuration." "\n\nAllows the configuration, or sections of it, to be queried, modified, replaced and deleted." 
"\n\nWhere necessary, XML data will be obtained using the -X, -x, or -p options\n"); if (argc < 2) { crm_help('?', EX_USAGE); } while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case 't': message_timeout_ms = atoi(optarg); if (message_timeout_ms < 1) { message_timeout_ms = 30; } break; case 'A': obj_type = strdup(optarg); command_options |= cib_xpath; break; case 'e': command_options |= cib_xpath_address; break; case 'u': cib_action = CIB_OP_UPGRADE; dangerous_cmd = TRUE; break; case 'E': cib_action = CIB_OP_ERASE; dangerous_cmd = TRUE; break; case 'Q': cib_action = CIB_OP_QUERY; + quiet = TRUE; break; case 'P': cib_action = CIB_OP_APPLY_DIFF; break; case 'S': cib_action = CIB_OP_SYNC; break; case 'U': case 'M': cib_action = CIB_OP_MODIFY; break; case 'R': cib_action = CIB_OP_REPLACE; break; case 'C': cib_action = CIB_OP_CREATE; break; case 'D': cib_action = CIB_OP_DELETE; break; case '5': cib_action = "md5-sum"; break; case '6': cib_action = "md5-sum-versioned"; break; case 'c': command_options |= cib_can_create; break; case 'n': command_options |= cib_no_children; break; case 'm': cib_action = CIB_OP_ISMASTER; command_options |= cib_scope_local; break; case 'B': cib_action = CIB_OP_BUMP; break; case 'r': dangerous_cmd = TRUE; cib_action = CIB_OP_SLAVE; break; case 'w': dangerous_cmd = TRUE; cib_action = CIB_OP_MASTER; command_options |= cib_scope_local; break; case 'V': command_options = command_options | cib_verbose; - crm_bump_log_level(argc, argv); + bump_log_num++; break; case '?': case '$': case '!': crm_help(flag, EX_OK); break; case 'o': crm_trace("Option %c => %s", flag, optarg); obj_type = strdup(optarg); break; case 'X': crm_trace("Option %c => %s", flag, optarg); admin_input_xml = strdup(optarg); break; case 'x': crm_trace("Option %c => %s", flag, optarg); admin_input_file = strdup(optarg); break; case 'p': admin_input_stdin = TRUE; break; case 'N': case 'h': host = strdup(optarg); break; case 'l': command_options |= cib_scope_local; break; case 'd': cib_action = CIB_OP_DELETE; command_options |= cib_multiple; dangerous_cmd = TRUE; break; case 'b': dangerous_cmd = TRUE; command_options |= cib_inhibit_bcast; command_options |= cib_scope_local; break; case 's': command_options |= cib_sync_call; break; case 'f': force_flag = TRUE; command_options |= cib_quorum_override; break; case 'a': output = createEmptyCib(); crm_xml_add(output, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); if (optind >= argc) { crm_xml_add(output, XML_ATTR_VALIDATION, LATEST_SCHEMA_VERSION); } else { crm_xml_add(output, XML_ATTR_VALIDATION, argv[optind]); } crm_xml_add_int(output, XML_ATTR_GENERATION_ADMIN, 1); crm_xml_add_int(output, XML_ATTR_GENERATION, 0); crm_xml_add_int(output, XML_ATTR_NUMUPDATES, 0); admin_input_xml = dump_xml_formatted(output); fprintf(stdout, "%s\n", crm_str(admin_input_xml)); goto bail; break; default: printf("Argument code 0%o (%c)" " is not (?yet?) 
supported\n", flag, flag); ++argerr; break; } } + + if (bump_log_num > 0) { + quiet = FALSE; + } + crm_log_init(NULL, LOG_CRIT, FALSE, FALSE, argc, argv, quiet); + while (bump_log_num > 0) { + crm_bump_log_level(argc, argv); + bump_log_num--; + } if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) printf("%s ", argv[optind++]); printf("\n"); crm_help('?', EX_USAGE); } if (optind > argc || cib_action == NULL) { ++argerr; } if (argerr) { crm_help('?', EX_USAGE); } if (dangerous_cmd && force_flag == FALSE) { fprintf(stderr, "The supplied command is considered dangerous." " To prevent accidental destruction of the cluster," " the --force flag is required in order to proceed.\n"); fflush(stderr); exit_code = -EINVAL; goto bail; } if (admin_input_file != NULL) { input = filename2xml(admin_input_file); source = admin_input_file; } else if (admin_input_xml != NULL) { source = "input string"; input = string2xml(admin_input_xml); } else if (admin_input_stdin) { source = "STDIN"; input = stdin2xml(); } if (input != NULL) { crm_log_xml_debug(input, "[admin input]"); } else if (source) { fprintf(stderr, "Couldn't parse input from %s.\n", source); exit_code = -EINVAL; goto bail; } if (safe_str_eq(cib_action, "md5-sum")) { char *digest = NULL; if (input == NULL) { fprintf(stderr, "Please supply XML to process with -X, -x or -p\n"); exit_code = -EINVAL; goto bail; } digest = calculate_on_disk_digest(input); fprintf(stderr, "Digest: "); fprintf(stdout, "%s\n", crm_str(digest)); free(digest); goto bail; } else if (safe_str_eq(cib_action, "md5-sum-versioned")) { char *digest = NULL; const char *version = NULL; if (input == NULL) { fprintf(stderr, "Please supply XML to process with -X, -x or -p\n"); exit_code = -EINVAL; goto bail; } version = crm_element_value(input, XML_ATTR_CRM_VERSION); digest = calculate_xml_versioned_digest(input, FALSE, TRUE, version); fprintf(stderr, "Versioned (%s) digest: ", version); fprintf(stdout, "%s\n", crm_str(digest)); free(digest); goto bail; } exit_code = do_init(); if (exit_code != pcmk_ok) { crm_err("Init failed, could not perform requested operations"); fprintf(stderr, "Init failed, could not perform requested operations\n"); return -exit_code; } exit_code = do_work(input, command_options, &output); if (exit_code > 0) { /* wait for the reply by creating a mainloop and running it until * the callbacks are invoked... 
*/ request_id = exit_code; the_cib->cmds->register_callback(the_cib, request_id, message_timeout_ms, FALSE, NULL, "cibadmin_op_callback", cibadmin_op_callback); mainloop = g_main_new(FALSE); crm_trace("%s waiting for reply from the local CIB", crm_system_name); crm_info("Starting mainloop"); g_main_run(mainloop); } else if (exit_code < 0) { crm_err("Call failed: %s", pcmk_strerror(exit_code)); fprintf(stderr, "Call failed: %s\n", pcmk_strerror(exit_code)); operation_status = exit_code; if (exit_code == -pcmk_err_dtd_validation) { if (crm_str_eq(cib_action, CIB_OP_UPGRADE, TRUE)) { xmlNode *obj = NULL; int version = 0, rc = 0; rc = the_cib->cmds->query(the_cib, NULL, &obj, command_options); if (rc == pcmk_ok) { update_validation(&obj, &version, TRUE, FALSE); } } else if (output) { validate_xml_verbose(output); } } } if (output != NULL) { print_xml_output(output); free_xml(output); } crm_trace("%s exiting normally", crm_system_name); free_xml(input); free(admin_input_xml); free(admin_input_file); the_cib->cmds->signoff(the_cib); cib_delete(the_cib); bail: return crm_exit(-exit_code); } int do_work(xmlNode * input, int call_options, xmlNode ** output) { /* construct the request */ the_cib->call_timeout = message_timeout_ms; if (strcasecmp(CIB_OP_REPLACE, cib_action) == 0 && safe_str_eq(crm_element_name(input), XML_TAG_CIB)) { xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, input); if (status == NULL) { create_xml_node(input, XML_CIB_TAG_STATUS); } } if (strcasecmp(CIB_OP_SYNC, cib_action) == 0) { crm_trace("Performing %s op...", cib_action); return the_cib->cmds->sync_from(the_cib, host, obj_type, call_options); } else if (strcasecmp(CIB_OP_SLAVE, cib_action) == 0 && (call_options ^ cib_scope_local)) { crm_trace("Performing %s op on all nodes...", cib_action); return the_cib->cmds->set_slave_all(the_cib, call_options); } else if (strcasecmp(CIB_OP_MASTER, cib_action) == 0) { crm_trace("Performing %s op on all nodes...", cib_action); return the_cib->cmds->set_master(the_cib, call_options); } else if (cib_action != NULL) { crm_trace("Passing \"%s\" to variant_op...", cib_action); return cib_internal_op(the_cib, cib_action, host, obj_type, input, output, call_options, NULL); } else { crm_err("You must specify an operation"); } return -EINVAL; } int do_init(void) { int rc = pcmk_ok; the_cib = cib_new(); rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); if (rc != pcmk_ok) { crm_err("Signon to CIB failed: %s", pcmk_strerror(rc)); fprintf(stderr, "Signon to CIB failed: %s\n", pcmk_strerror(rc)); } return rc; } void cib_connection_destroy(gpointer user_data) { crm_err("Connection to the CIB terminated... exiting"); g_main_quit(mainloop); return; } void cibadmin_op_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { exit_code = rc; if (safe_str_eq(cib_action, CIB_OP_ISMASTER) && rc != pcmk_ok) { crm_info("CIB on %s is _not_ the master instance", host ? host : "localhost"); fprintf(stderr, "CIB on %s is _not_ the master instance\n", host ? host : "localhost"); } else if (safe_str_eq(cib_action, CIB_OP_ISMASTER)) { crm_info("CIB on %s _is_ the master instance", host ? host : "localhost"); fprintf(stderr, "CIB on %s _is_ the master instance\n", host ? 
host : "localhost"); } else if (rc != 0) { crm_warn("Call %s failed (%d): %s", cib_action, rc, pcmk_strerror(rc)); fprintf(stderr, "Call %s failed (%d): %s\n", cib_action, rc, pcmk_strerror(rc)); print_xml_output(output); } else if (safe_str_eq(cib_action, CIB_OP_QUERY) && output == NULL) { crm_err("Output expected in query response"); crm_log_xml_err(msg, "no output"); } else if (output == NULL) { crm_info("Call passed"); } else { crm_info("Call passed"); print_xml_output(output); } if (call_id == request_id) { g_main_quit(mainloop); } else { crm_info("Message was not the response we were looking for (%d vs. %d", call_id, request_id); } }