diff --git a/crmd/lrm.c b/crmd/lrm.c index 282d5da65f..2631c84872 100644 --- a/crmd/lrm.c +++ b/crmd/lrm.c @@ -1,2562 +1,2667 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 #define s_if_plural(i) (((i) == 1)? "" : "s") struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name); static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request); void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op); static void lrm_connection_destroy(void) { if (is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("LRM Connection failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("LRM Connection disconnected"); } } static char * make_stop_id(const char *rsc, int call_id) { return crm_strdup_printf("%s:%d", rsc, call_id); } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } /*! 
* \internal * \brief Remove a recurring operation from a resource's history * * \param[in,out] history Resource history to modify * \param[in] op Operation to remove * * \return TRUE if the operation was found and removed, FALSE otherwise */ static gboolean history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_event_data_t *existing = iter->data; if ((op->interval == existing->interval) && crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) && safe_str_eq(op->op_type, existing->op_type)) { history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); lrmd_free_event(existing); return TRUE; } } return FALSE; } /*! * \internal * \brief Free all recurring operations in resource history * * \param[in,out] history Resource history to modify */ static void history_free_recurring_ops(rsc_history_t *history) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_free_event(iter->data); } g_list_free(history->recurring_op_list); history->recurring_op_list = NULL; } /*! * \internal * \brief Free resource history * * \param[in,out] history Resource history to free */ void history_free(gpointer data) { rsc_history_t *history = (rsc_history_t*)data; if (history->stop_params) { g_hash_table_destroy(history->stop_params); } /* Don't need to free history->rsc.id because it's set to history->id */ free(history->rsc.type); free(history->rsc.standard); free(history->rsc.provider); lrmd_free_event(history->failed); lrmd_free_event(history->last); free(history->id); history_free_recurring_ops(history); free(history); } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL); return; } if (safe_str_eq(op->op_type, RSC_NOTIFY)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = calloc(1, sizeof(rsc_history_t)); entry->id = strdup(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = strdup(rsc->type); entry->rsc.standard = strdup(rsc->standard); if (rsc->provider) { entry->rsc.provider = strdup(rsc->provider); } else { entry->rsc.provider = NULL; } } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_LRM_OP_CANCELLED) { if (op->interval > 0) { crm_trace("Removing cancelled recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); history_remove_recurring_op(entry, op); return; } else { crm_trace("Skipping %s_%s_%d rc=%d, status=%d", op->rsc_id, op->op_type, op->interval, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* Store failed monitors here, otherwise the block below will cause them * to be forgotten when a stop happens. 
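 *
 * Illustrative sequence (a sketch, not part of this change): a recurring
 * monitor fails and is copied into entry->failed below; a later successful
 * stop only replaces entry->last, so the failure stays visible in the
 * history:
 *
 *     monitor rc=7  -> entry->failed = lrmd_copy_event(op);
 *     stop    rc=0  -> entry->last   = lrmd_copy_event(op);  // failed kept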
*/ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && (safe_str_eq(CRMD_ACTION_START, op->op_type) || safe_str_eq("reload", op->op_type) || safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = crm_str_table_new(); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval > 0) { /* Ensure there are no duplicates */ history_remove_recurring_op(entry, op); crm_trace("Adding recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { crm_trace("Dropping %d recurring ops because of: %s_%s_%d", g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval); history_free_recurring_ops(entry); } } /*! * \internal * \brief Send a direct OK ack for a resource task * * \param[in] lrm_state LRM connection * \param[in] input Input message being ack'ed * \param[in] rsc_id ID of affected resource * \param[in] rsc Affected resource (if available) * \param[in] task Operation task being ack'ed * \param[in] ack_host Name of host to send ack to * \param[in] ack_sys IPC system name to ack */ static void send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input, const char *rsc_id, lrmd_rsc_info_t *rsc, const char *task, const char *ack_host, const char *ack_sys) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task); - CRM_ASSERT(op != NULL); op->rc = PCMK_OCF_OK; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(ack_host, ack_sys, rsc, op, rsc_id); lrmd_free_event(op); } void lrm_op_callback(lrmd_event_data_t * op) { const char *nodename = NULL; lrm_state_t *lrm_state = NULL; CRM_CHECK(op != NULL, return); /* determine the node name for this connection. */ nodename = op->remote_nodename ? op->remote_nodename : fsa_our_uname; if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) { /* if this is the local lrmd ipc connection, set the right bits in the * crmd when the connection goes down */ lrm_connection_destroy(); return; } else if (op->type != lrmd_event_exec_complete) { /* we only need to process execution results */ return; } lrm_state = lrm_state_find(nodename); CRM_ASSERT(lrm_state != NULL); process_lrm_event(lrm_state, op, NULL); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local lrmd connections. Remote connections are handled as * resources within the pengine. Connecting and disconnecting from remote lrmd instances * handled differently than the local. 
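     *
     * Illustrative call (for orientation only): the FSA invokes this routine
     * with an action bitmask, e.g.
     *
     *     do_lrm_control(A_LRM_CONNECT, cause, cur_state, current_input,
     *                    msg_data);
     *
     * and A_LRM_CONNECT and A_LRM_DISCONNECT may both be set at once, which
     * is why the body below tests the bits with '&' rather than '=='.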
*/ lrm_state_t *lrm_state = NULL; if(fsa_our_uname == NULL) { return; /* Nothing to do */ } lrm_state = lrm_state_find_or_create(fsa_our_uname); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } clear_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Disconnecting from the LRM"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); crm_notice("Disconnected from the LRM"); } if (action & A_LRM_CONNECT) { int ret = pcmk_ok; crm_debug("Connecting to the LRM"); ret = lrm_state_ipc_connect(lrm_state); if (ret != pcmk_ok) { if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to connect to the LRM %d time%s (%d max)", lrm_state->num_lrm_register_fails, s_if_plural(lrm_state->num_lrm_register_fails), MAX_LRM_REG_FAILS); crm_timer_start(wait_timer); crmd_fsa_stall(FALSE); return; } } if (ret != pcmk_ok) { crm_err("Failed to connect to the LRM the max allowed %d time%s", lrm_state->num_lrm_register_fails, s_if_plural(lrm_state->num_lrm_register_fails)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } set_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("LRM connection established"); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; struct recurring_op_s *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (is_set(fsa_input_register, R_SHUTDOWN)) { when = "shutdown... 
waiting"; } if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) { guint removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_actions, lrm_state); guint nremaining = g_hash_table_size(lrm_state->pending_ops); if (removed || nremaining) { crm_notice("Stopped %u recurring operation%s at %s (%u remaining)", removed, s_if_plural(removed), when, nremaining); } } if (lrm_state->pending_ops) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending LRM operation%s at %s", counter, s_if_plural(counter), when); if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (is_set(fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; if (log_level == LOG_ERR) { crm_info("Found %s active at %s", entry->id, when); } else { crm_trace("Found %s active at %s", entry->id, when); } if (lrm_state->pending_ops) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (crm_str_eq(entry->id, pending->rsc_id, TRUE)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval == 0 ? "A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resource%s active at %s", counter, (counter == 1)? " was" : "s were", when); } return rc; } static char * build_parameter_list(const lrmd_event_data_t *op, const struct ra_metadata_s *metadata, xmlNode *result, enum ra_param_flags_e param_type, bool invert_for_xml) { int len = 0; int max = 0; char *list = NULL; GList *iter = NULL; /* Newer resource agents support the "private" parameter attribute to * indicate sensitive parameters. For backward compatibility with older * agents, this list is used if the agent doesn't specify any as "private". 
*/ const char *secure_terms[] = { "password", "passwd", "user", }; if (is_not_set(metadata->ra_flags, ra_uses_private) && (param_type == ra_param_private)) { max = DIMOF(secure_terms); } for (iter = metadata->ra_params; iter != NULL; iter = iter->next) { struct ra_param_s *param = (struct ra_param_s *) iter->data; bool accept = FALSE; if (is_set(param->rap_flags, param_type)) { accept = TRUE; } else if (max) { for (int lpc = 0; lpc < max; lpc++) { if (safe_str_eq(secure_terms[lpc], param->rap_name)) { accept = TRUE; break; } } } if (accept) { int start = len; crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type)); len += strlen(param->rap_name) + 2; // include spaces around list = realloc_safe(list, len + 1); // include null terminator // spaces before and after make parsing simpler sprintf(list + start, " %s ", param->rap_name); } else { crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type)); } if (result && (invert_for_xml? !accept : accept)) { const char *v = g_hash_table_lookup(op->params, param->rap_name); if (v != NULL) { crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v); crm_xml_add(result, param->rap_name, v); } } } return list; } static void append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *restart = NULL; CRM_LOG_ASSERT(op->params != NULL); if (op->interval > 0) { /* monitors are not reloadable */ return; } if (is_set(metadata->ra_flags, ra_supports_reload)) { restart = create_xml_node(NULL, XML_TAG_PARAMS); /* Add any parameters with unique="1" to the "op-force-restart" list. * * (Currently, we abuse "unique=0" to indicate reloadability. This is * nonstandard and should eventually be replaced once the OCF standard * is updated with something better.) */ list = build_parameter_list(op, metadata, restart, ra_param_unique, FALSE); } else { /* Resource does not support reloads */ return; } digest = calculate_operation_digest(restart, version); /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload, * no matter if it actually supports any parameters with unique="1"). */ crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? 
list: ""); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(restart, "restart digest source"); free_xml(restart); free(digest); free(list); } static void append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *secure = NULL; CRM_LOG_ASSERT(op->params != NULL); /* * To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the * secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on * the insecure ones */ secure = create_xml_node(NULL, XML_TAG_PARAMS); list = build_parameter_list(op, metadata, secure, ra_param_private, TRUE); if (list != NULL) { digest = calculate_operation_digest(secure, version); crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, list); crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(secure, "secure digest source"); } else { crm_trace("%s: no secure parameters", op->rsc_id); } free_xml(secure); free(digest); free(list); } static gboolean build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *node_name, const char *src) { int target_rc = 0; xmlNode *xml_op = NULL; struct ra_metadata_s *metadata = NULL; const char *caller_version = NULL; lrm_state_t *lrm_state = NULL; if (op == NULL) { return FALSE; } target_rc = rsc_op_expected_rc(op); /* there is a small risk in formerly mixed clusters that it will * be sub-optimal. * * however with our upgrade policy, the update we send should * still be completely supported anyway */ caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); CRM_LOG_ASSERT(caller_version != NULL); if(caller_version == NULL) { caller_version = CRM_FEATURE_SET; } crm_trace("Building %s operation update with originator version: %s", op->rsc_id, caller_version); xml_op = create_operation_update(parent, op, caller_version, target_rc, fsa_our_uname, src, LOG_DEBUG); if (xml_op == NULL) { return TRUE; } if ((rsc == NULL) || (op == NULL) || (op->params == NULL) || !crm_op_needs_metadata(rsc->standard, op->op_type)) { crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)", op->op_type, op->rsc_id, op->params, rsc); return TRUE; } lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { crm_warn("Cannot calculate digests for operation %s_%s_%d because we have no LRM connection to %s", op->rsc_id, op->op_type, op->interval, node_name); return TRUE; } metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata == NULL) { /* For now, we always collect resource agent meta-data via a local, * synchronous, direct execution of the agent. This has multiple issues: * the lrmd should execute agents, not the crmd; meta-data for * Pacemaker Remote nodes should be collected on those nodes, not * locally; and the meta-data call shouldn't eat into the timeout of the * real action being performed. * * These issues are planned to be addressed by having the PE schedule * a meta-data cache check at the beginning of each transition. Once * that is working, this block will only be a fallback in case the * initial collection fails. 
*/ char *metadata_str = NULL; int rc = lrm_state_get_metadata(lrm_state, rsc->standard, rsc->provider, rsc->type, &metadata_str, 0); if (rc != pcmk_ok) { crm_warn("Failed to get metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } metadata = metadata_cache_update(lrm_state->metadata_cache, rsc, metadata_str); free(metadata_str); if (metadata == NULL) { crm_warn("Failed to update metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } } #if ENABLE_VERSIONED_ATTRS crm_xml_add(xml_op, XML_ATTR_RA_VERSION, metadata->ra_version); #endif crm_trace("Including additional digests for %s::%s:%s", rsc->standard, rsc->provider, rsc->type); append_restart_list(op, metadata, xml_op, caller_version); append_secure_list(op, metadata, xml_op, caller_version); return TRUE; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval, entry->last->rc); if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) { /* a stricter check is too complex... * leave that to the PE */ return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if (entry->last->interval == 0 && entry->last->rc == PCMK_OCF_NOT_CONFIGURED) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container); } } build_operation_update(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name, __FUNCTION__); build_operation_update(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name, __FUNCTION__); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { build_operation_update(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name, __FUNCTION__); } } return FALSE; } static xmlNode * do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags) { xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; crm_node_t *peer = NULL; peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, update_flags, NULL, __FUNCTION__); if (xml_state == NULL) { return NULL; } xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid); rsc_list = create_xml_node(xml_data, 
XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current state of the LRM"); return xml_state; } xmlNode * do_lrm_query(gboolean is_replace, const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (!lrm_state) { crm_err("Could not query lrm state for lrmd node %s", node_name); return NULL; } return do_lrm_query_internal(lrm_state, node_update_cluster|node_update_peer); } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? "" : " not")); op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); - CRM_ASSERT(op != NULL); if (rc == pcmk_ok) { op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; } else { op->op_status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { /* this isn't expected - trigger a new transition */ time_t now = time(NULL); char *now_s = crm_itoa(now); crm_debug("Triggering a refresh after %s deleted %s from the LRM", from_sys, rsc_id); update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE, NULL, NULL); free(now_s); } } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (crm_str_eq(event->rsc, op->rsc, TRUE)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; struct recurring_op_s *pending = value; if (crm_str_eq(rsc, pending->rsc_id, TRUE)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } /* * Remove the rsc from the CIB * * Avoids refreshing the entire LRM section of this host */ #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name) { char *rsc_xpath = NULL; int rc = pcmk_ok; CRM_CHECK(rsc_id != NULL, return -ENXIO); rsc_xpath = crm_strdup_printf(rsc_template, lrm_state->node_name, rsc_id); rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath, NULL, NULL, call_options | cib_xpath, user_name); free(rsc_xpath); return rc; } static void delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, GHashTableIter * rsc_gIter, int rc, const char *user_name) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = strdup(rsc_id); if (rsc_gIter) g_hash_table_iter_remove(rsc_gIter); else g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); crm_debug("sync: Sending delete op for %s", rsc_id_copy); delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name); g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if 
(input) { notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } /*! * \internal * \brief Erase an LRM history entry from the CIB, given the operation data * * \param[in] lrm_state LRM state of the desired node * \param[in] op Operation whose history should be deleted */ static void erase_lrm_history_by_op(lrm_state_t *lrm_state, lrmd_event_data_t *op) { xmlNode *xml_top = NULL; CRM_CHECK(op != NULL, return); xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); if (op->interval > 0) { char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ crm_xml_add(xml_top, XML_ATTR_ID, op_id); free(op_id); } crm_debug("Erasing LRM resource history for %s_%s_%d (call=%d)", op->rsc_id, op->op_type, op->interval, op->call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } /* Define xpath to find LRM resource history entry by node and resource */ #define XPATH_HISTORY \ "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \ "/" XML_LRM_TAG_RSC_OP /* ... and also by operation key */ #define XPATH_HISTORY_ID XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s']" /* ... and also by operation key and operation call ID */ #define XPATH_HISTORY_CALL XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']" /* ... and also by operation key and original operation key */ #define XPATH_HISTORY_ORIG XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']" /*! * \internal * \brief Erase an LRM history entry from the CIB, given operation identifiers * * \param[in] lrm_state LRM state of the node to clear history for * \param[in] rsc_id Name of resource to clear history for * \param[in] key Operation key of operation to clear history for * \param[in] orig_op If specified, delete only if it has this original op * \param[in] call_id If specified, delete entry only if it has this call ID */ static void erase_lrm_history_by_id(lrm_state_t *lrm_state, const char *rsc_id, const char *key, const char *orig_op, int call_id) { char *op_xpath = NULL; CRM_CHECK((rsc_id != NULL) && (key != NULL), return); if (call_id > 0) { op_xpath = crm_strdup_printf(XPATH_HISTORY_CALL, lrm_state->node_name, rsc_id, key, call_id); } else if (orig_op) { op_xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, lrm_state->node_name, rsc_id, key, orig_op); } else { op_xpath = crm_strdup_printf(XPATH_HISTORY_ID, lrm_state->node_name, rsc_id, key); } crm_debug("Erasing LRM resource history for %s on %s (call=%d)", key, rsc_id, call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath); free(op_xpath); } static inline gboolean last_failed_matches_op(rsc_history_t *entry, const char *op, int interval) { if (entry == NULL) { return FALSE; } if (op == NULL) { return TRUE; } return (safe_str_eq(op, entry->failed->op_type) && (interval == entry->failed->interval)); } /*! 
* \internal * \brief Clear a resource's last failure * * Erase a resource's last failure on a particular node from both the * LRM resource history in the CIB, and the resource history remembered * for the LRM state. * * \param[in] rsc_id Resource name * \param[in] node_name Node name * \param[in] operation If specified, only clear if matching this operation * \param[in] interval If operation is specified, it has this interval in ms */ void lrm_clear_last_failure(const char *rsc_id, const char *node_name, const char *operation, int interval) { char *op_key = NULL; char *orig_op_key = NULL; lrm_state_t *lrm_state = NULL; lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { return; } /* Erase from CIB */ op_key = generate_op_key(rsc_id, "last_failure", 0); if (operation) { orig_op_key = generate_op_key(rsc_id, operation, interval); } erase_lrm_history_by_id(lrm_state, rsc_id, op_key, orig_op_key, 0); free(op_key); free(orig_op_key); /* Remove from memory */ if (lrm_state->resource_history) { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (last_failed_matches_op(entry, operation, interval)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; struct recurring_op_s *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->pending_ops, key); if (pending) { if (remove && pending->remove == FALSE) { pending->remove = TRUE; crm_debug("Scheduling %s for removal", key); } if (pending->cancelled) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } pending->cancelled = TRUE; } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (crm_str_eq(op->op_key, data->key, TRUE)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = 
g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->pending_ops)); return data.done; } -static lrmd_rsc_info_t * -get_lrm_resource(lrm_state_t * lrm_state, xmlNode * resource, xmlNode * op_msg, gboolean do_create) +/*! + * \internal + * \brief Retrieve resource information from LRM + * + * \param[in] lrm_state LRM connection to use + * \param[in] rsc_xml XML containing resource configuration + * \param[in] do_create If true, register resource with LRM if not already + * \param[out] rsc_info Where to store resource information obtained from LRM + * + * \retval pcmk_ok Success (and rsc_info holds newly allocated result) + * \retval -EINVAL Required information is missing from arguments + * \retval -ENOTCONN No active connection to LRM + * \retval -ENODEV Resource not found + * \retval -errno Error communicating with lrmd when registering resource + * + * \note Caller is responsible for freeing result on success. + */ +static int +get_lrm_resource(lrm_state_t *lrm_state, xmlNode *rsc_xml, gboolean do_create, + lrmd_rsc_info_t **rsc_info) { - lrmd_rsc_info_t *rsc = NULL; - const char *id = ID(resource); - const char *type = crm_element_value(resource, XML_ATTR_TYPE); - const char *class = crm_element_value(resource, XML_AGENT_ATTR_CLASS); - const char *provider = crm_element_value(resource, XML_AGENT_ATTR_PROVIDER); - const char *long_id = crm_element_value(resource, XML_ATTR_ID_LONG); + const char *id = ID(rsc_xml); - crm_trace("Retrieving %s from the LRM.", id); - CRM_CHECK(id != NULL, return NULL); + CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); + CRM_CHECK(id, return -EINVAL); - rsc = lrm_state_get_rsc_info(lrm_state, id, 0); - - if (!rsc && long_id) { - rsc = lrm_state_get_rsc_info(lrm_state, long_id, 0); + if (lrm_state_is_connected(lrm_state) == FALSE) { + return -ENOTCONN; } - if (!rsc && do_create) { - CRM_CHECK(class != NULL, return NULL); - CRM_CHECK(type != NULL, return NULL); + crm_trace("Retrieving resource information for %s from the LRM", id); + *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); - crm_trace("Adding rsc %s before operation", id); + // If resource isn't known by ID, try clone name, if provided + if (!*rsc_info) { + const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG); - lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); + if (long_id) { + *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); + } + } - rsc = lrm_state_get_rsc_info(lrm_state, id, 0); + if ((*rsc_info == NULL) && do_create) { + const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS); + const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER); + const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE); + int rc; - if (!rsc) { + crm_trace("Registering resource %s with LRM", id); + rc = lrm_state_register_rsc(lrm_state, id, class, provider, type, + lrmd_opt_drop_recurring); + if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; - crm_err("Could not add resource %s to LRM %s", id, lrm_state->node_name); - /* only register this as a internal error if this involves the local - * lrmd. Otherwise we're likely dealing with an unresponsive remote-node - * which is not a FSA failure. 
*/ + crm_err("Could not register resource %s with LRM on %s: %s " + CRM_XS " rc=%d", + id, lrm_state->node_name, pcmk_strerror(rc), rc); + + /* Register this as an internal error if this involves the local + * lrmd. Otherwise, we're likely dealing with an unresponsive remote + * node, which is not an FSA failure. + */ if (lrm_state_is_local(lrm_state) == TRUE) { register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } + return rc; } - } - return rsc; + *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); + } + return *rsc_info? pcmk_ok : -ENODEV; } static void delete_resource(lrm_state_t * lrm_state, const char *id, lrmd_rsc_info_t * rsc, GHashTableIter * gIter, const char *sys, const char *host, const char *user, ha_msg_input_t * request, gboolean unregister) { int rc = pcmk_ok; crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? user : "internal", host); if (rsc && unregister) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource '%s' deleted", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); op = calloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = strdup(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d", id, sys, user ? user : "internal", host, rc); } delete_rsc_entry(lrm_state, request, id, gIter, rc, user); } static int get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id) { int call_id = 999999999; rsc_history_t *entry = NULL; if(lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { call_id = entry->last_callid + 1; } if (call_id < 0) { call_id = 1; } return call_id; } static void -force_reprobe(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) +fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status, + enum ocf_exitcode op_exitcode) { - GHashTableIter gIter; - rsc_history_t *entry = NULL; + op->call_id = get_fake_call_id(lrm_state, op->rsc_id); + op->t_run = time(NULL); + op->t_rcchange = op->t_run; + op->op_status = op_status; + op->rc = op_exitcode; +} +static void +force_reprobe(lrm_state_t *lrm_state, const char *from_sys, + const char *from_host, const char *user_name, + gboolean is_remote_node) +{ + GHashTableIter gIter; + rsc_history_t *entry = NULL; - crm_info("clearing resource history on node %s", lrm_state->node_name); - g_hash_table_iter_init(&gIter, lrm_state->resource_history); - while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { - /* only unregister the resource during a reprobe if it is not a remote connection - * resource. 
otherwise unregistering the connection will terminate remote-node - * membership */ - gboolean unregister = TRUE; - - if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { - lrm_state_t *remote_lrm_state = lrm_state_find(entry->id); - if (remote_lrm_state) { - /* when forcing a reprobe, make sure to clear remote node before - * clearing the remote node's connection resource */ - force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE); - } - unregister = FALSE; + crm_info("Clearing resource history on node %s", lrm_state->node_name); + g_hash_table_iter_init(&gIter, lrm_state->resource_history); + while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { + /* only unregister the resource during a reprobe if it is not a remote connection + * resource. otherwise unregistering the connection will terminate remote-node + * membership */ + gboolean unregister = TRUE; + + if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { + lrm_state_t *remote_lrm_state = lrm_state_find(entry->id); + if (remote_lrm_state) { + /* when forcing a reprobe, make sure to clear remote node before + * clearing the remote node's connection resource */ + force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE); } - - delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host, - user_name, NULL, unregister); + unregister = FALSE; } - /* Now delete the copy in the CIB */ - erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); + delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host, + user_name, NULL, unregister); + } - /* And finally, _delete_ the value in attrd - * Setting it to FALSE results in the PE sending us back here again - */ - update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); + /* Now delete the copy in the CIB */ + erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); + + /* And finally, _delete_ the value in attrd + * Setting it to FALSE results in the PE sending us back here again + */ + update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); } static void synthesize_lrmd_failure(lrm_state_t *lrm_state, xmlNode *action, int rc) { lrmd_event_data_t *op = NULL; + lrmd_rsc_info_t *rsc_info = NULL; const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK); const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET); xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE); - if(xml_rsc == NULL) { + if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
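         * A direct ack would presumably use the same helper the rest of this
         * file uses for synthesized replies (hypothetical, since
         * from_host/from_sys are not available in this scope):
         *
         *     send_direct_ack(from_host, from_sys, NULL, op, rsc_id);
         *
         * but with no resource ID we cannot construct_op() an event to ack,
         * so for now we only log and return.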
*/ - crm_info("Skipping %s=%d on %s (%p): no resource", - crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, target_node, lrm_state); + crm_info("Can't fake %s failure (%d) on %s without resource configuration", + crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, + target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ - crm_info("Skipping %s=%d on %s (%p): no operation", - crm_element_value(action, XML_ATTR_TRANSITION_KEY), rc, target_node, lrm_state); + crm_info("Can't fake %s failure (%d) on %s without operation", + ID(xml_rsc), rc, target_node); return; } op = construct_op(lrm_state, action, ID(xml_rsc), operation); - CRM_ASSERT(op != NULL); - op->call_id = get_fake_call_id(lrm_state, op->rsc_id); - if(safe_str_eq(operation, RSC_NOTIFY)) { - /* Notifications can't fail yet */ + if (safe_str_eq(operation, RSC_NOTIFY)) { // Notifications can't fail + fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_OK); + } else { + fake_op_status(lrm_state, op, PCMK_LRM_OP_ERROR, rc); + } + + crm_info("Faking %s_%s_%d result (%d) on %s", + op->rsc_id, op->op_type, op->interval, op->rc, target_node); + + /* Process the result as if it came from the LRM, if possible + * (i.e. resource info can be obtained from the lrm_state). + */ + if (lrm_state) { + rsc_info = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); + } + if (rsc_info) { + process_lrm_event(lrm_state, op, NULL); + + } else { + /* If we can't process the result normally, at least write it to the CIB + * if possible, so the PE can act on it. + */ + char *standard = crm_element_value_copy(xml_rsc, XML_AGENT_ATTR_CLASS); + char *provider = crm_element_value_copy(xml_rsc, XML_AGENT_ATTR_PROVIDER); + char *type = crm_element_value_copy(xml_rsc, XML_ATTR_TYPE); + + if (standard && type) { + rsc_info = lrmd_new_rsc_info(op->rsc_id, standard, provider, type); + do_update_resource(target_node, rsc_info, op); + lrmd_free_rsc_info(rsc_info); + } else { + // @TODO Should we direct ack? + crm_info("Can't fake %s failure (%d) on %s without resource standard and type", + crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, + target_node); + } + } + lrmd_free_event(op); +} + +/*! + * \internal + * \brief Get target of an LRM operation + * + * \param[in] xml LRM operation data XML + * + * \return LRM operation target node name (local node or Pacemaker Remote node) + */ +static const char * +lrm_op_target(xmlNode *xml) +{ + const char *target = NULL; + + if (xml) { + target = crm_element_value(xml, XML_LRM_ATTR_TARGET); + } + if (target == NULL) { + target = fsa_our_uname; + } + return target; +} + +static void +fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, + const char *from_host, const char *from_sys) +{ + lrmd_event_data_t *op = NULL; + lrmd_rsc_info_t *rsc = NULL; + xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE); + + CRM_CHECK(xml_rsc != NULL, return); + + /* The lrmd simply executes operations and reports the results, without any + * concept of success or failure, so to fail a resource, we must fake what a + * failure looks like. + * + * To do this, we create a fake lrmd operation event for the resource, and + * pass that event to the lrmd client callback so it will be processed as if + * it came from the lrmd. 
+ */ + op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon"); + fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); + + free((char*) op->user_data); + op->user_data = NULL; + op->interval = 0; + +#if ENABLE_ACL + if (user_name && is_privileged(user_name) == FALSE) { + crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); + send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); + lrmd_free_event(op); + return; + } +#endif + + if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) { + crm_info("Failing resource %s...", rsc->id); + process_lrm_event(lrm_state, op, NULL); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; + lrmd_free_rsc_info(rsc); } else { - op->op_status = PCMK_LRM_OP_ERROR; - op->rc = rc; + crm_info("Cannot find/create resource in order to fail it..."); + crm_log_xml_warn(xml, "bad input"); } - op->t_run = time(NULL); - op->t_rcchange = op->t_run; - crm_info("Faking result %d for %s_%s_%d on %s (%p)", op->rc, op->rsc_id, op->op_type, op->interval, target_node, lrm_state); + send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); + lrmd_free_event(op); +} - if(lrm_state) { - process_lrm_event(lrm_state, op, NULL); +static void +handle_refresh_op(lrm_state_t *lrm_state, const char *user_name, + const char *from_host, const char *from_sys) +{ + int rc = pcmk_ok; + xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all); + + fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); + crm_info("Forced a local LRM refresh: call=%d", rc); + + if (safe_str_neq(CRM_SYSTEM_CRMD, from_sys)) { + xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, fragment, from_host, + from_sys, CRM_SYSTEM_LRMD, + fsa_our_uuid); + + crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); + + if (relay_message(reply, TRUE) == FALSE) { + crm_log_xml_err(reply, "Unable to route reply"); + } + free_xml(reply); + } + + free_xml(fragment); +} + +static void +handle_query_op(xmlNode *msg, lrm_state_t *lrm_state) +{ + xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all); + xmlNode *reply = create_reply(msg, data); + + if (relay_message(reply, TRUE) == FALSE) { + crm_err("Unable to route reply"); + crm_log_xml_err(reply, "reply"); + } + free_xml(reply); + free_xml(data); +} + +static void +handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys, + const char *from_host, const char *user_name, + gboolean is_remote_node) +{ + crm_notice("Forcing the status of all resources to be redetected"); + force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node); + + if (safe_str_neq(CRM_SYSTEM_PENGINE, from_sys) + && safe_str_neq(CRM_SYSTEM_TENGINE, from_sys)) { + + xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host, + from_sys, CRM_SYSTEM_LRMD, + fsa_our_uuid); + + crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); + + if (relay_message(reply, TRUE) == FALSE) { + crm_log_xml_err(reply, "Unable to route reply"); + } + free_xml(reply); + } +} + +static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, + lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys) +{ + char *op_key = NULL; + char *meta_key = NULL; + int call = 0; + const char *call_id = NULL; + const char *op_task = NULL; + const char *op_interval = NULL; + gboolean in_progress = FALSE; + xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); + + CRM_CHECK(params != NULL, return FALSE); + + meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL); + 
op_interval = crm_element_value(params, meta_key); + free(meta_key); + CRM_CHECK(op_interval != NULL, return FALSE); + + meta_key = crm_meta_name(XML_LRM_ATTR_TASK); + op_task = crm_element_value(params, meta_key); + free(meta_key); + CRM_CHECK(op_task != NULL, return FALSE); + + meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); + call_id = crm_element_value(params, meta_key); + free(meta_key); + + op_key = generate_op_key(rsc->id, op_task, crm_parse_int(op_interval, "0")); + + crm_debug("PE requested op %s (call=%s) be cancelled", + op_key, (call_id? call_id : "NA")); + call = crm_parse_int(call_id, "0"); + if (call == 0) { + /* the normal case when the PE cancels a recurring op */ + in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { - lrmd_rsc_info_t rsc; + /* the normal case when the PE cancels an orphan op */ + in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); + } + + // Acknowledge cancellation operation if for a remote connection resource + if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { + char *op_id = make_stop_id(rsc->id, call); + + if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { + crm_info("Nothing known about operation %d for %s", call, op_key); + } + erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call); + send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, + from_host, from_sys); - rsc.id = strdup(op->rsc_id); - rsc.type = crm_element_value_copy(xml_rsc, XML_ATTR_TYPE); - rsc.standard = crm_element_value_copy(xml_rsc, XML_AGENT_ATTR_CLASS); - rsc.provider = crm_element_value_copy(xml_rsc, XML_AGENT_ATTR_PROVIDER); + /* needed at least for cancellation of a remote operation */ + g_hash_table_remove(lrm_state->pending_ops, op_id); + free(op_id); - do_update_resource(target_node, &rsc, op); + } else { + /* No ack is needed since abcdaa8, but peers with older versions + * in a rolling upgrade need one. We didn't bump the feature set + * at that commit, so we can only compare against the previous + * CRM version (3.0.8). If any peers have feature set 3.0.9 but + * not abcdaa8, they will time out waiting for the ack (no + * released versions of Pacemaker are affected). + */ + const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION); - free(rsc.id); - free(rsc.type); - free(rsc.standard); - free(rsc.provider); + if (compare_version(peer_version, "3.0.8") <= 0) { + crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)", + op_key, from_host, peer_version); + send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, + from_host, from_sys); + } } - lrmd_free_event(op); + + free(op_key); + return TRUE; } +static void +do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, + lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host, + bool crm_rsc_delete, const char *user_name) +{ + gboolean unregister = TRUE; + +#if ENABLE_ACL + int cib_rc = delete_rsc_status(lrm_state, rsc->id, + cib_dryrun|cib_sync_call, user_name); + + if (cib_rc != pcmk_ok) { + lrmd_event_data_t *op = NULL; + + crm_err("Could not delete resource status of %s for %s (user %s) on %s: %s" + CRM_XS " rc=%d", + rsc->id, from_sys, (user_name? 
user_name : "unknown"), + from_host, pcmk_strerror(cib_rc), cib_rc); + + op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE); + op->op_status = PCMK_LRM_OP_ERROR; + + if (cib_rc == -EACCES) { + op->rc = PCMK_OCF_INSUFFICIENT_PRIV; + } else { + op->rc = PCMK_OCF_UNKNOWN_ERROR; + } + send_direct_ack(from_host, from_sys, NULL, op, rsc->id); + lrmd_free_event(op); + lrmd_free_rsc_info(rsc); + return; + } +#endif + + if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { + unregister = FALSE; + } + + delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, + user_name, input, unregister); +} /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { - gboolean create_rsc = TRUE; lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = NULL; gboolean is_remote_node = FALSE; - gboolean crm_rsc_delete = FALSE; + bool crm_rsc_delete = FALSE; - if (input->xml != NULL) { - /* Remote node operations are routed here to their remote connections */ - target_node = crm_element_value(input->xml, XML_LRM_ATTR_TARGET); - } - if (target_node == NULL) { - target_node = fsa_our_uname; - } else if (safe_str_neq(target_node, fsa_our_uname)) { - is_remote_node = TRUE; - } + target_node = lrm_op_target(input->xml); + is_remote_node = safe_str_neq(target_node, fsa_our_uname); lrm_state = lrm_state_find(target_node); - - if (lrm_state == NULL && is_remote_node) { - crm_err("Failing action because remote node %s has no connection to cluster node %s", - target_node, fsa_our_uname); - - /* The action must be recorded here and in the CIB as failed */ + if ((lrm_state == NULL) && is_remote_node) { + crm_err("Failing action because local node has never had connection to remote node %s", + target_node); synthesize_lrmd_failure(NULL, input->xml, PCMK_OCF_CONNECTION_DIED); return; } - CRM_ASSERT(lrm_state != NULL); #if ENABLE_ACL user_name = crm_acl_get_set_user(input->msg, F_CRM_USER, NULL); crm_trace("LRM command from user '%s'", user_name); #endif crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } - - crm_trace("LRM command from: %s", from_sys); + crm_trace("LRM %s command from %s", crm_op, from_sys); if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { - /* remember this delete op came from crm_resource */ - crm_rsc_delete = TRUE; + crm_rsc_delete = TRUE; // Only crm_resource uses this op operation = CRMD_ACTION_DELETE; - } else if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { - operation = CRM_OP_LRM_REFRESH; - } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { - lrmd_event_data_t *op = NULL; - lrmd_rsc_info_t *rsc = NULL; - xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); - - CRM_CHECK(xml_rsc != NULL, return); - - /* The lrmd can not fail a resource, it does not understand the - * concept of success or failure in relation to a resource, it simply - * executes operations and reports the results. We determine what a failure is. - * Because of this, if we want to fail a resource we have to fake what we - * understand a failure to look like. 
- * - * To do this we create a fake lrmd operation event for the resource - * we want to fail. We then pass that event to the lrmd client callback - * so it will be processed as if it actually came from the lrmd. */ - op = construct_op(lrm_state, input->xml, ID(xml_rsc), "asyncmon"); - CRM_ASSERT(op != NULL); - - free((char *)op->user_data); - op->user_data = NULL; - op->call_id = get_fake_call_id(lrm_state, op->rsc_id); - op->interval = 0; - op->op_status = PCMK_LRM_OP_DONE; - op->rc = PCMK_OCF_UNKNOWN_ERROR; - op->t_run = time(NULL); - op->t_rcchange = op->t_run; - -#if ENABLE_ACL - if (user_name && is_privileged(user_name) == FALSE) { - crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); - send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); - lrmd_free_event(op); - return; - } -#endif - - rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc); - if (rsc) { - crm_info("Failing resource %s...", rsc->id); - process_lrm_event(lrm_state, op, NULL); - op->op_status = PCMK_LRM_OP_DONE; - op->rc = PCMK_OCF_OK; - lrmd_free_rsc_info(rsc); - } else { - crm_info("Cannot find/create resource in order to fail it..."); - crm_log_xml_warn(input->msg, "bad input"); - } - - send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); - lrmd_free_event(op); + fail_lrm_resource(input->xml, lrm_state, user_name, from_host, + from_sys); return; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { - int rc = pcmk_ok; - xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all); - - fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); - crm_info("Forced a local LRM refresh: call=%d", rc); - - if (safe_str_neq(CRM_SYSTEM_CRMD, from_sys)) { - xmlNode *reply = create_request( - CRM_OP_INVOKE_LRM, fragment, - from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); - - crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); - - if (relay_message(reply, TRUE) == FALSE) { - crm_log_xml_err(reply, "Unable to route reply"); - } - free_xml(reply); - } - - free_xml(fragment); + handle_refresh_op(lrm_state, user_name, from_host, from_sys); } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { - xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all); - xmlNode *reply = create_reply(input->msg, data); - - if (relay_message(reply, TRUE) == FALSE) { - crm_err("Unable to route reply"); - crm_log_xml_err(reply, "reply"); - } - free_xml(reply); - free_xml(data); + handle_query_op(input->msg, lrm_state); } else if (safe_str_eq(operation, CRM_OP_PROBED)) { - update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node); - - } else if (safe_str_eq(operation, CRM_OP_REPROBE) || safe_str_eq(crm_op, CRM_OP_REPROBE)) { - crm_notice("Forcing the status of all resources to be redetected"); - - force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node); + update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, + user_name, is_remote_node); - if (safe_str_neq(CRM_SYSTEM_PENGINE, from_sys) - && safe_str_neq(CRM_SYSTEM_TENGINE, from_sys)) { - - xmlNode *reply = create_request( - CRM_OP_INVOKE_LRM, NULL, - from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); - - crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); - - if (relay_message(reply, TRUE) == FALSE) { - crm_log_xml_err(reply, "Unable to route reply"); - } - free_xml(reply); - } + } else if (safe_str_eq(operation, CRM_OP_REPROBE) 
+ || safe_str_eq(crm_op, CRM_OP_REPROBE)) { + handle_reprobe_op(lrm_state, from_sys, from_host, user_name, + is_remote_node); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; - xmlNode *params = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); + gboolean create_rsc = safe_str_neq(operation, CRMD_ACTION_DELETE); + int rc; - CRM_CHECK(xml_rsc != NULL, return); - - params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); - - if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { - create_rsc = FALSE; - } + // We can't return anything meaningful without a resource ID + CRM_CHECK(xml_rsc && ID(xml_rsc), return); - if(lrm_state_is_connected(lrm_state) == FALSE) { - synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_CONNECTION_DIED); + rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); + if (rc == -ENOTCONN) { + synthesize_lrmd_failure(lrm_state, input->xml, + PCMK_OCF_CONNECTION_DIED); return; - } - - rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc); - if (rsc == NULL && create_rsc) { - crm_err("Invalid resource definition for %s", ID(xml_rsc)); - crm_log_xml_warn(input->msg, "bad input"); - /* if the operation couldn't complete because we can't register - * the resource, return a generic error */ - synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_NOT_CONFIGURED); - - } else if (rsc == NULL) { - crm_notice("Not creating resource for a %s event: %s", operation, ID(input->xml)); - delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name); - - /* Deleting something that does not exist is a success */ + } else if (!create_rsc) { + /* Delete of malformed or nonexistent resource + * (deleting something that does not exist is a success) + */ + crm_notice("Not registering resource '%s' for a %s event " + CRM_XS " get-rc=%d (%s) transition-key=%s", + ID(xml_rsc), operation, + rc, pcmk_strerror(rc), ID(input->xml)); + delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, + user_name); send_task_ok_ack(lrm_state, input, ID(xml_rsc), NULL, operation, from_host, from_sys); + return; - } else if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { - char *op_key = NULL; - char *meta_key = NULL; - int call = 0; - const char *call_id = NULL; - const char *op_task = NULL; - const char *op_interval = NULL; - gboolean in_progress = FALSE; - - CRM_CHECK(params != NULL, crm_log_xml_warn(input->xml, "Bad command"); - lrmd_free_rsc_info(rsc); return); - - meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL); - op_interval = crm_element_value(params, meta_key); - free(meta_key); - - meta_key = crm_meta_name(XML_LRM_ATTR_TASK); - op_task = crm_element_value(params, meta_key); - free(meta_key); - - meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); - call_id = crm_element_value(params, meta_key); - free(meta_key); - - CRM_CHECK(op_task != NULL, crm_log_xml_warn(input->xml, "Bad command"); - lrmd_free_rsc_info(rsc); return); - CRM_CHECK(op_interval != NULL, crm_log_xml_warn(input->xml, "Bad command"); - lrmd_free_rsc_info(rsc); return); - - op_key = generate_op_key(rsc->id, op_task, crm_parse_int(op_interval, "0")); - - crm_debug("PE requested op %s (call=%s) be cancelled", - op_key, call_id ? 
call_id : "NA"); - call = crm_parse_int(call_id, "0"); - if (call == 0) { - /* the normal case when the PE cancels a recurring op */ - in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); - - } else { - /* the normal case when the PE cancels an orphan op */ - in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); - } + } else if (rc == -EINVAL) { + // Resource operation on malformed resource + crm_err("Invalid resource definition for %s", ID(xml_rsc)); + crm_log_xml_warn(input->msg, "invalid resource"); + synthesize_lrmd_failure(lrm_state, input->xml, + PCMK_OCF_NOT_CONFIGURED); // fatal error + return; - /* Acknowledge the cancellation operation if it's for a remote connection resource */ - if (in_progress == FALSE || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { - char *op_id = make_stop_id(rsc->id, call); + } else if (rc < 0) { + // Error communicating with lrmd + crm_err("Could not register resource '%s' with lrmd: %s " CRM_XS " rc=%d", + ID(xml_rsc), pcmk_strerror(rc), rc); + crm_log_xml_warn(input->msg, "failed registration"); + synthesize_lrmd_failure(lrm_state, input->xml, + PCMK_OCF_INVALID_PARAM); // hard error + return; + } - if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { - crm_info("Nothing known about operation %d for %s", call, op_key); - } - erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call); - send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, - from_host, from_sys); - - /* needed at least for cancellation of a remote operation */ - g_hash_table_remove(lrm_state->pending_ops, op_id); - free(op_id); - - } else { - /* No ack is needed since abcdaa8, but peers with older versions - * in a rolling upgrade need one. We didn't bump the feature set - * at that commit, so we can only compare against the previous - * CRM version (3.0.8). If any peers have feature set 3.0.9 but - * not abcdaa8, they will time out waiting for the ack (no - * released versions of Pacemaker are affected). - */ - const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION); - - if (compare_version(peer_version, "3.0.8") <= 0) { - crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)", - op_key, from_host, peer_version); - send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, - from_host, from_sys); - } + if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { + if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { + crm_log_xml_warn(input->xml, "Bad command"); } - free(op_key); - } else if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { - gboolean unregister = TRUE; - -#if ENABLE_ACL - int cib_rc = delete_rsc_status(lrm_state, rsc->id, cib_dryrun | cib_sync_call, user_name); - if (cib_rc != pcmk_ok) { - lrmd_event_data_t *op = NULL; - - crm_err - ("Attempted deletion of resource status '%s' from CIB for %s (user=%s) on %s failed: (rc=%d) %s", - rsc->id, from_sys, user_name ? 
user_name : "unknown", from_host, cib_rc, - pcmk_strerror(cib_rc)); - - op = construct_op(lrm_state, input->xml, rsc->id, operation); - op->op_status = PCMK_LRM_OP_ERROR; - - if (cib_rc == -EACCES) { - op->rc = PCMK_OCF_INSUFFICIENT_PRIV; - } else { - op->rc = PCMK_OCF_UNKNOWN_ERROR; - } - send_direct_ack(from_host, from_sys, NULL, op, rsc->id); - lrmd_free_event(op); - lrmd_free_rsc_info(rsc); - return; - } -#endif - if (crm_rsc_delete == TRUE && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { - unregister = FALSE; - } - - delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, user_name, input, unregister); + do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, + crm_rsc_delete, user_name); } else { do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg); } lrmd_free_rsc_info(rsc); } else { crm_err("Cannot perform operation %s of unknown type", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; const char *op_interval = NULL; GHashTable *params = NULL; const char *transition = NULL; - CRM_ASSERT(rsc_id != NULL); + CRM_ASSERT(rsc_id && operation); op = calloc(1, sizeof(lrmd_event_data_t)); + CRM_ASSERT(op != NULL); + op->type = lrmd_event_exec_complete; op->op_type = strdup(operation); op->op_status = PCMK_LRM_OP_PENDING; op->rc = -1; op->rsc_id = strdup(rsc_id); op->interval = 0; op->timeout = 0; op->start_delay = 0; if (rsc_op == NULL) { CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. 
*/ op->params = crm_str_table_new(); g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_op_target_rc"); op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); op_interval = crm_meta_value(params, XML_LRM_ATTR_INTERVAL); op->interval = crm_parse_int(op_interval, "0"); op->timeout = crm_parse_int(op_timeout, "0"); op->start_delay = crm_parse_int(op_delay, "0"); #if ENABLE_VERSIONED_ATTRS // Resolve any versioned parameters - if (safe_str_neq(op->op_type, RSC_METADATA) + if (lrm_state && safe_str_neq(op->op_type, RSC_METADATA) && safe_str_neq(op->op_type, CRMD_ACTION_DELETE) && !is_remote_lrmd_ra(NULL, NULL, rsc_id)) { // Resource info *should* already be cached, so we don't get lrmd call lrmd_rsc_info_t *rsc = lrm_state_get_rsc_info(lrm_state, rsc_id, 0); struct ra_metadata_s *metadata; metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata) { xmlNode *versioned_attrs = NULL; GHashTable *hash = NULL; char *key = NULL; char *value = NULL; GHashTableIter iter; versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_META); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_replace(params, crm_meta_name(key), strdup(value)); if (safe_str_eq(key, XML_ATTR_TIMEOUT)) { op->timeout = crm_parse_int(value, "0"); } else if (safe_str_eq(key, XML_OP_ATTR_START_DELAY)) { op->start_delay = crm_parse_int(value, "0"); } } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_RSC_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); } lrmd_free_rsc_info(rsc); } #endif if (safe_str_neq(operation, RSC_STOP)) { op->params = params; } else { - rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); + rsc_history_t *entry = NULL; + + if (lrm_state) { + entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); + } /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = crm_str_table_new(); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->interval < 0) { op->interval = 0; } if (op->timeout <= 0) { op->timeout = op->interval; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); 
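/* Aside: the transition key kept in user_data below is an opaque colon-separated tuple (roughly: action id, transition id, expected rc, and the transition engine's UUID, assuming the usual generate_transition_key()/decode_transition_key() pairing used elsewhere in this file). Consumers pull the pieces back out like this: int dummy = 0, target_rc = 0; char *uuid = NULL; decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); free(uuid); */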
op->user_data = strdup(transition); if (op->interval != 0) { if (safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and Stop actions cannot have an interval: %d", op->interval); op->interval = 0; } } crm_trace("Constructed %s op for %s: interval=%d", operation, rsc_id, op->interval); return op; } void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; crm_node_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { CRM_ASSERT(rsc_id != NULL); op->rsc_id = strdup(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = crm_get_peer(0, fsa_our_uname); update = create_node_state_update(peer, node_update_none, NULL, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, fsa_our_uname, __FUNCTION__); reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_trace(update, "ACK Update"); crm_debug("ACK'ing resource op %s_%s_%d from %s: %s", op->rsc_id, op->op_type, op->interval, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(update); free_xml(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } set_bit(fsa_input_register, R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval != 0 && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key); remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval != 0) { crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } static void record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op) { const char *record_pending = NULL; CRM_CHECK(node_name != NULL, return); CRM_CHECK(rsc != NULL, return); CRM_CHECK(op != NULL, return); if ((op->op_type == NULL) || (op->params == NULL) || safe_str_eq(op->op_type, CRMD_ACTION_CANCEL) || safe_str_eq(op->op_type, CRMD_ACTION_DELETE)) { return; } // defaults to true record_pending = crm_meta_value(op->params, 
XML_OP_ATTR_PENDING); if (record_pending && !crm_is_true(record_pending)) { return; } op->call_id = -1; op->op_status = PCMK_LRM_OP_PENDING; op->rc = PCMK_OCF_UNKNOWN; op->t_run = time(NULL); op->t_rcchange = op->t_run; /* write a "pending" entry to the CIB, inhibit notification */ crm_debug("Recording pending op %s_%s_%d on %s in the CIB", op->rsc_id, op->op_type, op->interval, node_name); do_update_resource(node_name, rsc, op); } static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request) { int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; lrmd_key_value_t *params = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; gboolean stop_recurring = FALSE; bool send_nack = FALSE; CRM_CHECK(rsc != NULL, return); CRM_CHECK(operation != NULL, return); if (msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); if (transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) && op->interval == 0 && strcmp(operation, CRMD_ACTION_MIGRATE) == 0) { /* pcmk remote connections are a special use case. * We never ever want to stop monitoring a connection resource until * the entire migration has completed. If the connection is unexpectedly * severed, even during a migration, this is an event we must detect.*/ stop_recurring = FALSE; } else if (op->interval == 0 && strcmp(operation, CRMD_ACTION_STATUS) != 0 && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) { /* stop any previous monitor operations before changing the resource state */ stop_recurring = TRUE; } if (stop_recurring == TRUE) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_action_by_rsc, &data); if (removed) { crm_debug("Stopped %u recurring operation%s in preparation for %s_%s_%d", removed, s_if_plural(removed), rsc->id, operation, op->interval); } } /* now do the op */ crm_info("Performing key=%s op=%s_%s_%d", transition, rsc->id, operation, op->interval); if (is_set(fsa_input_register, R_SHUTDOWN) && safe_str_eq(operation, RSC_START)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); send_nack = TRUE; } else if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE /* Recalculating */ && fsa_state != S_TRANSITION_ENGINE && safe_str_neq(operation, CRMD_ACTION_STOP)) { send_nack = TRUE; } if(send_nack) { crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)", operation, rsc->id, fsa_state2string(fsa_state), is_set(fsa_input_register, R_SHUTDOWN)?"true":"false"); op->rc = CRM_DIRECT_NACK_RC; op->op_status = PCMK_LRM_OP_ERROR; send_direct_ack(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } record_pending_op(lrm_state->node_name, rsc, op); op_id = generate_op_key(rsc->id, op->op_type, op->interval); if (op->interval > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } if (op->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { params = lrmd_key_value_add(params, key, value); } } call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data, op->interval, op->timeout, op->start_delay, params); 
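/* lrm_state_exec() returns a call ID: positive means the command was accepted (by the local lrmd, a remote lrmd, or the remote-connection RA path); zero or negative means it never got that far. The three branches below key off that convention. */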
if (call_id <= 0 && lrm_state_is_local(lrm_state)) { crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else if (call_id <= 0) { - - crm_err("Operation %s on resource %s failed to execute on remote node %s: %d", operation, rsc->id, lrm_state->node_name, call_id); - op->call_id = get_fake_call_id(lrm_state, rsc->id); - op->op_status = PCMK_LRM_OP_DONE; - op->rc = PCMK_OCF_UNKNOWN_ERROR; - op->t_run = time(NULL); - op->t_rcchange = op->t_run; + crm_err("Operation %s on resource %s failed to execute on remote node %s: %d", + operation, rsc->id, lrm_state->node_name, call_id); + fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); process_lrm_event(lrm_state, op, NULL); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); struct recurring_op_s *pending = NULL; pending = calloc(1, sizeof(struct recurring_op_s)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval = op->interval; pending->op_type = strdup(operation); pending->op_key = strdup(op_id); pending->rsc_id = strdup(rsc->id); pending->start_time = time(NULL); pending->user_data = strdup(op->user_data); g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); if (op->interval > 0 && op->start_delay > START_DELAY_THRESHOLD) { char *uuid = NULL; int dummy = 0, target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); free(uuid); op->rc = target_rc; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(NULL, NULL, rsc, op, rsc->id); } pending->params = op->params; op->params = NULL; } free(op_id); lrmd_free_event(op); return; } int last_resource_update = 0; static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); } if (call_id == last_resource_update) { last_resource_update = 0; trigger_fsa(fsa_source); } } static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { /* Builds: <status> <node_state id=...> <lrm> <lrm_resources> <lrm_resource id=...> ... */ int rc = pcmk_ok; xmlNode *update, *iter = NULL; int call_opt = crmd_cib_smart_opt(); const char *uuid = NULL; CRM_CHECK(op != NULL, return 0); iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); if (safe_str_eq(node_name, fsa_our_uname)) { uuid = fsa_our_uuid; } else { /* remote nodes' uuid and uname are equal */ uuid = node_name; crm_xml_add(iter, XML_NODE_IS_REMOTE, "true"); } CRM_LOG_ASSERT(uuid != NULL); if(uuid == NULL) { rc = -EINVAL; goto done; } crm_xml_add(iter, XML_ATTR_UUID, uuid); crm_xml_add(iter, XML_ATTR_UNAME, node_name); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, node_name, __FUNCTION__); if (rsc) { const char *container = NULL; crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard);
crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); if (op->params) { container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); } if (container) { crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container); crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container); } } else { crm_warn("Resource %s no longer exists in the lrmd", op->rsc_id); send_direct_ack(NULL, NULL, rsc, op, op->rsc_id); goto cleanup; } crm_log_xml_trace(update, __FUNCTION__); /* make it an asynchronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down anyway? * (probably because of an internal error) * * Worst case: * we get shot for having resources "running" when they really weren't * * the alternative, however, means blocking here for too long, which * isn't acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL); if (rc > 0) { last_resource_update = rc; } done: /* the return code is a call number, not an error code */ crm_trace("Sent resource state update message: %d for %s=%d on %s", rc, op->op_type, op->interval, op->rsc_id); fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } static char * unescape_newlines(const char *string) { char *pch = NULL; char *ret = NULL; static const char *escaped_newline = "\\n"; if (!string) { return NULL; } ret = strdup(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* 2 chars for 2 chars, null-termination irrelevant */ memcpy(pch, "\n ", 2 * sizeof(char)); pch = strstr(pch, escaped_newline); } return ret; } gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op, struct recurring_op_s *pending) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; gboolean remove = FALSE; gboolean removed = FALSE; lrmd_rsc_info_t *rsc = NULL; CRM_CHECK(op != NULL, return FALSE); CRM_CHECK(op->rsc_id != NULL, return FALSE); op_id = make_stop_id(op->rsc_id, op->call_id); op_key = generate_op_key(op->rsc_id, op->op_type, op->interval); rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); if(pending == NULL) { remove = TRUE; pending = g_hash_table_lookup(lrm_state->pending_ops, op_id); } if (op->op_status == PCMK_LRM_OP_ERROR) { switch(op->rc) { case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: /* Leave it up to the TE/PE to decide if this is an error */ op->op_status = PCMK_LRM_OP_DONE; break; default: /* Nothing to do */ break; } } if (op->op_status != PCMK_LRM_OP_CANCELLED) { if (safe_str_eq(op->op_type, RSC_NOTIFY) || safe_str_eq(op->op_type, RSC_METADATA)) { /* Keep notify and meta-data ops out of the CIB */ send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { update_id = do_update_resource(lrm_state->node_name, rsc, op); } } else if (op->interval == 0) { /* This will occur when "crm resource cleanup" is called while actions are in-flight */ crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else if (pending == NULL) { /* We don't need to do anything for cancelled ops * that are not in our pending op list.
There are no * transition actions waiting on these operations. */ } else if (op->user_data == NULL) { /* At this point we have a pending entry, but no transition * key present in the user_data field. report this */ crm_err("Op %s (call=%d): No user data", op_key, op->call_id); } else if (pending->remove) { /* The tengine canceled this op, we have been waiting for the cancel to finish. */ erase_lrm_history_by_op(lrm_state, op); } else if (pending && op->rsc_deleted) { /* The tengine initiated this op, but it was cancelled outside of the * tengine's control during a resource cleanup/re-probe request. The tengine * must be alerted that this operation completed, otherwise the tengine * will continue waiting for this update to occur until it is timed out. * We don't want this update going to the cib though, so use a direct ack. */ crm_trace("Op %s (call=%d): cancelled due to rsc deletion", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else { /* Before a stop is called, no need to direct ack */ crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id); } if(remove == FALSE) { /* The caller will do this afterwards, but keep the logging consistent */ removed = TRUE; } else if ((op->interval == 0) && g_hash_table_remove(lrm_state->pending_ops, op_id)) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops)); } else if(op->interval != 0 && op->op_status == PCMK_LRM_OP_CANCELLED) { removed = TRUE; g_hash_table_remove(lrm_state->pending_ops, op_id); } switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: crm_info("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s", crm_action_str(op->op_type, op->interval), op->rsc_id, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? "true" : "false")); break; case PCMK_LRM_OP_DONE: do_crm_log(op->interval?LOG_INFO:LOG_NOTICE, "Result of %s operation for %s on %s: %d (%s) " CRM_XS " call=%d key=%s confirmed=%s cib-update=%d", crm_action_str(op->op_type, op->interval), op->rsc_id, lrm_state->node_name, op->rc, services_ocf_exitcode_str(op->rc), op->call_id, op_key, (removed? "true" : "false"), update_id); break; case PCMK_LRM_OP_TIMEOUT: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s timeout=%dms", crm_action_str(op->op_type, op->interval), op->rsc_id, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, op->timeout); break; default: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s status=%d cib-update=%d", crm_action_str(op->op_type, op->interval), op->rsc_id, lrm_state->node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? 
"true" : "false"), op->op_status, update_id); } if (op->output) { char *prefix = crm_strdup_printf("%s-%s_%s_%d:%d", lrm_state->node_name, op->rsc_id, op->op_type, op->interval, op->call_id); if (op->rc) { crm_log_output(LOG_NOTICE, prefix, op->output); } else { crm_log_output(LOG_DEBUG, prefix, op->output); } free(prefix); } if (safe_str_neq(op->op_type, RSC_METADATA)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (op->rc == PCMK_OCF_OK) { char *metadata = unescape_newlines(op->output); metadata_cache_update(lrm_state->metadata_cache, rsc, metadata); free(metadata); } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... allow it to continue */ mainloop_set_trigger(fsa_source); update_history_cache(lrm_state, rsc, op); lrmd_free_rsc_info(rsc); free(op_key); free(op_id); return TRUE; } diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c index 1faf49db85..e18e809dd9 100644 --- a/crmd/lrm_state.c +++ b/crmd/lrm_state.c @@ -1,797 +1,798 @@ /* * Copyright (C) 2012 David Vossel * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include GHashTable *lrm_state_table = NULL; extern GHashTable *proxy_table; int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); static void free_rsc_info(gpointer value) { lrmd_rsc_info_t *rsc_info = value; lrmd_free_rsc_info(rsc_info); } static void free_deletion_op(gpointer value) { struct pending_deletion_op_s *op = value; free(op->rsc); delete_ha_msg_input(op->input); free(op); } static void free_recurring_op(gpointer value) { struct recurring_op_s *op = (struct recurring_op_s *)value; free(op->user_data); free(op->rsc_id); free(op->op_type); free(op->op_key); if (op->params) { g_hash_table_destroy(op->params); } free(op); } static gboolean fail_pending_op(gpointer key, gpointer value, gpointer user_data) { lrmd_event_data_t event = { 0, }; lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; crm_trace("Pre-emptively failing %s_%s_%d on %s (call=%s, %s)", op->rsc_id, op->op_type, op->interval, lrm_state->node_name, (char*)key, op->user_data); event.type = lrmd_event_exec_complete; event.rsc_id = op->rsc_id; event.op_type = op->op_type; event.user_data = op->user_data; event.timeout = 0; event.interval = op->interval; event.rc = PCMK_OCF_CONNECTION_DIED; event.op_status = PCMK_LRM_OP_ERROR; event.t_run = op->start_time; event.t_rcchange = op->start_time; event.call_id = op->call_id; 
event.remote_nodename = lrm_state->node_name; event.params = op->params; process_lrm_event(lrm_state, &event, op); return TRUE; } gboolean lrm_state_is_local(lrm_state_t *lrm_state) { if (lrm_state == NULL || fsa_our_uname == NULL) { return FALSE; } if (strcmp(lrm_state->node_name, fsa_our_uname) != 0) { return FALSE; } return TRUE; } lrm_state_t * lrm_state_create(const char *node_name) { lrm_state_t *state = NULL; if (!node_name) { crm_err("No node name given for lrm state object"); return NULL; } state = calloc(1, sizeof(lrm_state_t)); if (!state) { return NULL; } state->node_name = strdup(node_name); state->rsc_info_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_rsc_info); state->deletion_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free_deletion_op); state->pending_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free_recurring_op); state->resource_history = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, history_free); state->metadata_cache = metadata_cache_new(); g_hash_table_insert(lrm_state_table, (char *)state->node_name, state); return state; } void lrm_state_destroy(const char *node_name) { g_hash_table_remove(lrm_state_table, node_name); } static gboolean remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data) { remote_proxy_t *proxy = value; const char *node_name = user_data; if (safe_str_eq(node_name, proxy->node_name)) { return TRUE; } return FALSE; } static void internal_lrm_state_destroy(gpointer data) { lrm_state_t *lrm_state = data; if (!lrm_state) { return; } crm_trace("Destroying proxy table %s with %d members", lrm_state->node_name, g_hash_table_size(proxy_table)); g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name); remote_ra_cleanup(lrm_state); lrmd_api_delete(lrm_state->conn); if (lrm_state->rsc_info_cache) { crm_trace("Destroying rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_destroy(lrm_state->rsc_info_cache); } if (lrm_state->resource_history) { crm_trace("Destroying history op cache with %d members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_destroy(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Destroying deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_destroy(lrm_state->deletion_ops); } if (lrm_state->pending_ops) { crm_trace("Destroying pending op cache with %d members", g_hash_table_size(lrm_state->pending_ops)); g_hash_table_destroy(lrm_state->pending_ops); } metadata_cache_free(lrm_state->metadata_cache); free((char *)lrm_state->node_name); free(lrm_state); } void lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata) { if (lrm_state->resource_history) { crm_trace("Re-setting history op cache with %d members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_remove_all(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Re-setting deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_remove_all(lrm_state->deletion_ops); } if (lrm_state->pending_ops) { crm_trace("Re-setting pending op cache with %d members", g_hash_table_size(lrm_state->pending_ops)); g_hash_table_remove_all(lrm_state->pending_ops); } if (lrm_state->rsc_info_cache) { crm_trace("Re-setting rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_remove_all(lrm_state->rsc_info_cache); } if 
(reset_metadata) { metadata_cache_reset(lrm_state->metadata_cache); } } gboolean lrm_state_init_local(void) { if (lrm_state_table) { return TRUE; } lrm_state_table = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, internal_lrm_state_destroy); if (!lrm_state_table) { return FALSE; } proxy_table = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, remote_proxy_free); if (!proxy_table) { g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL; return FALSE; } return TRUE; } void lrm_state_destroy_all(void) { if (lrm_state_table) { crm_trace("Destroying state table with %d members", g_hash_table_size(lrm_state_table)); g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL; } if(proxy_table) { crm_trace("Destroying proxy table with %d members", g_hash_table_size(proxy_table)); g_hash_table_destroy(proxy_table); proxy_table = NULL; } } lrm_state_t * lrm_state_find(const char *node_name) { if (!node_name) { return NULL; } return g_hash_table_lookup(lrm_state_table, node_name); } lrm_state_t * lrm_state_find_or_create(const char *node_name) { lrm_state_t *lrm_state; lrm_state = g_hash_table_lookup(lrm_state_table, node_name); if (!lrm_state) { lrm_state = lrm_state_create(node_name); } return lrm_state; } GList * lrm_state_get_list(void) { return g_hash_table_get_values(lrm_state_table); } static remote_proxy_t * find_connected_proxy_by_node(const char * node_name) { GHashTableIter gIter; remote_proxy_t *proxy = NULL; CRM_CHECK(proxy_table != NULL, return NULL); g_hash_table_iter_init(&gIter, proxy_table); while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) &proxy)) { if (proxy->source && safe_str_eq(node_name, proxy->node_name)) { return proxy; } } return NULL; } static void remote_proxy_disconnect_by_node(const char * node_name) { remote_proxy_t *proxy = NULL; CRM_CHECK(proxy_table != NULL, return); while ((proxy = find_connected_proxy_by_node(node_name)) != NULL) { /* mainloop_del_ipc_client() eventually calls remote_proxy_disconnected() * , which removes the entry from proxy_table. * Do not do this in a g_hash_table_iter_next() loop. 
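* (GLib invalidates live GHashTable iterators on any removal that does not go through g_hash_table_iter_remove(); the removal here happens as a side effect inside remote_proxy_disconnected(), so restarting the lookup after each disconnect is the safe pattern.)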
*/ if (proxy->source) { mainloop_del_ipc_client(proxy->source); } } return; } void lrm_state_disconnect_only(lrm_state_t * lrm_state) { int removed = 0; if (!lrm_state->conn) { return; } crm_trace("Disconnecting %s", lrm_state->node_name); remote_proxy_disconnect_by_node(lrm_state->node_name); ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn); if (is_not_set(fsa_input_register, R_SHUTDOWN)) { removed = g_hash_table_foreach_remove(lrm_state->pending_ops, fail_pending_op, lrm_state); crm_trace("Synthesized %d operation failures for %s", removed, lrm_state->node_name); } } void lrm_state_disconnect(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return; } lrm_state_disconnect_only(lrm_state); lrmd_api_delete(lrm_state->conn); lrm_state->conn = NULL; } int lrm_state_is_connected(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return FALSE; } return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn); } int lrm_state_poke_connection(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return -1; } return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn); } int lrm_state_ipc_connect(lrm_state_t * lrm_state) { int ret; if (!lrm_state->conn) { lrm_state->conn = lrmd_api_new(); ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, lrm_op_callback); } ret = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn, CRM_SYSTEM_CRMD, NULL); if (ret != pcmk_ok) { lrm_state->num_lrm_register_fails++; } else { lrm_state->num_lrm_register_fails = 0; } return ret; } static remote_proxy_t * crmd_remote_proxy_new(lrmd_t *lrmd, const char *node_name, const char *session_id, const char *channel) { static struct ipc_client_callbacks proxy_callbacks = { .dispatch = remote_proxy_dispatch, .destroy = remote_proxy_disconnected }; remote_proxy_t *proxy = remote_proxy_new(lrmd, &proxy_callbacks, node_name, session_id, channel); return proxy; } gboolean crmd_is_proxy_session(const char *session) { return g_hash_table_lookup(proxy_table, session) ? 
TRUE : FALSE; } void crmd_proxy_send(const char *session, xmlNode *msg) { remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); lrm_state_t *lrm_state = NULL; if (!proxy) { return; } crm_log_xml_trace(msg, "to-proxy"); lrm_state = lrm_state_find(proxy->node_name); if (lrm_state) { crm_trace("Sending event to %.8s on %s", proxy->session_id, proxy->node_name); remote_proxy_relay_event(proxy, msg); } } static void crmd_proxy_dispatch(const char *session, xmlNode *msg) { crm_log_xml_trace(msg, "CRMd-PROXY[inbound]"); crm_xml_add(msg, F_CRM_SYS_FROM, session); if (crmd_authorize_message(msg, NULL, session)) { route_message(C_IPC_MESSAGE, msg); } trigger_fsa(fsa_source); } static void remote_config_check(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc != pcmk_ok) { crm_err("Query resulted in an error: %s", pcmk_strerror(rc)); if (rc == -EACCES || rc == -pcmk_err_schema_validation) { crm_err("The cluster is mis-configured - shutting down and staying down"); } } else { lrmd_t * lrmd = (lrmd_t *)user_data; crm_time_t *now = crm_time_new(NULL); GHashTable *config_hash = crm_str_table_new(); crm_debug("Call %d : Parsing CIB options", call_id); unpack_instance_attributes( output, output, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, now); /* Now send it to the remote peer */ remote_proxy_check(lrmd, config_hash); g_hash_table_destroy(config_hash); crm_time_free(now); } } static void crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) { lrm_state_t *lrm_state = userdata; const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); const char *op = crm_element_value(msg, F_LRMD_IPC_OP); if (safe_str_eq(op, LRMD_IPC_OP_NEW)) { const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session, channel); if (proxy != NULL) { /* Look up stonith-watchdog-timeout and send to the remote peer for validation */ int rc = fsa_cib_conn->cmds->query(fsa_cib_conn, XML_CIB_TAG_CRMCONFIG, NULL, cib_scope_local); fsa_cib_conn->cmds->register_callback_full(fsa_cib_conn, rc, 10, FALSE, lrmd, "remote_config_check", remote_config_check, NULL); } } else if (safe_str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ)) { char *now_s = NULL; time_t now = time(NULL); crm_notice("%s requested shutdown of its remote connection", lrm_state->node_name); if (!remote_ra_is_in_maintenance(lrm_state)) { now_s = crm_itoa(now); update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, TRUE); free(now_s); remote_proxy_ack_shutdown(lrmd); crm_warn("Reconnection attempts to %s may result in failures that must be cleared", lrm_state->node_name); } else { remote_proxy_nack_shutdown(lrmd); crm_notice("Remote resource for %s is not managed so no ordered shutdown happening", lrm_state->node_name); } return; } else if (safe_str_eq(op, LRMD_IPC_OP_REQUEST) && proxy && proxy->is_local) { /* this is for the crmd, which we are, so don't try * and connect/send to ourselves over ipc. instead * do it directly. 
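* (That is, the request is fed straight into our own message router via crmd_proxy_dispatch() below rather than over an IPC socket back to ourselves.)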
*/ int flags = 0; xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); CRM_CHECK(request != NULL, return); #if ENABLE_ACL CRM_CHECK(lrm_state->node_name, return); crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote"); crm_acl_get_set_user(request, F_LRMD_IPC_USER, lrm_state->node_name); #endif crmd_proxy_dispatch(session, request); crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags); if (flags & crm_ipc_client_response) { int msg_id = 0; xmlNode *op_reply = create_xml_node(NULL, "ack"); crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); remote_proxy_relay_response(proxy, op_reply, msg_id); free_xml(op_reply); } } else { remote_proxy_cb(lrmd, lrm_state->node_name, msg); } } int lrm_state_remote_connect_async(lrm_state_t * lrm_state, const char *server, int port, int timeout_ms) { int ret; if (!lrm_state->conn) { lrm_state->conn = lrmd_remote_api_new(lrm_state->node_name, server, port); if (!lrm_state->conn) { return -1; } ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, remote_lrm_op_callback); lrmd_internal_set_proxy_callback(lrm_state->conn, lrm_state, crmd_remote_proxy_cb); } crm_trace("initiating remote connection to %s at %d with timeout %d", server, port, timeout_ms); ret = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn, lrm_state->node_name, timeout_ms); if (ret != pcmk_ok) { lrm_state->num_lrm_register_fails++; } else { lrm_state->num_lrm_register_fails = 0; } return ret; } int lrm_state_get_metadata(lrm_state_t * lrm_state, const char *class, const char *provider, const char *agent, char **output, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata(lrm_state->conn, class, provider, agent, output, options); } int lrm_state_cancel(lrm_state_t * lrm_state, const char *rsc_id, const char *action, int interval) { if (!lrm_state->conn) { return -ENOTCONN; } /* Figure out a way to make this async? * NOTICE: Currently it's synced and directly acknowledged in do_lrm_invoke(). 
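* An async variant would presumably mirror the shape of the other lrmd client calls, e.g. conn->cmds->cancel_async(conn, rsc_id, action, interval, done_cb) (a hypothetical signature; no such API exists today), with the direct acknowledgement moving into the callback.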
*/ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_cancel(lrm_state, rsc_id, action, interval); } return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id, action, interval); } lrmd_rsc_info_t * lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc = NULL; if (!lrm_state->conn) { return NULL; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_get_rsc_info(lrm_state, rsc_id); } rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id); if (rsc == NULL) { /* only contact the lrmd if we don't already have a cached rsc info */ rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options); if (rsc == NULL) { return NULL; } /* cache the result */ g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc); } return lrmd_copy_rsc_info(rsc); } int lrm_state_exec(lrm_state_t * lrm_state, const char *rsc_id, const char *action, const char *userdata, int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ lrmd_key_value_t * params) { if (!lrm_state->conn) { lrmd_key_value_freeall(params); return -ENOTCONN; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_exec(lrm_state, rsc_id, action, userdata, interval, timeout, start_delay, params); } return ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id, action, userdata, interval, timeout, start_delay, lrmd_opt_notify_changes_only, params); } int lrm_state_register_rsc(lrm_state_t * lrm_state, const char *rsc_id, const char *class, const char *provider, const char *agent, enum lrmd_call_options options) { - if (!lrm_state->conn) { + lrmd_t *conn = (lrmd_t *) lrm_state->conn; + + if (conn == NULL) { return -ENOTCONN; } - /* optimize this... this function is a synced round trip from client to daemon. - * The crmd/lrm.c code path should be re-factored to allow the register of resources - * to be performed async. The lrmd client api needs to make an async version - * of register available. */ if (is_remote_lrmd_ra(agent, provider, NULL)) { - return lrm_state_find_or_create(rsc_id) ? pcmk_ok : -1; + return lrm_state_find_or_create(rsc_id)? pcmk_ok : -EINVAL; } - return ((lrmd_t *) lrm_state->conn)->cmds->register_rsc(lrm_state->conn, rsc_id, class, - provider, agent, options); + /* @TODO Implement an asynchronous version of this (currently a blocking + * call to the lrmd). + */ + return conn->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider, + agent, options); } int lrm_state_unregister_rsc(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* optimize this... this function is a synced round trip from client to daemon. * The crmd/lrm.c code path that uses this function should always treat it as an * async operation. The lrmd client api needs to make an async version unreg available. 
*/ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { lrm_state_destroy(rsc_id); return pcmk_ok; } g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id); return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options); } /* * functions for sending alerts via local LRMD connection */ static GListPtr crmd_alert_list = NULL; void crmd_unpack_alerts(xmlNode *alerts) { pe_free_alert_list(crmd_alert_list); crmd_alert_list = pe_unpack_alerts(alerts); } void crmd_alert_node_event(crm_node_t *node) { lrm_state_t *lrm_state; if (crmd_alert_list == NULL) { return; } lrm_state = lrm_state_find(fsa_our_uname); if (lrm_state == NULL) { return; } lrmd_send_node_alert((lrmd_t *) lrm_state->conn, crmd_alert_list, node->uname, node->id, node->state); } void crmd_alert_fencing_op(stonith_event_t * e) { char *desc; lrm_state_t *lrm_state; if (crmd_alert_list == NULL) { return; } lrm_state = lrm_state_find(fsa_our_uname); if (lrm_state == NULL) { return; } desc = crm_strdup_printf("Operation %s of %s by %s for %s@%s: %s (ref=%s)", e->action, e->target, (e->executioner? e->executioner : ""), e->client_origin, e->origin, pcmk_strerror(e->result), e->id); lrmd_send_fencing_alert((lrmd_t *) lrm_state->conn, crmd_alert_list, e->target, e->operation, desc, e->result); free(desc); } void crmd_alert_resource_op(const char *node, lrmd_event_data_t * op) { lrm_state_t *lrm_state; if (crmd_alert_list == NULL) { return; } lrm_state = lrm_state_find(fsa_our_uname); if (lrm_state == NULL) { return; } lrmd_send_resource_alert((lrmd_t *) lrm_state->conn, crmd_alert_list, node, op); } diff --git a/crmd/remote_lrmd_ra.c b/crmd/remote_lrmd_ra.c index 7ac6941c81..b91336e910 100644 --- a/crmd/remote_lrmd_ra.c +++ b/crmd/remote_lrmd_ra.c @@ -1,1264 +1,1267 @@ /* * Copyright (C) 2013 David Vossel * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #define REMOTE_LRMD_RA "remote" /* The max start timeout before cmd retry */ #define MAX_START_TIMEOUT_MS 10000 typedef struct remote_ra_cmd_s { /*! the local node the cmd is issued from */ char *owner; /*! the remote node the cmd is executed on */ char *rsc_id; /*! the action to execute */ char *action; /*! some string the client wants us to give it back */ char *userdata; char *exit_reason; // descriptive text on error /*! start delay in ms */ int start_delay; /*! timer id used for start delay. */ int delay_id; /*! timeout in ms for cmd */ int timeout; int remaining_timeout; /*! recurring interval in ms */ int interval; /*! interval timer id */ int interval_id; int reported_success; int monitor_timeout_id; int takeover_timeout_id; /*! action parameters */ lrmd_key_value_t *params; /*! 
executed rc */ int rc; int op_status; int call_id; time_t start_time; gboolean cancel; } remote_ra_cmd_t; enum remote_migration_status { expect_takeover = 1, takeover_complete, }; typedef struct remote_ra_data_s { crm_trigger_t *work; remote_ra_cmd_t *cur_cmd; GList *cmds; GList *recurring_cmds; enum remote_migration_status migrate_status; gboolean active; gboolean is_maintenance; /* kind of complex to determine from crmd-context * so we have it signalled back with the * transition from pengine */ } remote_ra_data_t; static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms); static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd); static GList *fail_all_monitor_cmds(GList * list); static void free_cmd(gpointer user_data) { remote_ra_cmd_t *cmd = user_data; if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->interval_id) { g_source_remove(cmd->interval_id); } if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); } if (cmd->takeover_timeout_id) { g_source_remove(cmd->takeover_timeout_id); } free(cmd->owner); free(cmd->rsc_id); free(cmd->action); free(cmd->userdata); free(cmd->exit_reason); lrmd_key_value_freeall(cmd->params); free(cmd); } static int generate_callid(void) { static int remote_ra_callid = 0; remote_ra_callid++; if (remote_ra_callid <= 0) { remote_ra_callid = 1; } return remote_ra_callid; } static gboolean recurring_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->interval_id = 0; connection_rsc = lrm_state_find(cmd->rsc_id); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd); ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); } return FALSE; } static gboolean start_delay_helper(gpointer data) { remote_ra_cmd_t *cmd = data; lrm_state_t *connection_rsc = NULL; cmd->delay_id = 0; connection_rsc = lrm_state_find(cmd->rsc_id); if (connection_rsc && connection_rsc->remote_ra_data) { remote_ra_data_t *ra_data = connection_rsc->remote_ra_data; mainloop_set_trigger(ra_data->work); } return FALSE; } /*! * \internal * \brief Handle cluster communication related to pacemaker_remote node joining * * \param[in] node_name Name of newly integrated pacemaker_remote node */ static void remote_node_up(const char *node_name) { int call_opt, call_id = 0; xmlNode *update, *state; crm_node_t *node; CRM_CHECK(node_name != NULL, return); crm_info("Announcing pacemaker_remote node %s", node_name); /* Clear node's operation history. The node's transient attributes should * and normally will be cleared when the node leaves, but since remote node * state has a number of corner cases, clear them here as well, to be sure. */ call_opt = crmd_cib_smart_opt(); erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt); erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt); /* Clear node's probed attribute */ update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE); /* Ensure node is in the remote peer cache with member status */ node = crm_remote_peer_get(node_name); CRM_CHECK(node != NULL, return); crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, 0); /* pacemaker_remote nodes don't participate in the membership layer, * so cluster nodes don't automatically get notified when they come and go. 
* We send a cluster message to the DC, and update the CIB node state entry, * so the DC will get it sooner (via message) or later (via CIB refresh), * and any other interested parties can query the CIB. */ send_remote_state_message(node_name, TRUE); update = create_xml_node(NULL, XML_CIB_TAG_STATUS); state = create_node_state_update(node, node_update_cluster, update, __FUNCTION__); /* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever * needs to be fenced, this flag will allow various actions to determine * whether the fencing has happened yet. */ crm_xml_add(state, XML_NODE_IS_FENCED, "0"); /* TODO: If the remote connection drops, and this (async) CIB update either * failed or has not yet completed, later actions could mistakenly think the * node has already been fenced (if the XML_NODE_IS_FENCED attribute was * previously set, because it won't have been cleared). This could prevent * actual fencing or allow recurring monitor failures to be cleared too * soon. Ideally, we wouldn't rely on the CIB for the fenced status. */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL); if (call_id < 0) { crm_perror(LOG_WARNING, "%s CIB node state setup", node_name); } free_xml(update); } enum down_opts { DOWN_KEEP_LRM, DOWN_ERASE_LRM }; /*! * \internal * \brief Handle cluster communication related to pacemaker_remote node leaving * * \param[in] node_name Name of lost node * \param[in] opts Whether to keep or erase LRM history */ static void remote_node_down(const char *node_name, const enum down_opts opts) { xmlNode *update; int call_id = 0; int call_opt = crmd_cib_smart_opt(); crm_node_t *node; /* Purge node from attrd's memory */ update_attrd_remote_node_removed(node_name, NULL); /* Purge node's transient attributes */ erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt); /* Normally, the LRM operation history should be kept until the node comes * back up. However, after a successful fence, we want to clear it, so we * don't think resources are still running on the node. */ if (opts == DOWN_ERASE_LRM) { erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt); } /* Ensure node is in the remote peer cache with lost state */ node = crm_remote_peer_get(node_name); CRM_CHECK(node != NULL, return); crm_update_peer_state(__FUNCTION__, node, CRM_NODE_LOST, 0); /* Notify DC */ send_remote_state_message(node_name, FALSE); /* Update CIB node state */ update = create_xml_node(NULL, XML_CIB_TAG_STATUS); create_node_state_update(node, node_update_cluster, update, __FUNCTION__); fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL); if (call_id < 0) { crm_perror(LOG_ERR, "%s CIB node state update", node_name); } free_xml(update); } /*! * \internal * \brief Handle effects of a remote RA command on node state * * \param[in] cmd Completed remote RA command */ static void check_remote_node_state(remote_ra_cmd_t *cmd) { /* Only successful actions can change node state */ if (cmd->rc != PCMK_OCF_OK) { return; } if (safe_str_eq(cmd->action, "start")) { remote_node_up(cmd->rsc_id); } else if (safe_str_eq(cmd->action, "migrate_from")) { /* After a successful migration, we don't need to do remote_node_up() * because the DC already knows the node is up, and we don't want to * clear LRM history etc. We do need to add the remote node to this * host's remote peer cache, because (unless it happens to be DC) * it hasn't been tracking the remote node, and other code relies on * the cache to distinguish remote nodes from unseen cluster nodes. 
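* (crm_remote_peer_get() below is a get-or-create lookup, so simply calling it is what inserts the node into the remote peer cache.)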
*/ crm_node_t *node = crm_remote_peer_get(cmd->rsc_id); CRM_CHECK(node != NULL, return); crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, 0); } else if (safe_str_eq(cmd->action, "stop")) { lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id); remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL; if (ra_data) { if (ra_data->migrate_status != takeover_complete) { /* Stop means down if we didn't successfully migrate elsewhere */ remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM); } else if (AM_I_DC == FALSE) { /* Only the connection host and DC track node state, * so if the connection migrated elsewhere and we aren't DC, * un-cache the node, so we don't have stale info */ crm_remote_peer_cache_remove(cmd->rsc_id); } } } /* We don't do anything for successful monitors, which is correct for * routine recurring monitors, and for monitors on nodes where the * connection isn't supposed to be (the cluster will stop the connection in * that case). However, if the initial probe finds the connection already * active on the node where we want it, we probably should do * remote_node_up(). Unfortunately, we can't distinguish that case here. * Given that connections have to be initiated by the cluster, the chance of * that should be close to zero. */ } static void report_remote_ra_result(remote_ra_cmd_t * cmd) { lrmd_event_data_t op = { 0, }; check_remote_node_state(cmd); op.type = lrmd_event_exec_complete; op.rsc_id = cmd->rsc_id; op.op_type = cmd->action; op.user_data = cmd->userdata; op.exit_reason = cmd->exit_reason; op.timeout = cmd->timeout; op.interval = cmd->interval; op.rc = cmd->rc; op.op_status = cmd->op_status; op.t_run = cmd->start_time; op.t_rcchange = cmd->start_time; if (cmd->reported_success && cmd->rc != PCMK_OCF_OK) { op.t_rcchange = time(NULL); /* This edge case should never occur in practice, but if it does, the * result is that a failure will not be processed correctly. It is only * remotely possible because we can detect that a connection resource's TCP * connection has failed at any moment after start has completed. The actual * recurring operation is just a connectivity ping. * * Basically, we are not guaranteed that a first successful monitor op and * a subsequent failed monitor op will not carry the same timestamp. We have * to make it look like the operations occurred at separate times.
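* To guarantee that, t_rcchange is bumped by one second below when it * collides with t_run.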
*/ if (op.t_rcchange == op.t_run) { op.t_rcchange++; } } if (cmd->params) { lrmd_key_value_t *tmp; op.params = crm_str_table_new(); for (tmp = cmd->params; tmp; tmp = tmp->next) { g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value)); } } op.call_id = cmd->call_id; op.remote_nodename = cmd->owner; lrm_op_callback(&op); if (op.params) { g_hash_table_destroy(op.params); } } static void update_remaining_timeout(remote_ra_cmd_t * cmd) { cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000; } static gboolean retry_start_cmd_cb(gpointer data) { lrm_state_t *lrm_state = data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd = NULL; int rc = -1; if (!ra_data || !ra_data->cur_cmd) { return FALSE; } cmd = ra_data->cur_cmd; if (safe_str_neq(cmd->action, "start") && safe_str_neq(cmd->action, "migrate_from")) { return FALSE; } update_remaining_timeout(cmd); if (cmd->remaining_timeout > 0) { rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout); } if (rc != 0) { cmd->rc = PCMK_OCF_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; report_remote_ra_result(cmd); if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } ra_data->cur_cmd = NULL; free_cmd(cmd); } else { /* wait for connection event */ } return FALSE; } static gboolean connection_takeover_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; crm_info("takeover event timed out for node %s", cmd->rsc_id); cmd->takeover_timeout_id = 0; lrm_state = lrm_state_find(cmd->rsc_id); handle_remote_ra_stop(lrm_state, cmd); free_cmd(cmd); return FALSE; } static gboolean monitor_timeout_cb(gpointer data) { lrm_state_t *lrm_state = NULL; remote_ra_cmd_t *cmd = data; lrm_state = lrm_state_find(cmd->rsc_id); - crm_info("Poke async response timed out for node %s (%p)", cmd->rsc_id, lrm_state); + crm_info("Timed out waiting for remote poke response from %s%s", + cmd->rsc_id, (lrm_state? "" : " (no LRM state)")); cmd->monitor_timeout_id = 0; cmd->op_status = PCMK_LRM_OP_TIMEOUT; cmd->rc = PCMK_OCF_UNKNOWN_ERROR; if (lrm_state && lrm_state->remote_ra_data) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (ra_data->cur_cmd == cmd) { ra_data->cur_cmd = NULL; } if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } } report_remote_ra_result(cmd); free_cmd(cmd); if(lrm_state) { lrm_state_disconnect(lrm_state); } return FALSE; } static void synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type) { lrmd_event_data_t op = { 0, }; if (lrm_state == NULL) { /* if lrm_state not given assume local */ lrm_state = lrm_state_find(fsa_our_uname); } CRM_ASSERT(lrm_state != NULL); op.type = lrmd_event_exec_complete; op.rsc_id = rsc_id; op.op_type = op_type; op.rc = PCMK_OCF_OK; op.op_status = PCMK_LRM_OP_DONE; op.t_run = time(NULL); op.t_rcchange = op.t_run; op.call_id = generate_callid(); process_lrm_event(lrm_state, &op, NULL); } void remote_lrm_op_callback(lrmd_event_data_t * op) { gboolean cmd_handled = FALSE; lrm_state_t *lrm_state = NULL; remote_ra_data_t *ra_data = NULL; remote_ra_cmd_t *cmd = NULL; crm_debug("remote connection event - event_type:%s node:%s action:%s rc:%s op_status:%s", lrmd_event_type2str(op->type), op->remote_nodename, op->op_type ? 
op->op_type : "none", services_ocf_exitcode_str(op->rc), services_lrm_status_str(op->op_status)); lrm_state = lrm_state_find(op->remote_nodename); if (!lrm_state || !lrm_state->remote_ra_data) { crm_debug("lrm_state info not found for remote lrmd connection event"); return; } ra_data = lrm_state->remote_ra_data; /* Another client has connected to the remote daemon, * so determine whether this is expected. */ if (op->type == lrmd_event_new_client) { /* great, we knew this was coming */ if (ra_data->migrate_status == expect_takeover) { ra_data->migrate_status = takeover_complete; } else { crm_err("Unexpected pacemaker_remote client takeover for %s. Disconnecting", op->remote_nodename); /* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */ /* Do not free lrm_state->conn yet. */ /* It'll be freed in the following stop action. */ lrm_state_disconnect_only(lrm_state); } return; } /* filter all EXEC events up */ if (op->type == lrmd_event_exec_complete) { if (ra_data->migrate_status == takeover_complete) { crm_debug("ignoring event, this connection is taken over by another node"); } else { lrm_op_callback(op); } return; } if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL) && (ra_data->active == TRUE)) { if (!remote_ra_is_in_maintenance(lrm_state)) { - crm_err("Unexpected disconnect on remote-node %s", lrm_state->node_name); + crm_err("Lost connection to Pacemaker Remote node %s", + lrm_state->node_name); ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds); ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds); } else { - crm_notice("Disconnect on unmanaged remote-node %s", lrm_state->node_name); + crm_notice("Unmanaged Pacemaker Remote node %s disconnected", + lrm_state->node_name); /* Do roughly what a 'stop' on the remote-resource would do */ handle_remote_ra_stop(lrm_state, NULL); remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM); /* now fake the reply of a successful 'stop' */ synthesize_lrmd_success(NULL, lrm_state->node_name, "stop"); } return; } if (!ra_data->cur_cmd) { crm_debug("no event to match"); return; } cmd = ra_data->cur_cmd; /* Start actions and migrate_from actions complete after connection * comes back to us. */ if (op->type == lrmd_event_connect && (safe_str_eq(cmd->action, "start") || safe_str_eq(cmd->action, "migrate_from"))) { if (op->connection_rc < 0) { update_remaining_timeout(cmd); if (op->connection_rc == -ENOKEY) { // Hard error, don't retry cmd->op_status = PCMK_LRM_OP_ERROR; cmd->rc = PCMK_OCF_INVALID_PARAM; cmd->exit_reason = strdup("Authentication key not readable"); } else if (cmd->remaining_timeout > 3000) { crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout); g_timeout_add(1000, retry_start_cmd_cb, lrm_state); return; } else { crm_trace("can't reschedule start, remaining timeout too small %d", cmd->remaining_timeout); cmd->op_status = PCMK_LRM_OP_TIMEOUT; cmd->rc = PCMK_OCF_UNKNOWN_ERROR; } } else { lrm_state_reset_tables(lrm_state, TRUE); cmd->rc = PCMK_OCF_OK; cmd->op_status = PCMK_LRM_OP_DONE; ra_data->active = TRUE; } crm_debug("remote lrmd connect event matched %s action. ", cmd->action); report_remote_ra_result(cmd); cmd_handled = TRUE; } else if (op->type == lrmd_event_poke && safe_str_eq(cmd->action, "monitor")) { if (cmd->monitor_timeout_id) { g_source_remove(cmd->monitor_timeout_id); cmd->monitor_timeout_id = 0; } /* Only report success the first time, after that only worry about failures.
* For this function, if we get the poke back, it is always a success. Pokes * only fail if the send fails, or the response times out. */ if (!cmd->reported_success) { cmd->rc = PCMK_OCF_OK; cmd->op_status = PCMK_LRM_OP_DONE; report_remote_ra_result(cmd); cmd->reported_success = 1; } crm_debug("remote lrmd poke event matched %s action. ", cmd->action); /* success, keep rescheduling if interval is present. */ if (cmd->interval && (cmd->cancel == FALSE)) { ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd); cmd->interval_id = g_timeout_add(cmd->interval, recurring_helper, cmd); cmd = NULL; /* prevent free */ } cmd_handled = TRUE; } else if (op->type == lrmd_event_disconnect && safe_str_eq(cmd->action, "monitor")) { if (ra_data->active == TRUE && (cmd->cancel == FALSE)) { cmd->rc = PCMK_OCF_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; report_remote_ra_result(cmd); crm_err("remote-node %s unexpectedly disconnected during monitor operation", lrm_state->node_name); } cmd_handled = TRUE; } else if (op->type == lrmd_event_new_client && safe_str_eq(cmd->action, "stop")) { handle_remote_ra_stop(lrm_state, cmd); cmd_handled = TRUE; } else { crm_debug("Event did not match %s action", ra_data->cur_cmd->action); } if (cmd_handled) { ra_data->cur_cmd = NULL; if (ra_data->cmds) { mainloop_set_trigger(ra_data->work); } free_cmd(cmd); } } static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd) { remote_ra_data_t *ra_data = NULL; CRM_ASSERT(lrm_state); ra_data = lrm_state->remote_ra_data; if (ra_data->migrate_status != takeover_complete) { /* delete pending ops whenever the remote connection is intentionally stopped */ g_hash_table_remove_all(lrm_state->pending_ops); } else { /* we no longer hold the history if this connection has been migrated; * however, we keep the metadata cache for future use */ lrm_state_reset_tables(lrm_state, FALSE); } ra_data->active = FALSE; lrm_state_disconnect(lrm_state); if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } ra_data->cmds = NULL; ra_data->recurring_cmds = NULL; ra_data->cur_cmd = NULL; if (cmd) { cmd->rc = PCMK_OCF_OK; cmd->op_status = PCMK_LRM_OP_DONE; report_remote_ra_result(cmd); } } static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms) { const char *server = NULL; lrmd_key_value_t *tmp = NULL; int port = 0; int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ?
MAX_START_TIMEOUT_MS : timeout_ms; for (tmp = cmd->params; tmp; tmp = tmp->next) { if (safe_str_eq(tmp->key, "addr") || safe_str_eq(tmp->key, "server")) { server = tmp->value; } if (safe_str_eq(tmp->key, "port")) { port = atoi(tmp->value); } } return lrm_state_remote_connect_async(lrm_state, server, port, timeout_used); } static gboolean handle_remote_ra_exec(gpointer user_data) { int rc = 0; lrm_state_t *lrm_state = user_data; remote_ra_data_t *ra_data = lrm_state->remote_ra_data; remote_ra_cmd_t *cmd; GList *first = NULL; if (ra_data->cur_cmd) { /* still waiting on previous cmd */ return TRUE; } while (ra_data->cmds) { first = ra_data->cmds; cmd = first->data; if (cmd->delay_id) { /* still waiting for start delay timer to trip */ return TRUE; } ra_data->cmds = g_list_remove_link(ra_data->cmds, first); g_list_free_1(first); if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) { ra_data->migrate_status = 0; rc = handle_remote_ra_start(lrm_state, cmd, cmd->timeout); if (rc == 0) { /* take care of this later when we get async connection result */ crm_debug("began remote lrmd connect, waiting for connect event."); ra_data->cur_cmd = cmd; return TRUE; } else { crm_debug("connect failed, not expecting to match any connection event later"); cmd->rc = PCMK_OCF_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, "monitor")) { if (lrm_state_is_connected(lrm_state) == TRUE) { rc = lrm_state_poke_connection(lrm_state); if (rc < 0) { cmd->rc = PCMK_OCF_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; } } else { rc = -1; cmd->op_status = PCMK_LRM_OP_DONE; cmd->rc = PCMK_OCF_NOT_RUNNING; } if (rc == 0) { crm_debug("poked remote lrmd at node %s, waiting for async response.", cmd->rsc_id); ra_data->cur_cmd = cmd; cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd); return TRUE; } report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, "stop")) { if (ra_data->migrate_status == expect_takeover) { /* briefly wait on stop for the takeover event to occur. If the * takeover event does not occur during the wait period, that's fine. * It just means that the remote-node's lrm_status section is going to get * cleared which will require all the resources running in the remote-node * to be explicitly re-detected via probe actions. If the takeover does occur * successfully, then we can leave the status section intact. 
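* We wait at most half of the stop action's configured timeout for the * takeover before falling back to a normal stop.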
*/ cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd); ra_data->cur_cmd = cmd; return TRUE; } handle_remote_ra_stop(lrm_state, cmd); } else if (!strcmp(cmd->action, "migrate_to")) { ra_data->migrate_status = expect_takeover; cmd->rc = PCMK_OCF_OK; cmd->op_status = PCMK_LRM_OP_DONE; report_remote_ra_result(cmd); } else if (!strcmp(cmd->action, "reload")) { /* reloads are a no-op right now, add logic here when they become important */ cmd->rc = PCMK_OCF_OK; cmd->op_status = PCMK_LRM_OP_DONE; report_remote_ra_result(cmd); } free_cmd(cmd); } return TRUE; } static void remote_ra_data_init(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = NULL; if (lrm_state->remote_ra_data) { return; } ra_data = calloc(1, sizeof(remote_ra_data_t)); ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state); lrm_state->remote_ra_data = ra_data; } void remote_ra_cleanup(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; if (!ra_data) { return; } if (ra_data->cmds) { g_list_free_full(ra_data->cmds, free_cmd); } if (ra_data->recurring_cmds) { g_list_free_full(ra_data->recurring_cmds, free_cmd); } mainloop_destroy_trigger(ra_data->work); free(ra_data); lrm_state->remote_ra_data = NULL; } gboolean is_remote_lrmd_ra(const char *agent, const char *provider, const char *id) { if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) { return TRUE; } if (id && lrm_state_find(id) && safe_str_neq(id, fsa_our_uname)) { return TRUE; } return FALSE; } lrmd_rsc_info_t * remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id) { lrmd_rsc_info_t *info = NULL; if ((lrm_state_find(rsc_id))) { info = calloc(1, sizeof(lrmd_rsc_info_t)); info->id = strdup(rsc_id); info->type = strdup(REMOTE_LRMD_RA); info->standard = strdup(PCMK_RESOURCE_CLASS_OCF); info->provider = strdup("pacemaker"); } return info; } static gboolean is_remote_ra_supported_action(const char *action) { if (!action) { return FALSE; } else if (strcmp(action, "start") && strcmp(action, "stop") && strcmp(action, "reload") && strcmp(action, "migrate_to") && strcmp(action, "migrate_from") && strcmp(action, "monitor")) { return FALSE; } return TRUE; } static GList * fail_all_monitor_cmds(GList * list) { GList *rm_list = NULL; remote_ra_cmd_t *cmd = NULL; GListPtr gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if (cmd->interval > 0 && safe_str_eq(cmd->action, "monitor")) { rm_list = g_list_append(rm_list, cmd); } } for (gIter = rm_list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; cmd->rc = PCMK_OCF_UNKNOWN_ERROR; cmd->op_status = PCMK_LRM_OP_ERROR; crm_trace("Pre-emptively failing %s %s (interval=%d, %s)", cmd->action, cmd->rsc_id, cmd->interval, cmd->userdata); report_remote_ra_result(cmd); list = g_list_remove(list, cmd); free_cmd(cmd); } /* frees only the list data, not the cmds */ g_list_free(rm_list); return list; } static GList * remove_cmd(GList * list, const char *action, int interval) { remote_ra_cmd_t *cmd = NULL; GListPtr gIter = NULL; for (gIter = list; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if (cmd->interval == interval && safe_str_eq(cmd->action, action)) { break; } cmd = NULL; } if (cmd) { list = g_list_remove(list, cmd); free_cmd(cmd); } return list; } int remote_ra_cancel(lrm_state_t * lrm_state, const char *rsc_id, const char *action, int interval) { lrm_state_t *connection_rsc = NULL; remote_ra_data_t *ra_data = NULL; 
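/* Commands for a connection resource are queued on its own lrm_state, keyed * by resource ID; if we aren't tracking this connection, there is nothing to * cancel. */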
connection_rsc = lrm_state_find(rsc_id); if (!connection_rsc || !connection_rsc->remote_ra_data) { return -EINVAL; } ra_data = connection_rsc->remote_ra_data; ra_data->cmds = remove_cmd(ra_data->cmds, action, interval); ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action, interval); if (ra_data->cur_cmd && (ra_data->cur_cmd->interval == interval) && (safe_str_eq(ra_data->cur_cmd->action, action))) { ra_data->cur_cmd->cancel = TRUE; } return 0; } static remote_ra_cmd_t * handle_dup_monitor(remote_ra_data_t *ra_data, int interval, const char *userdata) { GList *gIter = NULL; remote_ra_cmd_t *cmd = NULL; /* There are 3 places a potential duplicate monitor operation * could exist: * 1. recurring_cmds list, where the op is waiting for its next interval * 2. cmds list, where the op is queued to get executed immediately * 3. cur_cmd, which means the monitor op is in flight right now. */ if (interval == 0) { return NULL; } if (ra_data->cur_cmd && ra_data->cur_cmd->cancel == FALSE && ra_data->cur_cmd->interval == interval && safe_str_eq(ra_data->cur_cmd->action, "monitor")) { cmd = ra_data->cur_cmd; goto handle_dup; } for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if (cmd->interval == interval && safe_str_eq(cmd->action, "monitor")) { goto handle_dup; } } for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) { cmd = gIter->data; if (cmd->interval == interval && safe_str_eq(cmd->action, "monitor")) { goto handle_dup; } } return NULL; handle_dup: crm_trace("merging duplicate monitor cmd %s_monitor_%d", cmd->rsc_id, interval); /* update the userdata */ if (userdata) { free(cmd->userdata); cmd->userdata = strdup(userdata); } /* if we've already reported success, generate a new call id */ if (cmd->reported_success) { cmd->start_time = time(NULL); cmd->call_id = generate_callid(); cmd->reported_success = 0; } /* if we have an interval_id set, that means we are in the process of * waiting for this cmd's next interval. Instead of waiting, cancel * the timer and execute the action immediately */ if (cmd->interval_id) { g_source_remove(cmd->interval_id); cmd->interval_id = 0; recurring_helper(cmd); } return cmd; } int remote_ra_exec(lrm_state_t * lrm_state, const char *rsc_id, const char *action, const char *userdata, int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ lrmd_key_value_t * params) { int rc = 0; lrm_state_t *connection_rsc = NULL; remote_ra_cmd_t *cmd = NULL; remote_ra_data_t *ra_data = NULL; if (is_remote_ra_supported_action(action) == FALSE) { rc = -EINVAL; goto exec_done; } connection_rsc = lrm_state_find(rsc_id); if (!connection_rsc) { rc = -EINVAL; goto exec_done; } remote_ra_data_init(connection_rsc); ra_data = connection_rsc->remote_ra_data; cmd = handle_dup_monitor(ra_data, interval, userdata); if (cmd) { return cmd->call_id; } cmd = calloc(1, sizeof(remote_ra_cmd_t)); cmd->owner = strdup(lrm_state->node_name); cmd->rsc_id = strdup(rsc_id); cmd->action = strdup(action); cmd->userdata = strdup(userdata); cmd->interval = interval; cmd->timeout = timeout; cmd->start_delay = start_delay; cmd->params = params; cmd->start_time = time(NULL); cmd->call_id = generate_callid(); if (cmd->start_delay) { cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } ra_data->cmds = g_list_append(ra_data->cmds, cmd); mainloop_set_trigger(ra_data->work); return cmd->call_id; exec_done: lrmd_key_value_freeall(params); return rc; } /*!
* \internal * \brief Immediately fail all monitors of a remote node, if proxied here * * \param[in] node_name Name of pacemaker_remote node */ void remote_ra_fail(const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (lrm_state && lrm_state_is_connected(lrm_state)) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; crm_info("Failing monitors on pacemaker_remote node %s", node_name); ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds); ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds); } } /* A guest node fencing implied by host fencing appears in the transition * graph as a stonith pseudo-event that lists the guest among its "downed" * nodes, roughly: * <pseudo_event operation="stonith" ...> <downed> <node id="guest1"/> </downed> </pseudo_event> */ #define XPATH_PSEUDO_FENCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \ "[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \ "/" XML_CIB_TAG_NODE /*! * \internal * \brief Check a pseudo-action for Pacemaker Remote node side effects * * \param[in] xml XML of pseudo-action to check */ void remote_ra_process_pseudo(xmlNode *xml) { xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE); if (numXpathResults(search) == 1) { xmlNode *result = getXpathResult(search, 0); /* Normally, we handle the necessary side effects of a guest node stop * action when reporting the remote agent's result. However, if the stop * is implied due to fencing, it will be a fencing pseudo-event, and * there won't be a result to report. Handle that case here. * * This will result in a duplicate call to remote_node_down() if the * guest stop was real instead of implied, but that shouldn't hurt. * * There is still one corner case that isn't handled: if a guest node * isn't running any resources when its host is fenced, it will appear * to be cleanly stopped, so there will be no pseudo-fence, and our * peer cache state will be incorrect unless and until the guest is * recovered. */ if (result) { const char *remote = ID(result); if (remote) { remote_node_down(remote, DOWN_ERASE_LRM); } } } freeXpathObject(search); } static void remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; xmlNode *update, *state; int call_opt, call_id = 0; crm_node_t *node; call_opt = crmd_cib_smart_opt(); node = crm_remote_peer_get(lrm_state->node_name); CRM_CHECK(node != NULL, return); update = create_xml_node(NULL, XML_CIB_TAG_STATUS); state = create_node_state_update(node, node_update_none, update, __FUNCTION__); crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0"); fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL); if (call_id < 0) { crm_perror(LOG_WARNING, "%s CIB node state update failed", lrm_state->node_name); } else { /* TODO: still not 100% sure that async update will succeed ... */ ra_data->is_maintenance = maintenance; } free_xml(update); } #define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \ "[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \ XML_GRAPH_TAG_MAINTENANCE /*!
* \internal * \brief Check a pseudo-action holding updates for maintenance state * * \param[in] xml XML of pseudo-action to check */ void remote_ra_process_maintenance_nodes(xmlNode *xml) { xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE); if (numXpathResults(search) == 1) { xmlNode *node; int cnt = 0, cnt_remote = 0; for (node = first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE); node; node = __xml_next(node)) { lrm_state_t *lrm_state = lrm_state_find(ID(node)); cnt++; if (lrm_state && lrm_state->remote_ra_data && ((remote_ra_data_t *) lrm_state->remote_ra_data)->active) { cnt_remote++; remote_ra_maintenance(lrm_state, crm_atoi(crm_element_value(node, XML_NODE_IS_MAINTENANCE), "0")); } } crm_trace("Action holds %d nodes (%d remotes found) " "adjusting maintenance-mode", cnt, cnt_remote); } freeXpathObject(search); } gboolean remote_ra_is_in_maintenance(lrm_state_t * lrm_state) { remote_ra_data_t *ra_data = lrm_state->remote_ra_data; return ra_data->is_maintenance; } diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c index fd75368cb9..2aa643387b 100644 --- a/crmd/te_callbacks.c +++ b/crmd/te_callbacks.c @@ -1,920 +1,963 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include /* For ONLINESTATUS etc */ void te_update_confirm(const char *event, xmlNode * msg); extern char *te_uuid; gboolean shuttingdown = FALSE; crm_graph_t *transition_graph; crm_trigger_t *transition_trigger = NULL; static unsigned long int stonith_max_attempts = 10; /* #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */ #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']" static const char * get_node_id(xmlNode * rsc_op) { xmlNode *node = rsc_op; while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); return ID(node); } void update_stonith_max_attempts(const char* value) { if (safe_str_eq(value, CRM_INFINITY_S)) { stonith_max_attempts = CRM_SCORE_INFINITY; } else { stonith_max_attempts = crm_int_helper(value, NULL); } } static void -te_legacy_update_diff(const char *event, xmlNode * diff) +te_update_diff_v1(const char *event, xmlNode *diff) { int lpc, max; xmlXPathObject *xpathObj = NULL; CRM_CHECK(diff != NULL, return); xml_log_patchset(LOG_TRACE, __FUNCTION__, diff); if (cib_config_changed(NULL, NULL, &diff)) { abort_transition(INFINITY, tg_restart, "Non-status change", diff); goto bail; /* configuration changed */ } /* Tickets Attributes - Added/Updated */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = 
getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Ticket attribute: update", aborted); goto bail; } freeXpathObject(xpathObj); /* Tickets Attributes - Removed */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Ticket attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); /* Transient Attributes - Added/Updated */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_TRANSIENT_NODEATTRS "//" XML_CIB_TAG_NVPAIR); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *attr = getXpathResult(xpathObj, lpc); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = NULL; if (safe_str_eq(CRM_OP_PROBED, name)) { value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); } if (crm_is_true(value) == FALSE) { abort_transition(INFINITY, tg_restart, "Transient attribute: update", attr); crm_log_xml_trace(attr, "Abort"); goto bail; } } freeXpathObject(xpathObj); /* Transient Attributes - Removed */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_TRANSIENT_NODEATTRS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Transient attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); /* - * Check for and fast-track the processing of LRM refreshes - * In large clusters this can result in _huge_ speedups + * Updates by, or in response to, TE actions will never contain updates + * for more than one resource at a time, so such updates indicate an + * LRM refresh. * - * Unfortunately we can only do so when there are no pending actions - * Otherwise we could miss updates we're waiting for and stall + * In that case, start a new transition rather than check each result + * individually, which can result in _huge_ speedups in large clusters. * + * Unfortunately, we can only do so when there are no pending actions. + * Otherwise, we could mistakenly throw away those results here, and + * the cluster will stall waiting for them and time out the operation. 
*/ - xpathObj = NULL; if (transition_graph->pending == 0) { - xpathObj = - xpath_search(diff, - "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" - XML_LRM_TAG_RESOURCE); - } - - max = numXpathResults(xpathObj); - if (max > 1) { - /* Updates by, or in response to, TE actions will never contain updates - * for more than one resource at a time - */ - crm_debug("Detected LRM refresh - %d resources updated: Skipping all resource events", max); - crm_log_xml_trace(diff, "lrm-refresh"); - abort_transition(INFINITY, tg_restart, "LRM Refresh", NULL); - goto bail; + xpathObj = xpath_search(diff, + "//" F_CIB_UPDATE_RESULT + "//" XML_TAG_DIFF_ADDED + "//" XML_LRM_TAG_RESOURCE); + max = numXpathResults(xpathObj); + if (max > 1) { + crm_debug("Ignoring resource operation updates due to LRM refresh of %d resources", + max); + crm_log_xml_trace(diff, "lrm-refresh"); + abort_transition(INFINITY, tg_restart, "LRM Refresh", NULL); + goto bail; + } + freeXpathObject(xpathObj); } - freeXpathObject(xpathObj); /* Process operation updates */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RSC_OP); - if (numXpathResults(xpathObj)) { -/* - - - - - -*/ - int lpc = 0, max = numXpathResults(xpathObj); + max = numXpathResults(xpathObj); + if (max > 0) { + int lpc = 0; for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } } freeXpathObject(xpathObj); /* Detect deleted (as opposed to replaced or added) actions - eg. crm_resource -C */ xpathObj = xpath_search(diff, "//" XML_TAG_DIFF_REMOVED "//" XML_LRM_TAG_RSC_OP); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { int path_max = 0; const char *op_id = NULL; char *rsc_op_xpath = NULL; xmlXPathObject *op_match = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; op_id = ID(match); path_max = strlen(rsc_op_template) + strlen(op_id) + 1; rsc_op_xpath = calloc(1, path_max); snprintf(rsc_op_xpath, path_max, rsc_op_template, op_id); op_match = xpath_search(diff, rsc_op_xpath); if (numXpathResults(op_match) == 0) { /* Prevent false positives by matching cancelations too */ const char *node = get_node_id(match); crm_action_t *cancelled = get_cancel_action(op_id, node); if (cancelled == NULL) { crm_debug("No match for deleted action %s (%s on %s)", rsc_op_xpath, op_id, node); abort_transition(INFINITY, tg_restart, "Resource op removal", match); freeXpathObject(op_match); free(rsc_op_xpath); goto bail; } else { crm_debug("Deleted lrm_rsc_op %s on %s was for graph event %d", op_id, node, cancelled->id); } } freeXpathObject(op_match); free(rsc_op_xpath); } bail: freeXpathObject(xpathObj); } -static void process_resource_updates( - const char *node, xmlNode *xml, xmlNode *change, const char *op, const char *xpath) +static void +process_lrm_resource_diff(xmlNode *lrm_resource, const char *node) +{ + for (xmlNode *rsc_op = __xml_first_child(lrm_resource); rsc_op != NULL; + rsc_op = __xml_next(rsc_op)) { + process_graph_event(rsc_op, node); + } +} + +static void +process_resource_updates(const char *node, xmlNode *xml, xmlNode *change, + const char *op, const char *xpath) { - xmlNode *cIter = NULL; xmlNode *rsc = NULL; - xmlNode *rsc_op = NULL; - int num_resources = 0; - if(xml == NULL) { + if (xml == NULL) { return; - } else if(strcmp((const char*)xml->name, XML_CIB_TAG_LRM) == 0) { + } else if (strcmp((const char*)xml->name, XML_CIB_TAG_LRM) == 
0) { xml = first_named_child(xml, XML_LRM_TAG_RESOURCES); crm_trace("Got %p in %s", xml, XML_CIB_TAG_LRM); } CRM_ASSERT(strcmp((const char*)xml->name, XML_LRM_TAG_RESOURCES) == 0); - for(cIter = xml->children; cIter; cIter = cIter->next) { - num_resources++; - } - - if(num_resources > 1) { - /* - * Check for and fast-track the processing of LRM refreshes - * In large clusters this can result in _huge_ speedups - * - * Unfortunately we can only do so when there are no pending actions - * Otherwise we could miss updates we're waiting for and stall - * - */ + /* + * Updates by, or in response to, TE actions will never contain updates + * for more than one resource at a time, so such updates indicate an + * LRM refresh. + * + * In that case, start a new transition rather than check each result + * individually, which can result in _huge_ speedups in large clusters. + * + * Unfortunately, we can only do so when there are no pending actions. + * Otherwise, we could mistakenly throw away those results here, and + * the cluster will stall waiting for them and time out the operation. + */ + if ((transition_graph->pending == 0) + && xml->children && xml->children->next) { - crm_debug("Detected LRM refresh - %d resources updated", num_resources); crm_log_xml_trace(change, "lrm-refresh"); abort_transition(INFINITY, tg_restart, "LRM Refresh", NULL); return; } for (rsc = __xml_first_child(xml); rsc != NULL; rsc = __xml_next(rsc)) { crm_trace("Processing %s", ID(rsc)); - for (rsc_op = __xml_first_child(rsc); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { - crm_trace("Processing %s", ID(rsc_op)); - process_graph_event(rsc_op, node); - } + process_lrm_resource_diff(rsc, node); } } #define NODE_PATT "/lrm[@id=" static char *get_node_from_xpath(const char *xpath) { char *nodeid = NULL; char *tmp = strstr(xpath, NODE_PATT); if(tmp) { tmp += strlen(NODE_PATT); tmp += 1; nodeid = strdup(tmp); tmp = strstr(nodeid, "\'"); CRM_ASSERT(tmp); tmp[0] = 0; } return nodeid; } static char *extract_node_uuid(const char *xpath) { char *mutable_path = strdup(xpath); char *node_uuid = NULL; char *search = NULL; char *match = NULL; match = strstr(mutable_path, "node_state[@id=\'"); if (match == NULL) { free(mutable_path); return NULL; } match += strlen("node_state[@id=\'"); search = strchr(match, '\''); if (search == NULL) { free(mutable_path); return NULL; } search[0] = 0; node_uuid = strdup(match); free(mutable_path); return node_uuid; } static void abort_unless_down(const char *xpath, const char *op, xmlNode *change, const char *reason) { char *node_uuid = NULL; crm_action_t *down = NULL; if(safe_str_neq(op, "delete")) { abort_transition(INFINITY, tg_restart, reason, change); return; } node_uuid = extract_node_uuid(xpath); if(node_uuid == NULL) { crm_err("Could not extract node ID from %s", xpath); abort_transition(INFINITY, tg_restart, reason, change); return; } down = match_down_event(node_uuid, TRUE); - if(down == NULL || down->executed == false) { + if (down == NULL) { crm_trace("Not expecting %s to be down (%s)", node_uuid, xpath); abort_transition(INFINITY, tg_restart, reason, change); } else { crm_trace("Expecting changes to %s (%s)", node_uuid, xpath); } free(node_uuid); } -void -te_update_diff(const char *event, xmlNode * msg) +static void +process_op_deletion(const char *xpath, xmlNode *change) { - int rc = -EINVAL; - int format = 1; - xmlNode *change = NULL; - const char *op = NULL; - - xmlNode *diff = NULL; + char *mutable_key = strdup(xpath); + char *key; + char *node_uuid; + crm_action_t *cancel = NULL; + + 
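+ // A deletion xpath ends with the operation key in quotes, e.g. + // (hypothetical): .../lrm_rsc_op[@id='myrsc_monitor_10000']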
// Extract the part of xpath between last pair of single quotes + key = strrchr(mutable_key, '\''); + if (key != NULL) { + *key = '\0'; + key = strrchr(mutable_key, '\''); + } + if (key == NULL) { + crm_warn("Ignoring malformed CIB update (resource deletion of %s)", + xpath); + free(mutable_key); + return; + } + ++key; - int p_add[] = { 0, 0, 0 }; - int p_del[] = { 0, 0, 0 }; + node_uuid = extract_node_uuid(xpath); + cancel = get_cancel_action(key, node_uuid); + if (cancel) { + crm_info("Cancellation of %s on %s confirmed (%d)", + key, node_uuid, cancel->id); + stop_te_timer(cancel->timer); + te_action_confirmed(cancel); + update_graph(transition_graph, cancel); + trigger_graph(); + } else { + abort_transition(INFINITY, tg_restart, "Resource operation removal", + change); + } + free(mutable_key); + free(node_uuid); +} - CRM_CHECK(msg != NULL, return); - crm_element_value_int(msg, F_CIB_RC, &rc); +static void +process_delete_diff(const char *xpath, const char *op, xmlNode *change) +{ + if (strstr(xpath, "/" XML_LRM_TAG_RSC_OP "[")) { + process_op_deletion(xpath, change); - if (transition_graph == NULL) { - crm_trace("No graph"); - return; + } else if (strstr(xpath, "/" XML_CIB_TAG_LRM "[")) { + abort_unless_down(xpath, op, change, "Resource state removal"); - } else if (rc < pcmk_ok) { - crm_trace("Filter rc=%d (%s)", rc, pcmk_strerror(rc)); - return; + } else if (strstr(xpath, "/" XML_CIB_TAG_STATE "[")) { + abort_unless_down(xpath, op, change, "Node state removal"); - } else if (transition_graph->complete == TRUE - && fsa_state != S_IDLE - && fsa_state != S_TRANSITION_ENGINE && fsa_state != S_POLICY_ENGINE) { - crm_trace("Filter state=%s, complete=%d", fsa_state2string(fsa_state), - transition_graph->complete); - return; + } else { + crm_trace("Ignoring delete of %s", xpath); } +} - op = crm_element_value(msg, F_CIB_OPERATION); - diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); +static void +process_node_state_diff(xmlNode *state, xmlNode *change, const char *op, + const char *xpath) +{ + xmlNode *lrm = first_named_child(state, XML_CIB_TAG_LRM); - xml_patch_versions(diff, p_add, p_del); - crm_debug("Processing (%s) diff: %d.%d.%d -> %d.%d.%d (%s)", op, - p_del[0], p_del[1], p_del[2], p_add[0], p_add[1], p_add[2], - fsa_state2string(fsa_state)); + process_resource_updates(ID(state), lrm, change, op, xpath); +} - crm_element_value_int(diff, "format", &format); - switch(format) { - case 1: - te_legacy_update_diff(event, diff); - return; - case 2: - /* Cool, we know what to do here */ - crm_log_xml_trace(diff, "Patch:Raw"); - break; - default: - crm_warn("Unknown patch format: %d", format); - return; +static void +process_status_diff(xmlNode *status, xmlNode *change, const char *op, + const char *xpath) +{ + for (xmlNode *state = __xml_first_child(status); state != NULL; + state = __xml_next(state)) { + process_node_state_diff(state, change, op, xpath); + } +} + +static void +process_cib_diff(xmlNode *cib, xmlNode *change, const char *op, + const char *xpath) +{ + xmlNode *status = first_named_child(cib, XML_CIB_TAG_STATUS); + xmlNode *config = first_named_child(cib, XML_CIB_TAG_CONFIGURATION); + + if (status) { + process_status_diff(status, change, op, xpath); + } + if (config) { + abort_transition(INFINITY, tg_restart, + "Non-status-only change", change); } +} + +static void +te_update_diff_v2(xmlNode *diff) +{ + crm_log_xml_trace(diff, "Patch:Raw"); - for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) { + for (xmlNode *change = __xml_first_child(diff); 
change != NULL; + change = __xml_next(change)) { + + xmlNode *match = NULL; const char *name = NULL; - const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); - xmlNode *match = NULL; - const char *node = NULL; - if(op == NULL) { + // Possible ops: create, modify, delete, move + const char *op = crm_element_value(change, XML_DIFF_OP); + + // Ignore uninteresting updates + if (op == NULL) { continue; - } else if(strcmp(op, "create") == 0) { - match = change->children; + } else if (xpath == NULL) { + crm_trace("Ignoring %s change for version field", op); + continue; - } else if(strcmp(op, "move") == 0) { + } else if (strcmp(op, "move") == 0) { + crm_trace("Ignoring move change at %s", xpath); continue; + } + + // Find the result of create/modify ops + if (strcmp(op, "create") == 0) { + match = change->children; - } else if(strcmp(op, "modify") == 0) { + } else if (strcmp(op, "modify") == 0) { match = first_named_child(change, XML_DIFF_RESULT); if(match) { match = match->children; } + + } else if (strcmp(op, "delete") != 0) { + crm_warn("Ignoring malformed CIB update (%s operation on %s is unrecognized)", + op, xpath); + continue; } - if(match) { + if (match) { if (match->type == XML_COMMENT_NODE) { crm_trace("Ignoring %s operation for comment at %s", op, xpath); continue; } name = (const char *)match->name; } crm_trace("Handling %s operation for %s%s%s", op, (xpath? xpath : "CIB"), (name? " matched by " : ""), (name? name : "")); - if(xpath == NULL) { - /* Version field, ignore */ - } else if(strstr(xpath, "/cib/configuration")) { - abort_transition(INFINITY, tg_restart, "Configuration change", change); - break; /* Won't be packaged with any resource operations we may be waiting for */ + if (strstr(xpath, "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION)) { + abort_transition(INFINITY, tg_restart, "Configuration change", + change); + break; // Won't be packaged with operation results we may be waiting for - } else if(strstr(xpath, "/"XML_CIB_TAG_TICKETS) || safe_str_eq(name, XML_CIB_TAG_TICKETS)) { + } else if (strstr(xpath, "/" XML_CIB_TAG_TICKETS) + || safe_str_eq(name, XML_CIB_TAG_TICKETS)) { abort_transition(INFINITY, tg_restart, "Ticket attribute change", change); - break; /* Won't be packaged with any resource operations we may be waiting for */ + break; // Won't be packaged with operation results we may be waiting for - } else if(strstr(xpath, "/"XML_TAG_TRANSIENT_NODEATTRS"[") || safe_str_eq(name, XML_TAG_TRANSIENT_NODEATTRS)) { + } else if (strstr(xpath, "/" XML_TAG_TRANSIENT_NODEATTRS "[") + || safe_str_eq(name, XML_TAG_TRANSIENT_NODEATTRS)) { abort_unless_down(xpath, op, change, "Transient attribute change"); - break; /* Won't be packaged with any resource operations we may be waiting for */ - - } else if(strstr(xpath, "/"XML_LRM_TAG_RSC_OP"[") && safe_str_eq(op, "delete")) { - crm_action_t *cancel = NULL; - char *mutable_key = strdup(xpath); - char *key, *node_uuid; - - /* Extract the part of xpath between last pair of single quotes */ - key = strrchr(mutable_key, '\''); - if (key != NULL) { - *key = '\0'; - key = strrchr(mutable_key, '\''); - } - if (key == NULL) { - crm_warn("Ignoring malformed CIB update (resource deletion)"); - free(mutable_key); - continue; - } - ++key; - - node_uuid = extract_node_uuid(xpath); - cancel = get_cancel_action(key, node_uuid); - if (cancel == NULL) { - abort_transition(INFINITY, tg_restart, "Resource operation removal", change); - - } else { - crm_info("Cancellation of %s on %s 
confirmed (%d)", key, node_uuid, cancel->id); - stop_te_timer(cancel->timer); - te_action_confirmed(cancel); - - update_graph(transition_graph, cancel); - trigger_graph(); + break; // Won't be packaged with operation results we may be waiting for - } - free(mutable_key); - free(node_uuid); + } else if (strcmp(op, "delete") == 0) { + process_delete_diff(xpath, op, change); - } else if(strstr(xpath, "/"XML_CIB_TAG_LRM"[") && safe_str_eq(op, "delete")) { - abort_unless_down(xpath, op, change, "Resource state removal"); + } else if (name == NULL) { + crm_warn("Ignoring malformed CIB update (%s at %s has no result)", + op, xpath); - } else if(strstr(xpath, "/"XML_CIB_TAG_STATE"[") && safe_str_eq(op, "delete")) { - abort_unless_down(xpath, op, change, "Node state removal"); + } else if (strcmp(name, XML_TAG_CIB) == 0) { + process_cib_diff(match, change, op, xpath); - } else if(name == NULL) { - crm_debug("No result for %s operation to %s", op, xpath); - CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0); + } else if (strcmp(name, XML_CIB_TAG_STATUS) == 0) { + process_status_diff(match, change, op, xpath); - } else if(strcmp(name, XML_TAG_CIB) == 0) { - xmlNode *state = NULL; - xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS); - xmlNode *config = first_named_child(match, XML_CIB_TAG_CONFIGURATION); - - for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) { - xmlNode *lrm = first_named_child(state, XML_CIB_TAG_LRM); - - node = ID(state); - process_resource_updates(node, lrm, change, op, xpath); - } - - if(config) { - abort_transition(INFINITY, tg_restart, "Non-status-only change", change); - } + } else if (strcmp(name, XML_CIB_TAG_STATE) == 0) { + process_node_state_diff(match, change, op, xpath); - } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) { - xmlNode *state = NULL; + } else if (strcmp(name, XML_CIB_TAG_LRM) == 0) { + process_resource_updates(ID(match), match, change, op, xpath); - for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) { - xmlNode *lrm = first_named_child(state, XML_CIB_TAG_LRM); - - node = ID(state); - process_resource_updates(node, lrm, change, op, xpath); - } - - } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) { - xmlNode *lrm = first_named_child(match, XML_CIB_TAG_LRM); - - node = ID(match); - process_resource_updates(node, lrm, change, op, xpath); - - } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) { - node = ID(match); - process_resource_updates(node, match, change, op, xpath); - - } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { + } else if (strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { char *local_node = get_node_from_xpath(xpath); process_resource_updates(local_node, match, change, op, xpath); free(local_node); - } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { - - xmlNode *rsc_op; + } else if (strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { char *local_node = get_node_from_xpath(xpath); - for (rsc_op = __xml_first_child(match); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { - process_graph_event(rsc_op, local_node); - } + process_lrm_resource_diff(match, local_node); free(local_node); - } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { + } else if (strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { char *local_node = get_node_from_xpath(xpath); process_graph_event(match, local_node); free(local_node); } else { - crm_err("Ignoring %s operation for %s %p, %s", op, xpath, match, name); + crm_warn("Ignoring malformed CIB update (%s at %s has unrecognized result %s)", + op, xpath, name); } } } 
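+/* For reference, a v2 patch "change" entry has roughly this shape (a sketch; + * attributes abridged): + * <change operation="modify" path="/cib/status/node_state[@id='3']/..."> + * <change-result> ...replacement XML... </change-result> + * </change> + * Each entry is routed by its operation, its path, and the element kind of + * its result. */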
+void +te_update_diff(const char *event, xmlNode * msg) +{ + xmlNode *diff = NULL; + const char *op = NULL; + int rc = -EINVAL; + int format = 1; + int p_add[] = { 0, 0, 0 }; + int p_del[] = { 0, 0, 0 }; + + CRM_CHECK(msg != NULL, return); + crm_element_value_int(msg, F_CIB_RC, &rc); + + if (transition_graph == NULL) { + crm_trace("No graph"); + return; + + } else if (rc < pcmk_ok) { + crm_trace("Filter rc=%d (%s)", rc, pcmk_strerror(rc)); + return; + + } else if (transition_graph->complete + && fsa_state != S_IDLE + && fsa_state != S_TRANSITION_ENGINE + && fsa_state != S_POLICY_ENGINE) { + crm_trace("Filter state=%s, complete=%d", fsa_state2string(fsa_state), + transition_graph->complete); + return; + } + + op = crm_element_value(msg, F_CIB_OPERATION); + diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); + + xml_patch_versions(diff, p_add, p_del); + crm_debug("Processing (%s) diff: %d.%d.%d -> %d.%d.%d (%s)", op, + p_del[0], p_del[1], p_del[2], p_add[0], p_add[1], p_add[2], + fsa_state2string(fsa_state)); + + crm_element_value_int(diff, "format", &format); + switch (format) { + case 1: + te_update_diff_v1(event, diff); + break; + case 2: + te_update_diff_v2(diff); + break; + default: + crm_warn("Ignoring malformed CIB update (unknown patch format %d)", + format); + } +} gboolean process_te_message(xmlNode * msg, xmlNode * xml_data) { const char *from = crm_element_value(msg, F_ORIG); const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); const char *ref = crm_element_value(msg, F_CRM_REFERENCE); const char *op = crm_element_value(msg, F_CRM_TASK); const char *type = crm_element_value(msg, F_CRM_MSG_TYPE); crm_trace("Processing %s (%s) message", op, ref); crm_log_xml_trace(msg, "ipc"); if (op == NULL) { /* error */ } else if (sys_to == NULL || strcasecmp(sys_to, CRM_SYSTEM_TENGINE) != 0) { crm_trace("Bad sys-to %s", crm_str(sys_to)); return FALSE; } else if (safe_str_eq(op, CRM_OP_INVOKE_LRM) && safe_str_eq(sys_from, CRM_SYSTEM_LRMD) /* && safe_str_eq(type, XML_ATTR_RESPONSE) */ ) { xmlXPathObject *xpathObj = NULL; crm_log_xml_trace(msg, "Processing (N)ACK"); crm_debug("Processing (N)ACK %s from %s", crm_element_value(msg, F_CRM_REFERENCE), from); xpathObj = xpath_search(xml_data, "//" XML_LRM_TAG_RSC_OP); if (numXpathResults(xpathObj)) { int lpc = 0, max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } freeXpathObject(xpathObj); } else { crm_log_xml_err(msg, "Invalid (N)ACK"); freeXpathObject(xpathObj); return FALSE; } } else { crm_err("Unknown command: %s::%s from %s", type, op, sys_from); } crm_trace("finished processing message"); return TRUE; } GHashTable *stonith_failures = NULL; struct st_fail_rec { int count; }; static gboolean too_many_st_failures(const char *target) { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *value = NULL; if (stonith_failures == NULL) { return FALSE; } if (target == NULL) { g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { if (value->count >= stonith_max_attempts) { target = (const char*)key; goto too_many; } } } else { value = g_hash_table_lookup(stonith_failures, target); if ((value != NULL) && (value->count >= stonith_max_attempts)) { goto too_many; } } return FALSE; too_many: crm_warn("Too many failures (%d) to fence %s, giving up", value->count, 
target); return TRUE; } /*! * \internal * \brief Reset a stonith fail count * * \param[in] target Name of node to reset, or NULL for all */ void st_fail_count_reset(const char *target) { if (stonith_failures == NULL) { return; } if (target) { struct st_fail_rec *rec = NULL; rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count = 0; } } else { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *rec = NULL; g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rec)) { rec->count = 0; } } } void st_fail_count_increment(const char *target) { struct st_fail_rec *rec = NULL; if (stonith_failures == NULL) { stonith_failures = crm_str_table_new(); } rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count++; } else { rec = malloc(sizeof(struct st_fail_rec)); if(rec == NULL) { return; } rec->count = 1; g_hash_table_insert(stonith_failures, strdup(target), rec); } } /*! * \internal * \brief Abort transition due to stonith failure * * \param[in] abort_action Whether to restart or stop transition * \param[in] target Don't restart if this (NULL for any) has too many failures * \param[in] reason Log this stonith action XML as abort reason (or NULL) */ void abort_for_stonith_failure(enum transition_action abort_action, const char *target, xmlNode *reason) { /* If stonith repeatedly fails, we eventually give up on starting a new * transition for that reason. */ if ((abort_action != tg_stop) && too_many_st_failures(target)) { abort_action = tg_stop; } abort_transition(INFINITY, abort_action, "Stonith failed", reason); } void tengine_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { char *uuid = NULL; int target_rc = -1; int stonith_id = -1; int transition_id = -1; crm_action_t *action = NULL; int call_id = data->call_id; int rc = data->rc; char *userdata = data->userdata; CRM_CHECK(userdata != NULL, return); crm_notice("Stonith operation %d/%s: %s (%d)", call_id, (char *)userdata, pcmk_strerror(rc), rc); if (AM_I_DC == FALSE) { return; } /* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */ /* op->call_id, op->optype, op->node_name, op->op_result, */ /* (char *)op->node_list, op->private_data); */ /* filter out old STONITH actions */ CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, &target_rc), crm_err("Invalid event detected"); goto bail; ); if (transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid) || transition_graph->id != transition_id) { crm_info("Ignoring STONITH action initiated outside of the current transition"); goto bail; } action = get_action(stonith_id, FALSE); if (action == NULL) { crm_err("Stonith action not matched"); goto bail; } stop_te_timer(action->timer); if (rc == pcmk_ok) { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); const char *op = crm_meta_value(action->params, "stonith_action"); crm_info("Stonith operation %d for %s passed", call_id, target); if (action->confirmed == FALSE) { te_action_confirmed(action); if (safe_str_eq("on", op)) { const char *value = NULL; char *now = crm_itoa(time(NULL)); update_attrd(target, CRM_ATTR_UNFENCED, now, NULL, FALSE); free(now); value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_ALL); update_attrd(target, CRM_ATTR_DIGESTS_ALL, value, NULL, FALSE); value = crm_meta_value(action->params, XML_OP_ATTR_DIGESTS_SECURE); 
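/* The digest attributes are recorded so that later transitions can detect * fence device parameter changes that would require unfencing to be redone. */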
update_attrd(target, CRM_ATTR_DIGESTS_SECURE, value, NULL, FALSE); } else if (action->sent_update == FALSE) { send_stonith_update(action, target, uuid); action->sent_update = TRUE; } } st_fail_count_reset(target); } else { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); enum transition_action abort_action = tg_restart; action->failed = TRUE; crm_notice("Stonith operation %d for %s failed (%s): aborting transition.", call_id, target, pcmk_strerror(rc)); /* If no fence devices were available, there's no use in immediately * checking again, so don't start a new transition in that case. */ if (rc == -ENODEV) { crm_warn("No devices found in cluster to fence %s, giving up", target); abort_action = tg_stop; } /* Increment the fail count now, so abort_for_stonith_failure() can * check it. Non-DC nodes will increment it in tengine_stonith_notify(). */ st_fail_count_increment(target); abort_for_stonith_failure(abort_action, target, NULL); } update_graph(transition_graph, action); trigger_graph(); bail: free(userdata); free(uuid); return; } void cib_fencing_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char *)user_data, pcmk_strerror(rc), rc); crm_log_xml_warn(msg, "Failed update"); abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); } } void cib_action_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Update %d FAILED: %s", call_id, pcmk_strerror(rc)); } } gboolean action_timer_callback(gpointer data) { crm_action_timer_t *timer = NULL; CRM_CHECK(data != NULL, return FALSE); timer = (crm_action_timer_t *) data; stop_te_timer(timer); crm_warn("Timer popped (timeout=%d, abort_level=%d, complete=%s)", timer->timeout, transition_graph->abort_priority, transition_graph->complete ? "true" : "false"); CRM_CHECK(timer->action != NULL, return FALSE); if (transition_graph->complete) { crm_warn("Ignoring timeout while not in transition"); } else { /* fail the action */ gboolean send_update = TRUE; const char *task = crm_element_value(timer->action->xml, XML_LRM_ATTR_TASK); print_action(LOG_ERR, "Aborting transition, action lost: ", timer->action); timer->action->failed = TRUE; te_action_confirmed(timer->action); abort_transition(INFINITY, tg_restart, "Action lost", NULL); update_graph(transition_graph, timer->action); trigger_graph(); if (timer->action->type != action_type_rsc) { send_update = FALSE; } else if (safe_str_eq(task, RSC_CANCEL)) { /* we don't need to update the CIB with these */ send_update = FALSE; } if (send_update) { cib_action_update(timer->action, PCMK_LRM_OP_TIMEOUT, PCMK_OCF_UNKNOWN_ERROR); } } return FALSE; } diff --git a/crmd/te_events.c b/crmd/te_events.c index 26389b9af0..019ec63b3a 100644 --- a/crmd/te_events.c +++ b/crmd/te_events.c @@ -1,536 +1,535 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include char *failed_stop_offset = NULL; char *failed_start_offset = NULL; gboolean fail_incompletable_actions(crm_graph_t * graph, const char *down_node) { const char *target_uuid = NULL; const char *router = NULL; const char *router_uuid = NULL; xmlNode *last_action = NULL; GListPtr gIter = NULL; GListPtr gIter2 = NULL; if (graph == NULL || graph->complete) { return FALSE; } gIter = graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) gIter->data; if (synapse->confirmed || synapse->failed) { /* We've already been here */ continue; } gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { crm_action_t *action = (crm_action_t *) gIter2->data; if (action->type == action_type_pseudo || action->confirmed) { continue; } else if (action->type == action_type_crm) { const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (safe_str_eq(task, CRM_OP_FENCE)) { continue; } } target_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); router = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE); if (router) { crm_node_t *node = crm_get_peer(0, router); if (node) { router_uuid = node->uuid; } } if (safe_str_eq(target_uuid, down_node) || safe_str_eq(router_uuid, down_node)) { action->failed = TRUE; synapse->failed = TRUE; last_action = action->xml; stop_te_timer(action->timer); update_graph(graph, action); if (synapse->executed) { crm_notice("Action %d (%s) was pending on %s (offline)", action->id, crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY), down_node); } else { crm_info("Action %d (%s) is scheduled for %s (offline)", action->id, crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY), down_node); } } } } if (last_action != NULL) { crm_info("Node %s shutdown resulted in un-runnable actions", down_node); abort_transition(INFINITY, tg_restart, "Node failure", last_action); return TRUE; } return FALSE; } /*! 
* \internal * \brief Update failure-related node attributes if warranted * * \param[in] event XML describing operation that (maybe) failed * \param[in] event_node_uuid Node that event occurred on * \param[in] rc Actual operation return code * \param[in] target_rc Expected operation return code * \param[in] do_update If TRUE, do update regardless of operation type * \param[in] ignore_failures If TRUE, update last failure but not fail count * * \return TRUE if this was not a direct nack, success or lrm status refresh */ static gboolean update_failcount(xmlNode * event, const char *event_node_uuid, int rc, int target_rc, gboolean do_update, gboolean ignore_failures) { int interval = 0; char *task = NULL; char *rsc_id = NULL; const char *value = NULL; const char *id = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); const char *on_uname = crm_peer_uname(event_node_uuid); const char *origin = crm_element_value(event, XML_ATTR_ORIGIN); /* Nothing needs to be done for success, lrm status refresh, * or direct nack (internal code for "busy, try again") */ if ((rc == CRM_DIRECT_NACK_RC) || (rc == target_rc)) { return FALSE; } else if (safe_str_eq(origin, "build_active_RAs")) { crm_debug("No update for %s (rc=%d) on %s: Old failure from lrm status refresh", id, rc, on_uname); return FALSE; } /* Sanity check */ CRM_CHECK(on_uname != NULL, return TRUE); CRM_CHECK(parse_op_key(id, &rsc_id, &task, &interval), crm_err("Couldn't parse: %s", ID(event)); goto bail); CRM_CHECK(task != NULL, goto bail); CRM_CHECK(rsc_id != NULL, goto bail); /* Decide whether update is necessary and what value to use */ if ((interval > 0) || safe_str_eq(task, CRMD_ACTION_PROMOTE) || safe_str_eq(task, CRMD_ACTION_DEMOTE)) { do_update = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_START)) { do_update = TRUE; if (failed_start_offset == NULL) { failed_start_offset = strdup(CRM_INFINITY_S); } value = failed_start_offset; } else if (safe_str_eq(task, CRMD_ACTION_STOP)) { do_update = TRUE; if (failed_stop_offset == NULL) { failed_stop_offset = strdup(CRM_INFINITY_S); } value = failed_stop_offset; } /* Fail count will be either incremented or set to infinity */ if (value == NULL || safe_str_neq(value, CRM_INFINITY_S)) { value = XML_NVPAIR_ATTR_VALUE "++"; } if (do_update) { char *now = crm_itoa(time(NULL)); char *attr_name = NULL; gboolean is_remote_node = FALSE; if (g_hash_table_lookup(crm_remote_peer_cache, event_node_uuid)) { is_remote_node = TRUE; } crm_info("Updating %s for %s on %s after failed %s: rc=%d (update=%s, time=%s)", (ignore_failures? "last failure" : "failcount"), rsc_id, on_uname, task, rc, value, now); /* Update the fail count, if we're not ignoring failures */ if (!ignore_failures) { attr_name = crm_failcount_name(rsc_id, task, interval); update_attrd(on_uname, attr_name, value, NULL, is_remote_node); free(attr_name); } /* Update the last failure time (even if we're ignoring failures, * so that failure can still be detected and shown, e.g. by crm_mon) */ attr_name = crm_lastfailure_name(rsc_id, task, interval); update_attrd(on_uname, attr_name, now, NULL, is_remote_node); free(attr_name); free(now); } bail: free(rsc_id); free(task); return TRUE; } /*! 
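/*
 * Editor's aside: update_failcount() above depends on parse_op_key()
 * splitting operation keys of the form <rsc>_<task>_<interval>, e.g.
 * "myrsc_monitor_10000". A rough standalone illustration of that split
 * (not the real parser, which also validates each field):
 */
#include <stdlib.h>
#include <string.h>

static int
parse_op_key_sketch(const char *key, char **rsc, char **task, int *interval)
{
    char *copy = strdup(key);
    char *sep = strrchr(copy, '_');     /* separator before the interval */

    if (sep == NULL) {
        free(copy);
        return 0;
    }
    *interval = atoi(sep + 1);
    *sep = '\0';
    sep = strrchr(copy, '_');           /* separator before the task */
    if (sep == NULL) {
        free(copy);
        return 0;
    }
    *task = strdup(sep + 1);
    *sep = '\0';
    *rsc = strdup(copy);                /* rsc id may itself contain '_' */
    free(copy);
    return 1;
}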
* \internal * \brief Return simplified operation status based on operation return code * * \param[in] action CRM action instance of operation * \param[in] orig_status Original reported operation status * \param[in] rc Actual operation return code * \param[in] target_rc Expected operation return code * * \return PCMK_LRM_OP_DONE if rc equals target_rc, PCMK_LRM_OP_ERROR otherwise * * \note This assumes that PCMK_LRM_OP_PENDING operations have already been * filtered (otherwise they will get simplified as well). */ static int status_from_rc(crm_action_t * action, int orig_status, int rc, int target_rc) { if (target_rc == rc) { crm_trace("Target rc: == %d", rc); if (orig_status != PCMK_LRM_OP_DONE) { crm_trace("Re-mapping op status to PCMK_LRM_OP_DONE for rc=%d", rc); } return PCMK_LRM_OP_DONE; } if (rc != CRM_DIRECT_NACK_RC) { const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); const char *uname = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); crm_warn("Action %d (%s) on %s failed (target: %d vs. rc: %d): %s", action->id, task, uname, target_rc, rc, services_lrm_status_str(PCMK_LRM_OP_ERROR)); } return PCMK_LRM_OP_ERROR; } /*! * \internal * \brief Confirm action and update transition graph, aborting transition on failures * * \param[in,out] action CRM action instance of this operation * \param[in] event Event instance of this operation * \param[in] op_status Original reported operation status * \param[in] op_rc Actual operation return code * \param[in] target_rc Expected operation return code * \param[in] ignore_failures Whether to ignore operation failures * * \note This assumes that PCMK_LRM_OP_PENDING operations have already been * filtered (otherwise they may be treated as failures). */ static void match_graph_event(crm_action_t *action, xmlNode *event, int op_status, int op_rc, int target_rc, gboolean ignore_failures) { const char *target = NULL; const char *this_event = NULL; const char *ignore_s = ""; /* Remap operation status based on return code */ op_status = status_from_rc(action, op_status, op_rc, target_rc); /* Process OP status */ switch (op_status) { case PCMK_LRM_OP_DONE: break; case PCMK_LRM_OP_ERROR: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_NOTSUPPORTED: if (ignore_failures) { ignore_s = ", ignoring failure"; } else { action->failed = TRUE; } break; case PCMK_LRM_OP_CANCELLED: /* do nothing??
*/ crm_err("Don't know what to do for cancelled ops yet"); break; default: /* PCMK_LRM_OP_ERROR_HARD, PCMK_LRM_OP_ERROR_FATAL, PCMK_LRM_OP_NOT_INSTALLED */ action->failed = TRUE; crm_err("Unsupported action result: %d", op_status); } /* stop this event's timer if it had one */ stop_te_timer(action->timer); te_action_confirmed(action); update_graph(transition_graph, action); trigger_graph(); if (action->failed) { abort_transition(action->synapse->priority + 1, tg_restart, "Event failed", event); } this_event = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); crm_info("Action %s (%d) confirmed on %s (rc=%d%s)", crm_str(this_event), action->id, crm_str(target), op_rc, ignore_s); } crm_action_t * get_action(int id, gboolean confirmed) { GListPtr gIter = NULL; GListPtr gIter2 = NULL; gIter = transition_graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) gIter->data; gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { crm_action_t *action = (crm_action_t *) gIter2->data; if (action->id == id) { if (confirmed) { stop_te_timer(action->timer); te_action_confirmed(action); } return action; } } } return NULL; } crm_action_t * get_cancel_action(const char *id, const char *node) { GListPtr gIter = NULL; GListPtr gIter2 = NULL; gIter = transition_graph->synapses; for (; gIter != NULL; gIter = gIter->next) { synapse_t *synapse = (synapse_t *) gIter->data; gIter2 = synapse->actions; for (; gIter2 != NULL; gIter2 = gIter2->next) { const char *task = NULL; const char *target = NULL; crm_action_t *action = (crm_action_t *) gIter2->data; task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (safe_str_neq(CRMD_ACTION_CANCEL, task)) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); if (safe_str_neq(task, id)) { crm_trace("Wrong key %s for %s on %s", task, id, node); continue; } target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); if (node && safe_str_neq(target, node)) { crm_trace("Wrong node %s for %s on %s", target, id, node); continue; } crm_trace("Found %s on %s", id, node); return action; } } return NULL; } /* downed nodes are listed like: ... */ #define XPATH_DOWNED "//" XML_GRAPH_TAG_DOWNED \ "/" XML_CIB_TAG_NODE "[@" XML_ATTR_UUID "='%s']" /*! 
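/*
 * Editor's aside: the event-matching code below hinges on the transition
 * key that the DC attaches to every operation. This sketch assumes a
 * four-field "action:transition:target_rc:uuid" layout; the field order
 * is an assumption of the illustration, not taken from this patch:
 */
#include <stdio.h>
#include <stdbool.h>

static bool
decode_transition_key_sketch(const char *key, int *action_id,
                             int *transition_id, int *target_rc,
                             char *uuid /* buffer of at least 37 bytes */)
{
    /* reject anything that does not yield all four fields */
    return sscanf(key, "%d:%d:%d:%36s",
                  action_id, transition_id, target_rc, uuid) == 4;
}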
* \brief Find a transition event that would have made a specified node down * * \param[in] target UUID of node to match * \param[in] quiet If FALSE, log a warning if no match found * * \return Matching event if found, NULL otherwise */ crm_action_t * match_down_event(const char *target, bool quiet) { crm_action_t *match = NULL; xmlXPathObjectPtr xpath_ret = NULL; GListPtr gIter, gIter2; char *xpath = crm_strdup_printf(XPATH_DOWNED, target); for (gIter = transition_graph->synapses; gIter != NULL && match == NULL; gIter = gIter->next) { for (gIter2 = ((synapse_t*)gIter->data)->actions; gIter2 != NULL && match == NULL; gIter2 = gIter2->next) { match = (crm_action_t*)gIter2->data; - xpath_ret = xpath_search(match->xml, xpath); - if (numXpathResults(xpath_ret) < 1) { + if (match->executed) { + xpath_ret = xpath_search(match->xml, xpath); + if (numXpathResults(xpath_ret) < 1) { + match = NULL; + } + freeXpathObject(xpath_ret); + } else { + // Only actions that were actually started can match match = NULL; } - freeXpathObject(xpath_ret); } } free(xpath); if (match != NULL) { crm_debug("Shutdown action found for node %s: action %d (%s)", target, match->id, crm_element_value(match->xml, XML_LRM_ATTR_TASK_KEY)); } else { do_crm_log((quiet? LOG_DEBUG : LOG_WARNING), "No reason to expect node %s to be down", target); } return match; } -gboolean -process_graph_event(xmlNode * event, const char *event_node) +void +process_graph_event(xmlNode *event, const char *event_node) { int rc = -1; int status = -1; int callid = -1; int action_num = -1; crm_action_t *action = NULL; int target_rc = -1; int transition_num = -1; char *update_te_uuid = NULL; - gboolean stop_early = FALSE; gboolean ignore_failures = FALSE; const char *id = NULL; const char *desc = NULL; const char *magic = NULL; CRM_ASSERT(event != NULL); /* */ id = crm_element_value(event, XML_LRM_ATTR_TASK_KEY); crm_element_value_int(event, XML_LRM_ATTR_RC, &rc); crm_element_value_int(event, XML_LRM_ATTR_OPSTATUS, &status); crm_element_value_int(event, XML_LRM_ATTR_CALLID, &callid); magic = crm_element_value(event, XML_ATTR_TRANSITION_KEY); if (magic == NULL) { /* non-change */ - return FALSE; + return; } if (decode_transition_key(magic, &update_te_uuid, &transition_num, &action_num, &target_rc) == FALSE) { crm_err("Invalid event %s.%d detected: %s", id, callid, magic); abort_transition(INFINITY, tg_restart, "Bad event", event); - return FALSE; + return; } if (status == PCMK_LRM_OP_PENDING) { goto bail; } if (transition_num == -1) { desc = "initiated outside of the cluster"; abort_transition(INFINITY, tg_restart, "Unexpected event", event); } else if ((action_num < 0) || (crm_str_eq(update_te_uuid, te_uuid, TRUE) == FALSE)) { desc = "initiated by a different node"; abort_transition(INFINITY, tg_restart, "Foreign event", event); - stop_early = TRUE; /* This could be an lrm status refresh */ } else if (transition_graph->id != transition_num) { desc = "arrived really late"; abort_transition(INFINITY, tg_restart, "Old event", event); - stop_early = TRUE; /* This could be an lrm status refresh */ } else if (transition_graph->complete) { desc = "arrived late"; abort_transition(INFINITY, tg_restart, "Inactive graph", event); } else { action = get_action(action_num, FALSE); if (action == NULL) { desc = "unknown"; abort_transition(INFINITY, tg_restart, "Unknown event", event); } else { ignore_failures = safe_str_eq( crm_meta_value(action->params, XML_OP_ATTR_ON_FAIL), "ignore"); match_graph_event(action, event, status, rc, target_rc, ignore_failures); } } if 
(action && (rc == target_rc)) { crm_trace("Processed update to %s: %s", id, magic); } else { if (update_failcount(event, event_node, rc, target_rc, (transition_num == -1), ignore_failures)) { - /* Turns out this wasn't an lrm status refresh update afterall */ - stop_early = FALSE; desc = "failed"; } crm_info("Detected action (%d.%d) %s.%d=%s: %s", transition_num, action_num, id, callid, services_ocf_exitcode_str(rc), desc); } bail: free(update_te_uuid); - return stop_early; } diff --git a/crmd/tengine.h b/crmd/tengine.h index 80e906de48..838f518f36 100644 --- a/crmd/tengine.h +++ b/crmd/tengine.h @@ -1,84 +1,84 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef TENGINE__H # define TENGINE__H # include # include # include # include extern stonith_t *stonith_api; extern void send_stonith_update(crm_action_t * stonith_action, const char *target, const char *uuid); /* stonith cleanup list */ void add_stonith_cleanup(const char *target); void remove_stonith_cleanup(const char *target); void purge_stonith_cleanup(void); void execute_stonith_cleanup(void); /* tengine */ extern crm_action_t *match_down_event(const char *target, bool quiet); extern crm_action_t *get_cancel_action(const char *id, const char *node); extern gboolean cib_action_update(crm_action_t * action, int status, int op_rc); extern gboolean fail_incompletable_actions(crm_graph_t * graph, const char *down_node); -extern gboolean process_graph_event(xmlNode * event, const char *event_node); +void process_graph_event(xmlNode *event, const char *event_node); /* utils */ extern crm_action_t *get_action(int id, gboolean confirmed); extern gboolean stop_te_timer(crm_action_timer_t * timer); extern const char *get_rsc_state(const char *task, enum op_status status); /* unpack */ extern gboolean process_te_message(xmlNode * msg, xmlNode * xml_data); extern crm_graph_t *transition_graph; extern crm_trigger_t *transition_trigger; extern char *te_uuid; extern void notify_crmd(crm_graph_t * graph); # include extern void trigger_graph_processing(const char *fn, int line); void abort_after_delay(int abort_priority, enum transition_action abort_action, const char *abort_text, guint delay_ms); extern void abort_transition_graph(int abort_priority, enum transition_action abort_action, const char *abort_text, xmlNode * reason, const char *fn, int line); # define trigger_graph() trigger_graph_processing(__FUNCTION__, __LINE__) # define abort_transition(pri, action, text, reason) \ abort_transition_graph(pri, action, text, reason,__FUNCTION__,__LINE__); extern gboolean te_connect_stonith(gpointer user_data); extern crm_trigger_t *transition_trigger; extern crm_trigger_t *stonith_reconnect; extern char *failed_stop_offset; extern char *failed_start_offset; extern int active_timeout; extern int stonith_op_active; void te_action_confirmed(crm_action_t 
* action); void te_reset_job_counts(void); #endif diff --git a/cts/CTStests.py b/cts/CTStests.py index ebeaec8901..ba9840e2e0 100644 --- a/cts/CTStests.py +++ b/cts/CTStests.py @@ -1,3128 +1,3129 @@ '''CTS: Cluster Testing System: Tests module There are a few things we want to do here: ''' from __future__ import division from __future__ import print_function __copyright__ = ''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. Add RecourceRecover testcase Zhao Kai ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. # import time, os, re, string, subprocess, tempfile from stat import * from cts import CTS from cts.CTSaudits import * from cts.CTSvars import * from cts.patterns import PatternSelector from cts.logging import LogFactory from cts.remote import RemoteFactory, input_wrapper from cts.watcher import LogWatcher from cts.environment import EnvFactory AllTestClasses = [ ] class CTSTest(object): ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. 
''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Env = EnvFactory().getInstance() self.rsh = RemoteFactory().getInstance() self.logger = LogFactory() self.templates = PatternSelector(cm["Name"]) self.Audits = [] self.timeout = 120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_docker_unsafe = 0 self.is_experimental = 0 self.is_container = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def log(self, args): self.logger.log(args) def debug(self, args): self.logger.debug(args) def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") if key in self.Stats: return self.Stats[key] return None def log_mark(self, msg): self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" % (self.name,key) self.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.logger.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"]) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match = watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.logger.log(prefix + " " + match) errcount = errcount + 1 else: break else: self.logger.log("Too many errors!") watch.end() return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.Env["experimental-tests"]: return 0 elif self.is_docker_unsafe and self.Env["docker"]: return 0 elif self.is_container and not self.Env["container-tests"]: return 0 elif self.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] class StopTest(CTSTest): '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stop" def __call__(self, node): '''Perform the 'stop' test. 
''' self.incr("calls") if self.CM.ShouldBeStatus[node] != "up": return self.skipped() patterns = [] # Technically we should always be able to notice ourselves stopping patterns.append(self.templates["Pat:We_stopped"] % node) # Any active node needs to notice this one left # NOTE: This won't work if we have multiple partitions for other in self.Env["nodes"]: if self.CM.ShouldBeStatus[other] == "up" and other != node: patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) #self.debug("Checking %s will notice %s left"%(other, node)) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() if node == self.CM.OurNode: self.incr("us") else: if self.CM.upcount() <= 1: self.incr("all") else: self.incr("them") self.CM.StopaCM(node) watch_result = watch.lookforall() failreason = None UnmatchedList = "||" if watch.unmatched: (rc, output) = self.rsh(node, "/bin/ps axf", None) for line in output: self.debug(line) (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump", None) for line in output: self.debug(line) for regex in watch.unmatched: self.logger.log("ERROR: Shutdown pattern not found: %s" % (regex)) UnmatchedList += regex + "||" failreason = "Missing shutdown pattern" self.CM.cluster_stable(self.Env["DeadTime"]) if not watch.unmatched or self.CM.upcount() == 0: return self.success() if len(watch.unmatched) >= self.CM.upcount(): return self.failure("no match against (%s)" % UnmatchedList) if failreason is None: return self.success() else: return self.failure(failreason) # # We don't register StopTest because it's better when called by # another test... # class StartTest(CTSTest): '''Start (activate) the cluster manager on a node''' def __init__(self, cm, debug=None): CTSTest.__init__(self,cm) self.name = "start" self.debug = debug def __call__(self, node): '''Perform the 'start' test. ''' self.incr("calls") if self.CM.upcount() == 0: self.incr("us") else: self.incr("them") if self.CM.ShouldBeStatus[node] != "down": return self.skipped() elif self.CM.StartaCM(node): return self.success() else: return self.failure("Startup %s on node %s failed" % (self.Env["Name"], node)) # # We don't register StartTest because it's better when called by # another test... # class FlipTest(CTSTest): '''If it's running, stop it. If it's stopped, start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Flip" self.start = StartTest(cm) self.stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") if self.CM.ShouldBeStatus[node] == "up": self.incr("stopped") ret = self.stop(node) type = "up->down" # Give the cluster time to recognize it's gone... time.sleep(self.Env["StableTime"]) elif self.CM.ShouldBeStatus[node] == "down": self.incr("started") ret = self.start(node) type = "down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) class RestartTest(CTSTest): '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Restart" self.start = StartTest(cm) self.stop = StopTest(cm) self.benchmark = 1 def __call__(self, node): '''Perform the 'restart' test.
''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) class StonithdTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") is_dc = self.CM.is_node_dc(node) watchpats = [] watchpats.append(self.templates["Pat:FenceOpOK"] % node) watchpats.append(self.templates["Pat:NodeFenced"] % node) if self.Env["at-boot"] == 0: self.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node] = "down" else: self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) watchpats.append("%s.* S_STARTING -> S_PENDING" % node) watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() origin = self.Env.RandomGen.choice(self.Env["nodes"]) rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) if rc == 194: # 194 - 256 = -62 = Timer expired # # Look for the patterns, usually this means the required # device was running on the node to be fenced - or that # the required devices were in the process of being loaded # and/or moved # # Effectively the node committed suicide so there will be # no confirmation, but pacemaker should be watching and # fence the node again self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) elif origin != node and rc != 0: self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) elif origin == node and rc != 255: # 255 == broken pipe, ie. 
the node was fenced as expected self.logger.log("Locally originated fencing returned %d" % rc) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ self.templates["Pat:Fencing_start"] % ".*", self.templates["Pat:Fencing_ok"] % ".*", r"error.*: Resource .*stonith::.* is active on 2 nodes attempting recovery", r"error.*: Operation reboot of .*by .* for stonith_admin.*: Timer expired", ] def is_applicable(self): if not self.is_applicable_common(): return 0 if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return 1 AllTestClasses.append(StonithdTest) class StartOnebyOne(CTSTest): '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StartOnebyOne" self.stopall = SimulStopLite(cm) self.start = StartTest(cm) self.ns = CTS.NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) class SimulStart(CTSTest): '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStart" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") if not self.startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) class SimulStop(CTSTest): '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStop" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) class StopOnebyOne(CTSTest): '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StopOnebyOne" self.startall = SimulStartLite(cm) self.stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... 
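# Editor's aside: the tests above and below all follow one skeleton --
# build patterns, arm a LogWatcher via create_watch(), perturb the
# cluster, then fail on unmatched patterns. Schematic only; ExampleTest
# and the perturbation step are hypothetical:
class ExampleTest(CTSTest):
    '''Schematic of the common create_watch()/lookforall() idiom'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "Example"
        self.startall = SimulStartLite(cm)

    def __call__(self, node):
        self.incr("calls")
        if not self.startall(None):
            return self.failure("Setup failed")
        patterns = [ self.templates["Pat:RscOpOK"] % ("start", ".*") ]
        watch = self.create_watch(patterns, self.Env["DeadTime"])
        watch.setwatch()
        # ... perturb the cluster here, e.g. stop or kill something ...
        if not watch.lookforall():
            return self.failure("Patterns not found: " + repr(watch.unmatched))
        return self.success()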
# Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.stop(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to stop: " + repr(failed)) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(StopOnebyOne) class RestartOnebyOne(CTSTest): '''Restart all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RestartOnebyOne" self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'RestartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") did_fail = [] self.set_timer() self.restart = RestartTest(self.CM) for node in self.Env["nodes"]: if not self.restart(node): did_fail.append(node) if did_fail: return self.failure("Could not restart %d nodes: %s" % (len(did_fail), repr(did_fail))) return self.success() # Register RestartOnebyOne as a good test to run AllTestClasses.append(RestartOnebyOne) class PartialStart(CTSTest): '''Start a node - but tell it to stop before it finishes starting up''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "PartialStart" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) self.stop = StopTest(cm) #self.is_unsafe = 1 def __call__(self, node): '''Perform the 'PartialStart' test. ''' self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Setup failed") # FIXME! This should use the CM class to get the pattern # then it would be applicable in general watchpats = [] watchpats.append("crmd.*Connecting to cluster infrastructure") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.CM.StartaCMnoBlock(node) ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) return self.failure("Setup of %s failed" % node) ret = self.stop(node) if not ret: return self.failure("%s did not stop in time" % node) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # We might do some fencing in the 2-node case if we make it up far enough return [ r"Executing reboot fencing operation", r"Requesting fencing \([^)]+\) of node ", ] # Register PartialStart as a good test to run AllTestClasses.append(PartialStart) class StandbyTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Standby" self.benchmark = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) # make sure the node is active # set the node to standby mode # check resources, no resources should be running on the node # set the node to active mode # check resources, resources should have been migrated back (SHOULD THEY?)
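# Editor's aside: the plan sketched in the comments above maps directly
# onto the cluster-manager helpers used by __call__ below. In outline
# (standby_cycle_sketch is hypothetical, for illustration only):
def standby_cycle_sketch(cm, node):
    '''Round-trip a node through standby and verify it was drained'''
    if not cm.SetStandbyMode(node, "on"):
        return False
    cm.cluster_stable()
    if cm.active_resources(node):
        return False                # resources must have vacated the node
    if not cm.SetStandbyMode(node, "off"):
        return False
    cm.cluster_stable()
    return True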
def __call__(self, node): self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") self.debug("Make sure node %s is active" % node) if self.CM.StandbyStatus(node) != "off": if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.debug("Getting resources running on node %s" % node) rsc_on_node = self.CM.active_resources(node) watchpats = [] watchpats.append(r"State transition .* -> S_POLICY_ENGINE") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.debug("Setting node %s to standby mode" % node) if not self.CM.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.CM.SetStandbyMode(node, "off") return self.failure("cluster didn't react to standby change on %s" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.debug("Checking resources") bad_run = self.CM.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.debug("Setting node %s to active mode" % node) self.CM.SetStandbyMode(node, "off") return rc self.debug("Setting node %s to active mode" % node) if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) class ValgrindTest(CTSTest): '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # Enable valgrind self.logger.logPat = "/tmp/%s-*.valgrind" % self.name self.Env["valgrind-prefix"] = self.name self.rsh(node, "rm -f %s" % self.logger.logPat, None) ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") return self.success() def teardown(self, node): # Disable valgrind self.Env["valgrind-prefix"] = None # Return all nodes to normal ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks leaked = [] self.stop = StopTest(self.CM) for node in self.Env["nodes"]: rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None) for line in output: self.logger.log(line) (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None) for line in output: 
self.debug(line) self.rsh(node, "rm -f %s" % self.logger.logPat, None) return leaked def __call__(self, node): leaked = self.find_leaks() if len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"cib.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", r"cib.*: .* avoid confusing Valgrind", r"HA_VALGRIND_ENABLED", ] class StandbyLoopTest(ValgrindTest): '''Check for memory leaks by putting a node in and out of standby for an hour''' def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name = "StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done = time.time() + self.Env["loop-minutes"] * 60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() AllTestClasses.append(StandbyLoopTest) class BandwidthTest(CTSTest): # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. '''Test the bandwidth which the cluster uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) (handle, self.tempfile) = tempfile.mkstemp(".cts") os.close(handle) self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount() < 1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.logger.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.logger.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" 
% rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count + 1 linesplit = str.split(line," ") for j in range(len(linesplit)-1): if linesplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = str.split(T1,":") time2split = str.split(timesplit[2],".") time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = str.split(line," ") for j in range(len(linessplit)-1): if linessplit[j] == "udp": break if linessplit[j] == "length:": break try: sum = int(linessplit[j+1]) + sum except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = str.split(T2,":") time2split = str.split(timesplit[2],".") time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 elapsed = time2-time1 if (elapsed <= 0): return 0 return int((sum*8)/elapsed) def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class MaintenanceMode(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "MaintenanceMode" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 #self.is_unsafe = 1 self.benchmark = 1 self.action = "asyncmon" self.interval = 0 self.rid = "maintenanceDummy" def toggleMaintenanceMode(self, node, action): pats = [] pats.append(self.templates["Pat:DC_IDLE"]) # fail the resource right after turning Maintenance mode on # verify it is not recovered until maintenance mode is turned off if action == "On": pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for %s on" % (self.action, self.rid)) else: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.debug("Turning maintenance mode %s" % action) self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) if (action == "On"): self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover%s" % (action)) watch.lookforall() self.log_timer("recover%s" % (action)) if watch.unmatched: self.debug("Failed to find patterns when turning maintenance mode %s" % action) return repr(watch.unmatched) return "" def insertMaintenanceDummy(self, node): pats = [] pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid))) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.AddDummyRsc(node, self.rid) self.set_timer("addDummy") watch.lookforall() self.log_timer("addDummy") if watch.unmatched: self.debug("Failed to find patterns when adding maintenance dummy resource") return repr(watch.unmatched) return "" def removeMaintenanceDummy(self, node): pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.RemoveDummyRsc(node, self.rid) self.set_timer("removeDummy") watch.lookforall() self.log_timer("removeDummy")
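# Editor's aside: countbandwidth() above hand-splits each tcpdump line to
# recover the timestamp and the "length:" field. The same extraction,
# restated compactly; the regex and the 6-digit fractional timestamp are
# assumptions mirroring the original parsing, not a drop-in replacement:
import re

TCPDUMP_LINE_RE = re.compile(r"^(\d+):(\d+):(\d+)\.(\d+).*length:?\s*(\d+)")

def parse_tcpdump_line(line):
    '''Return (seconds_since_midnight, payload_length) or None'''
    m = TCPDUMP_LINE_RE.match(line)
    if m is None:
        return None
    hours, minutes, secs, usecs, length = [int(g) for g in m.groups()]
    stamp = (hours * 60 + minutes) * 60 + secs + usecs * 1e-6
    return (stamp, length)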
if watch.unmatched: self.debug("Failed to find patterns when removing maintenance dummy resource") return repr(watch.unmatched) return "" def managedRscList(self, node): rscList = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.managed(): rscList.append(tmp.id) return rscList def verifyResources(self, node, rscList, managed): managedList = list(rscList) managed_str = "managed" if not managed: managed_str = "unmanaged" (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if managed and not tmp.managed(): continue elif not managed and tmp.managed(): continue elif managedList.count(tmp.id): managedList.remove(tmp.id) if len(managedList) == 0: self.debug("Found all %s resources on %s" % (managed_str, node)) return True self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) return False def __call__(self, node): '''Perform the 'MaintenanceMode' test. ''' self.incr("calls") verify_managed = False verify_unmanaged = False failPat = "" ret = self.startall(None) if not ret: return self.failure("Setup failed") # get a list of all the managed resources. We use this list # after enabling maintenance mode to verify all managed resources # become un-managed. After maintenance mode is turned off, we use # this list to verify all the resources become managed again. managedResources = self.managedRscList(node) if len(managedResources) == 0: self.logger.log("No managed resources on %s" % node) return self.skipped() # insert a fake resource we can fail during maintenance mode # so we can verify recovery does not take place until after maintenance # mode is disabled. failPat = failPat + self.insertMaintenanceDummy(node) # toggle maintenance mode ON, then fail dummy resource. failPat = failPat + self.toggleMaintenanceMode(node, "On") # verify all the resources are now unmanaged if self.verifyResources(node, managedResources, False): verify_unmanaged = True # Toggle maintenance mode OFF, verify dummy is recovered. failPat = failPat + self.toggleMaintenanceMode(node, "Off") # verify all the resources are now managed again if self.verifyResources(node, managedResources, True): verify_managed = True # Remove our maintenance dummy resource. 
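# Editor's aside: verifyResources() above reduces to set bookkeeping --
# every resource seen as managed beforehand must report the expected
# managed flag afterwards. Equivalent outline (illustrative only):
def verify_resources_sketch(expected_ids, observed, want_managed):
    '''observed: iterable of (rsc_id, is_managed) pairs'''
    remaining = set(expected_ids)
    for rsc_id, is_managed in observed:
        if is_managed == want_managed:
            remaining.discard(rsc_id)
    return not remaining        # True when every expected resource matched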
failPat = failPat + self.removeMaintenanceDummy(node) self.CM.cluster_stable() if failPat != "": return self.failure("Unmatched patterns: %s" % (failPat)) elif verify_unmanaged is False: return self.failure("Failed to verify resources became unmanaged during maintenance mode") elif verify_managed is False: return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"pengine.*: Recover %s\s*\(.*\)" % self.rid, r"Unknown operation: fail", r"(ERROR|error): sending stonithRA op to stonithd failed.", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(MaintenanceMode) class ResourceRecover(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 self.rid = None self.rid_alt = None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") resourcelist = self.CM.active_resources(node) # if there are no resourcelist, return directly if len(resourcelist) == 0: self.logger.log("No active resources on %s" % node) return self.skipped() self.rid = self.Env.RandomGen.choice(resourcelist) self.rid_alt = self.rid rsc = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.id == self.rid: rsc = tmp # Handle anonymous clones that get renamed self.rid = rsc.clone_id break if not rsc: return self.failure("Could not find %s in the resource list" % self.rid) self.debug("Shooting %s aka. 
%s" % (rsc.clone_id, rsc.id)) pats = [] pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for (%s|%s) on" % (self.action, rsc.id, rsc.clone_id)) if rsc.managed(): pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) if rsc.unique(): pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) else: # Anonymous clones may get restarted with a different clone number pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) watch = self.create_watch(pats, 60) watch.setwatch() self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover") watch.lookforall() self.log_timer("recover") self.CM.cluster_stable() recovered = self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.debug("%s is running on: %s" % (self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"pengine.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt), r"Unknown operation: fail", r"(ERROR|error): sending stonithRA op to stonithd failed.", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) class ComponentFail(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ComponentFail" # TODO make this work correctly in docker. self.is_docker_unsafe = 1 self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. 
''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.Env["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.Env.RandomGen.choice(self.complist) self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "corosync": self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) if chosen.name == "stonith": # Ignore actions for STONITH resources (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) # supply a copy so self.patterns doesn't end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append(self.templates["Pat:Fencing_ok"] % node) stonith = self.create_watch(stonithPats, 0) stonith.setwatch() # set the watch for stable watch = self.create_watch( tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for any STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") self.CM.cluster_stable(self.Env["StartTime"]) self.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.debug("Found: " + repr(shot)) self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) if self.Env["at-boot"] == 0: self.CM.ShouldBeStatus[node] = "down" # If fencing occurred, chances are many (if not all) the expected logs # will not be sent - or will be lost when the node reboots return self.success() # check for logs indicating a graceful recovery matched = watch.lookforall(allow_multiple_matches=1) if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected %s patterns" % chosen.name) elif not is_stable: return self.failure("Cluster did not become stable after killing %s" % chosen.name) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) class SplitBrainTest(CTSTest): '''It is used to test split-brain. 
when the path between the two nodes breaks, check that both nodes take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.is_experimental = 1 def isolate_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.debug("Creating partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: if not self.CM.isolate_node(node, other_nodes): self.logger.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"])) if len(other_nodes) == 0: return 1 self.debug("Healing partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: self.CM.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") self.passed = 1 partitions = {} ret = self.startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we get multiple partitions partitions = {} p_max = len(self.Env["nodes"]) for node in self.Env["nodes"]: p = self.Env.RandomGen.randint(1, p_max) if not p in partitions: partitions[p] = [] partitions[p].append(node) p_max = len(list(partitions.keys())) if p_max > 1: break # else, try again self.debug("Created %d partitions" % p_max) for key in list(partitions.keys()): self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") for key in list(partitions.keys()): self.isolate_partition(partitions[key]) count = 30 while count > 0: if len(self.CM.find_partitions()) != p_max: time.sleep(10) count -= 1 else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability if not self.CM.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state self.CM.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") self.CM.partitions_expected = 1 # And heal them again for key in list(partitions.keys()): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: if len(self.CM.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] partitions = self.CM.find_partitions() if len(partitions) > 0: members = partitions[0].split() if len(members) != len(self.Env["nodes"]): time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is preferable to # trying to continue in a messed-up state if not self.CM.cluster_stable(1200): self.failure("Reformed cluster not stable") if self.Env["continue"] == 1: answer = "Y" else: try: answer = input_wrapper('Continue?
[nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.Env["DoFencing"]: self.rsh(node, "crm_attribute -V -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ r"Another DC detected:", r"(ERROR|error).*: .*Application of an update diff failed", r"crmd.*:.*not in our membership list", r"CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) class Reattach(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def _is_managed(self, node): is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1) is_managed = is_managed[:-1] # Strip off the newline return is_managed == "true" def _set_unmanaged(self, node): self.debug("Disable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") def _set_managed(self, node): self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") def setup(self, node): attempt = 0 if not self.startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started while not self.CM.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.debug("Not stable yet, re-testing") else: self.logger.log("Cluster is not stable") return None return 1 def teardown(self, node): # Make sure 'node' is up start = StartTest(self.CM) start(node) if not self._is_managed(node): self.logger.log("Attempting to re-enable resource management on %s" % node) self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): self.logger.log("Could not re-enable resource management") return 0 return 1 def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] # Conveniently, pengine will display this message when disabling management, # even if fencing is not enabled, so we can rely on it. 
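# An illustrative note on the idiom used here and throughout this file
# (hypothetical pattern and timeout; the real values follow below):
#
#     watch = self.create_watch(["some log pattern"], 60)
#     watch.setwatch()               # start tailing the cluster logs
#     ...perform the action under test...
#     if not watch.lookforall():     # block until every pattern matches, or time out
#         self.logger.log("Patterns not found: " + repr(watch.unmatched))
#
# The watch created next applies this to the "Delaying fencing operations"
# message that is logged once resource management is disabled.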
managed = self.create_watch(["Delaying fencing operations"], 60) managed.setwatch() self._set_unmanaged(node) if not managed.lookforall(): self.logger.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*")) pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*")) watch = self.create_watch(pats, 60, "ShutdownActivity") watch.setwatch() self.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self._set_managed(node) return self.failure("Couldn't shut down the cluster") self.debug("Bringing the cluster back up") ret = self.startall(None) time.sleep(5) # allow ping to update the CIB if not ret: self._set_managed(node) return self.failure("Couldn't restart the cluster") if self.local_badnews("ResourceActivity:", watch): self._set_managed(node) return self.failure("Resources stopped or started during cluster restart") watch = self.create_watch(pats, 60, "StartupActivity") watch.setwatch() # Re-enable resource management (and verify it happened). self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): return self.failure("Could not re-enable resource management") # Ignore actions for STONITH resources ignore = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.debug("Ignoring start actions for %s" % r.id) ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id)) if self.local_badnews("ResourceActivity:", watch, ignore): return self.failure("Resources stopped or started after resource management was re-enabled") return ret def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"resource( was|s were) active at shutdown", ] def is_applicable(self): return 1 AllTestClasses.append(Reattach) class SpecialTest1(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SpecialTest1" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. ''' self.incr("calls") # Shut down all the nodes... 
ret = self.stopall(None) if not ret: return self.failure("Could not stop all nodes") # Test config recovery when the other nodes come up self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") # Start the selected node ret = self.restart1(node) if not ret: return self.failure("Could not start "+node) # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Could not start the remaining nodes") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Errors that occur as a result of the CIB being wiped return [ r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", r"error.*: Resource start-up disabled since no STONITH resources have been defined", r"error.*: Either configure some or disable STONITH with the stonith-enabled option", r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity", ] AllTestClasses.append(SpecialTest1) class HAETest(CTSTest): '''Base class for tests that exercise the HA Extension (HAE) services''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "HAETest" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_loop = 1 def setup(self, node): # Start all nodes ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active = 0 (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.debug("Resource %s is still inactive" % resource) elif rc == 234: self.logger.log("Unknown resource %s" % resource) return 0 elif rc == 246: self.logger.log("Cluster is inactive") return 0 elif rc != 0: self.logger.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.debug("Resource %s is active %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "controld" and r.parent != "NA": self.r_dlm = r.parent self.debug("Found dlm: %s" % self.r_dlm) return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None self.r_o2cb = None self.r_ocfs2 = [] if self.find_dlm(node): self.find_ocfs2_resources(node) def is_applicable(self): if not self.is_applicable_common(): return 0 if self.Env["Schema"] == "hae": return 1 return None class HAERoleTest(HAETest): def __init__(self, cm): '''Lars' mount/unmount test for the HA extension.
''' HAETest.__init__(self,cm) self.name = "HAERoleTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) class HAEStandbyTest(HAETest): '''Standby/unstandby test for the HA extension services''' def __init__(self, cm): HAETest.__init__(self,cm) self.name = "HAEStandbyTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) class NearQuorumPointTest(CTSTest): ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down. Use a 50% probability for each of up/down. - figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring down those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test.
''' self.incr("calls") startset = [] stopset = [] stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint") # decide what to do with each node for node in self.Env["nodes"]: action = self.Env.RandomGen.choice(["start","stop"]) #action = self.Env.RandomGen.choice(["start","stop","no change"]) if action == "start": startset.append(node) elif action == "stop": stopset.append(node) self.debug("start nodes:" + repr(startset)) self.debug("stop nodes:" + repr(stopset)) # add search patterns watchpats = [ ] for node in stopset: if self.CM.ShouldBeStatus[node] == "up": watchpats.append(self.templates["Pat:We_stopped"] % node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": #watchpats.append(self.templates["Pat:NonDC_started"] % node) watchpats.append(self.templates["Pat:Local_started"] % node) else: for stopping in stopset: if self.CM.ShouldBeStatus[stopping] == "up": watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping))) if len(watchpats) == 0: return self.skipped() if len(startset) != 0: watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() # begin actions for node in stopset: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) # get the result if watch.lookforall(): self.CM.cluster_stable() self.CM.fencing_cleanup("NearQuorumPoint", stonith) return self.success() self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched)) # get the "bad" nodes upnodes = [] for node in stopset: if self.CM.StataCM(node) == 1: upnodes.append(node) downnodes = [] for node in startset: if self.CM.StataCM(node) == 0: downnodes.append(node) self.CM.fencing_cleanup("NearQuorumPoint", stonith) if upnodes == [] and downnodes == []: self.CM.cluster_stable() # Make sure they're completely down with no residual for node in stopset: self.rsh(node, self.templates["StopCmd"]) return self.success() if len(upnodes) > 0: self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes)) if len(downnodes) > 0: self.logger.log("Warn: Unstartable nodes: " + repr(downnodes)) return self.failure() def is_applicable(self): return 1 AllTestClasses.append(NearQuorumPointTest) class RollingUpgradeTest(CTSTest): '''Perform a rolling upgrade of the cluster''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RollingUpgrade" self.start = StartTest(cm) self.stop = StopTest(cm) self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def setup(self, node): # Stop all nodes, downgrade them, then restart everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.Env["nodes"]: if not self.downgrade(node, None): return self.failure("Couldn't downgrade %s" % node) ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.Env["nodes"]: if not self.upgrade(node, None): return self.failure("Couldn't upgrade %s" % node) return self.success() def install(self, node, version, start=1, flags="--force"): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.Env["rpm-dir"], version) self.logger.log("Installing %s on %s with %s" % (version, node, flags)) if not self.stop(node): return self.failure("stop failure: "+node) rc = self.rsh(node, "mkdir -p %s" % target_dir)
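# In outline, the rest of install() stages and applies the packages roughly
# as follows (same commands as the code below; the directory layout is this
# test's own convention):
#
#     /tmp/rpm-<version>/    staging dir, populated from <rpm-dir>/<version>/*.rpm
#     rpm -Uvh <flags> /tmp/rpm-<version>/*.rpm
#
# followed by an optional cluster start on the node.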
self.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.Env["current-version"], start) def downgrade(self, node, start=1): return self.install(node, self.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not "rpm-dir" in list(self.Env.keys()): return None if not "current-version" in list(self.Env.keys()): return None if not "previous-version" in list(self.Env.keys()): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) class BSC_AddResource(CTSTest): '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "AddResource" self.resource_offset = 0 self.cib_cmd = """cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "crmd.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() ip = self.NextIP() if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.Env["DeadTime"]): return self.failure("Unstable cluster") return self.success() def NextIP(self): ip = self.Env["IPBase"] if ":" in ip: fields = ip.rpartition(":") fields[2] = str(hex(int(fields[2], 16)+1)) print(str(hex(int(f[2], 16)+1))) else: fields = ip.rpartition('.') fields[2] = str(int(fields[2])+1) ip = fields[0] + fields[1] + fields[3]; self.Env["IPBase"] = ip return ip.strip() def make_ip_resource(self, node, id, rclass, type, ip): self.logger.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint = """ """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.logger.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.logger.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.Env["DoBSC"]: return 1 return None AllTestClasses.append(BSC_AddResource) class SimulStopLite(CTSTest): '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStopLite" def __call__(self, dummy): '''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.debug("Setup: " + self.name) # We ignore the "node" parameter... 
watchpats = [ ] for node in self.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.incr("WasStarted") watchpats.append(self.templates["Pat:We_stopped"] % node) if len(watchpats) == 0: return self.success() # Stop all the nodes - at about the same time... watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.set_timer() for node in self.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) if watch.lookforall(): # Make sure they're completely down with no residual for node in self.Env["nodes"]: self.rsh(node, self.templates["StopCmd"]) return self.success() did_fail = 0 up_nodes = [] for node in self.Env["nodes"]: if self.CM.StataCM(node) == 1: did_fail = 1 up_nodes.append(node) if did_fail: return self.failure("Active nodes exist: " + repr(up_nodes)) self.logger.log("Warn: All nodes stopped but CTS didn't detect: " + repr(watch.unmatched)) return self.failure("Missing log message: "+repr(watch.unmatched)) def is_applicable(self): '''SimulStopLite is a setup test and never applicable''' return 0 class SimulStartLite(CTSTest): '''Start any stopped nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStartLite" def __call__(self, dummy): '''Perform the 'SimulStartLite' setup work. ''' self.incr("calls") self.debug("Setup: " + self.name) # We ignore the "node" parameter... node_list = [] for node in self.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.incr("WasStopped") node_list.append(node) self.set_timer() while len(node_list) > 0: # Repeat until all nodes come up watchpats = [ ] uppat = self.templates["Pat:NonDC_started"] if self.CM.upcount() == 0: uppat = self.templates["Pat:Local_started"] watchpats.append(self.templates["Pat:DC_IDLE"]) for node in node_list: watchpats.append(uppat % node) watchpats.append(self.templates["Pat:InfraUp"] % node) watchpats.append(self.templates["Pat:PacemakerUp"] % node) # Start all the nodes - at about the same time...
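# Illustrative sketch (hypothetical node names): on the first pass, with
# every node down, upcount() == 0 and uppat is Pat:Local_started, so the
# list built above looks roughly like
#
#     [ Pat:DC_IDLE,
#       Pat:Local_started % "node1", Pat:InfraUp % "node1", Pat:PacemakerUp % "node1",
#       ...one triple per node still in node_list... ]
#
# On later passes, once some node is already up, Pat:NonDC_started is used
# instead, presumably because the remaining nodes join an existing DC.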
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() stonith = self.CM.prepare_fencing_watcher(self.name) for node in node_list: self.CM.StartaCMnoBlock(node) watch.lookforall() node_list = self.CM.fencing_cleanup(self.name, stonith) if node_list is None: return self.failure("Cluster did not stabilize") # Remove node_list messages from watch.unmatched for node in node_list: self.logger.debug("Dealing with stonith operations for %s" % node) if watch.unmatched: try: watch.unmatched.remove(uppat % node) except ValueError: self.debug("Already matched: %s" % (uppat % node)) try: watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) except ValueError: self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) try: watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) except ValueError: self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) if watch.unmatched: for regex in watch.unmatched: self.logger.log("Warn: Startup pattern not found: %s" % regex) if not self.CM.cluster_stable(): return self.failure("Cluster did not stabilize") did_fail = 0 unstable = [] for node in self.Env["nodes"]: if self.CM.StataCM(node) == 0: did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstarted nodes exist: " + repr(unstable)) unstable = [] for node in self.Env["nodes"]: if not self.CM.node_stable(node): did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstable cluster nodes exist: " + repr(unstable)) return self.success() def is_applicable(self): '''SimulStartLite is a setup test and never applicable''' return 0 def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.Audits = audits result.append(bound_test) return result class RemoteLXC(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteLXC" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.num_containers = 2 self.is_container = 1 self.is_docker_unsafe = 1 self.failed = 0 self.fail_string = "" def start_lxc_simple(self, node): # restore any artifacts lying around from a previous test.
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") # generate the containers, put them in the config, add some resources to them pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) self.set_timer("remoteSimpleInit") watch.lookforall() self.log_timer("remoteSimpleInit") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 def cleanup_lxc_simple(self, node): pats = [ ] # if the test failed, attempt to clean up the cib and libvirt environment # as best as possible if self.failed == 1: # restore libvirt and cib self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") return watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") self.set_timer("remoteSimpleCleanup") watch.lookforall() self.log_timer("remoteSimpleCleanup") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 # cleanup libvirt self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") def __call__(self, node): '''Perform the 'RemoteLXC' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") if rc == 1: self.log("Environment test for lxc support failed.") return self.skipped() self.start_lxc_simple(node) self.cleanup_lxc_simple(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed == 1: return self.failure(self.fail_string) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for ping", r"pengine.*: Recover (ping|lxc-ms|container)\s*\(.*\)", # The orphaned lxc-ms resource causes an expected transition error # that is a result of the pengine not having knowledge that the # ms resource used to be a clone. As a result it looks like that # resource is running in multiple locations when it shouldn't... But in # this instance we know why this error is occurring and that it is expected. 
r"Calculated [Tt]ransition .* /var/lib/pacemaker/pengine/pe-error", r"Resource lxc-ms .* is active on 2 nodes attempting recovery", r"Unknown operation: fail", r"(ERROR|error): sending stonithRA op to stonithd failed.", r"VirtualDomain.*ERROR: Unable to determine emulator", ] AllTestClasses.append(RemoteLXC) class RemoteDriver(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = self.__class__.__name__ self.is_docker_unsafe = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.stop = StopTest(cm) self.remote_rsc = "remote-rsc" self.cib_cmd = """cibadmin -C -o %s -X '%s' """ self.reset() def reset(self): self.pcmk_started = 0 self.failed = False self.fail_string = "" self.remote_node_added = 0 self.remote_rsc_added = 0 self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False]) def fail(self, msg): """ Mark test as failed. """ self.failed = True # Always log the failure. self.logger.log(msg) # Use first failure as test status, as it's likely to be most useful. if not self.fail_string: self.fail_string = msg def get_othernode(self, node): for othernode in self.Env["nodes"]: if othernode == node: # we don't want to try and use the cib that we just shutdown. # find a cluster node that is not our soon to be remote-node. continue else: return othernode def del_rsc(self, node, rsc): othernode = self.get_othernode(node) rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) if rc != 0: self.fail("Removal of resource '%s' failed" % rsc) def add_rsc(self, node, rsc_xml): othernode = self.get_othernode(node) rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) if rc != 0: self.fail("resource creation failed") def add_primitive_rsc(self, node): rsc_xml = """ """ % (self.remote_rsc) self.add_rsc(node, rsc_xml) if not self.failed: self.remote_rsc_added = 1 def add_connection_rsc(self, node): if self.remote_use_reconnect_interval: # use reconnect interval and make sure to set cluster-recheck-interval as well. rsc_xml = """ """ % (self.remote_node, node) self.rsh(self.get_othernode(node), self.templates["SetCheckInterval"] % ("45s")) else: # not using reconnect interval rsc_xml = """ """ % (self.remote_node, node) self.add_rsc(node, rsc_xml) if not self.failed: self.remote_node_added = 1 def stop_pcmk_remote(self, node): # disable pcmk remote for i in range(10): rc = self.rsh(node, "service pacemaker_remote stop") if rc != 0: time.sleep(6) else: break def start_pcmk_remote(self, node): for i in range(10): rc = self.rsh(node, "service pacemaker_remote start") if rc != 0: time.sleep(6) else: self.pcmk_started = 1 break def kill_pcmk_remote(self, node): """ Simulate a Pacemaker Remote daemon failure. """ # We kill the process to prevent a graceful stop, # then stop it to prevent the OS from restarting it. 
self.rsh(node, "killall -9 pacemaker_remoted") self.stop_pcmk_remote(node) def start_metal(self, node): pcmk_started = 0 # make sure the resource doesn't already exist for some reason self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc)) self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node)) if not self.stop(node): self.fail("Failed to shutdown cluster node %s" % node) return self.start_pcmk_remote(node) if self.pcmk_started == 0: self.fail("Failed to start pacemaker_remote on node %s" % node) return # Convert node to baremetal now that it has shutdown the cluster stack pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) self.add_connection_rsc(node) self.set_timer("remoteMetalInit") watch.lookforall() self.log_timer("remoteMetalInit") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) def migrate_connection(self, node): if self.failed: return pats = [ ] pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node)) pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(pats, 120) watch.setwatch() (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None) if rc != 0: self.fail("failed to move remote node connection resource") return self.set_timer("remoteMetalMigrate") watch.lookforall() self.log_timer("remoteMetalMigrate") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return def fail_rsc(self, node): if self.failed: return watchpats = [ ] watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node)) watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, 120) watch.setwatch() self.debug("causing dummy rsc to fail.") rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*") self.set_timer("remoteRscFail") watch.lookforall() self.log_timer("remoteRscFail") if watch.unmatched: self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) def fail_connection(self, node): if self.failed: return watchpats = [ ] watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node) watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node) watch = self.create_watch(watchpats, 120) watch.setwatch() # force stop the pcmk remote daemon. this will result in fencing self.debug("Force stopped active remote node") self.kill_pcmk_remote(node) self.debug("Waiting for remote node to be fenced.") self.set_timer("remoteMetalFence") watch.lookforall() self.log_timer("remoteMetalFence") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return self.debug("Waiting for the remote node to come back up") self.CM.ns.WaitForNodeToComeUp(node, 120); pats = [ ] watch = self.create_watch(pats, 240) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node)) if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) # start the remote node again watch it integrate back into cluster. 
self.start_pcmk_remote(node) if self.pcmk_started == 0: self.fail("Failed to start pacemaker_remote on node %s" % node) return self.debug("Waiting for remote node to rejoin cluster after being fenced.") self.set_timer("remoteMetalRestart") watch.lookforall() self.log_timer("remoteMetalRestart") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) return def add_dummy_rsc(self, node): if self.failed: return # verify we can put a resource on the remote node pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) # Add a resource that must live on remote-node self.add_primitive_rsc(node) # force that rsc to prefer the remote node. (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None) if rc != 0: self.fail("Failed to place remote resource on remote node.") return self.set_timer("remoteMetalRsc") watch.lookforall() self.log_timer("remoteMetalRsc") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) def test_attributes(self, node): if self.failed: return # This verifies permanent attributes can be set on a remote-node. It also # verifies the remote-node can edit its own cib node section remotely. (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line)) return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to get remote-node attribute") return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None) if rc != 0: self.fail("Failed to delete remote-node attribute") return def cleanup_metal(self, node): if self.pcmk_started == 0: return pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc)) if self.remote_node_added == 1: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node)) self.set_timer("remoteMetalCleanup") if self.remote_use_reconnect_interval: self.debug("Cleaning up re-check interval") self.rsh(self.get_othernode(node), self.templates["ClearCheckInterval"]) if self.remote_rsc_added == 1: # Remove dummy resource added for remote node tests self.debug("Cleaning up dummy rsc put on remote node") self.rsh(node, "crm_resource -U -r %s" % self.remote_rsc) self.del_rsc(node, self.remote_rsc) if self.remote_node_added == 1: # Remove remote node's connection resource self.debug("Cleaning up remote node connection resource") self.rsh(node, "crm_resource -U -r %s" % (self.remote_node)) self.del_rsc(node, self.remote_node) watch.lookforall() self.log_timer("remoteMetalCleanup") if watch.unmatched: self.fail("Unmatched patterns: %s" % watch.unmatched) self.stop_pcmk_remote(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.remote_node_added == 1: # Remove remote node itself self.debug("Cleaning up node entry for remote node") self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node) def setup_env(self, node): self.remote_node = "remote-%s" % (node) # we are assuming if all nodes have a key, that it is # the right key... If any node doesn't have a remote # key, we regenerate it everywhere. 
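# A condensed sketch of the provisioning below (same calls as the real code;
# paths and ownership follow the constants used in this file):
#
#     if authkey exists on all nodes: nothing to do
#     else:
#         dd if=/dev/urandom of=<tempfile> bs=4096 count=1
#         for each node:
#             mkdir -p --mode=0750 /etc/pacemaker
#             copy <tempfile> to node:/etc/pacemaker/authkey
#             chgrp haclient; chmod 0640
#
# pacemaker_remote connections authenticate with this shared key, so it has
# to be identical on every node.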
if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]): return # create key locally (handle, keyfile) = tempfile.mkstemp(".cts") os.close(handle) devnull = open(os.devnull, 'wb') subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"], stdout=devnull, stderr=devnull) devnull.close() # sync key throughout the cluster for node in self.Env["nodes"]: self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker") self.rsh.cp(keyfile, "root@%s:/etc/pacemaker/authkey" % node) self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey") self.rsh(node, "chmod 0640 /etc/pacemaker/authkey") os.unlink(keyfile) def is_applicable(self): if not self.is_applicable_common(): return False for node in self.Env["nodes"]: rc = self.rsh(node, "type pacemaker_remoted >/dev/null 2>&1") if rc != 0: return False return True def start_new_test(self, node): self.incr("calls") self.reset() ret = self.startall(None) if not ret: return self.failure("setup failed: could not start all nodes") self.setup_env(node) self.start_metal(node) self.add_dummy_rsc(node) return True def __call__(self, node): return self.failure("This base class is not meant to be called directly.") def errorstoignore(self): '''Return list of errors which should be ignored''' return [ """is running on remote.*which isn't allowed""", """Connection terminated""", """Failed to send remote""", ] # RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses class RemoteBasic(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteBaremetal' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.test_attributes(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteBasic) class RemoteStonithd(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteStonithd' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.fail_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def is_applicable(self): if not RemoteDriver.is_applicable(self): return False if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return True def errorstoignore(self): ignore_pats = [ - r"Unexpected disconnect on remote-node", + r"Lost connection to Pacemaker Remote node", + r"Software caused connection abort", r"crmd.*:\s+error.*: Operation remote-.*_monitor", r"crmd.*:\s+error.*: Result of monitor operation for remote-.*", r"pengine.*:\s+Recover remote-.*\s*\(.*\)", r"Calculated [Tt]ransition .* /var/lib/pacemaker/pengine/pe-error", r"error.*: Resource .*ocf::.* is active on 2 nodes attempting recovery", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats AllTestClasses.append(RemoteStonithd) class RemoteMigrate(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteMigrate' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) self.migrate_connection(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() AllTestClasses.append(RemoteMigrate) class RemoteRscFailure(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteRscFailure' test. 
''' if not self.start_new_test(node): return self.failure(self.fail_string) # This is an important step. We are migrating the connection # before failing the resource. This verifies that the migration # has properly maintained control over the remote-node. self.migrate_connection(node) self.fail_rsc(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def errorstoignore(self): ignore_pats = [ r"pengine.*: Recover remote-rsc\s*\(.*\)", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats AllTestClasses.append(RemoteRscFailure) # vim:ts=4:sw=4:et: diff --git a/include/crm/common/ipcs.h b/include/crm/common/ipcs.h index 87a34e59ec..2adc96f635 100644 --- a/include/crm/common/ipcs.h +++ b/include/crm/common/ipcs.h @@ -1,136 +1,137 @@ /* * Copyright (C) 2013 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_COMMON_IPCS__H # define CRM_COMMON_IPCS__H #ifdef __cplusplus extern "C" { #endif # include # include # ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include # endif # include # include typedef struct crm_client_s crm_client_t; enum client_type { CRM_CLIENT_IPC = 1, CRM_CLIENT_TCP = 2, # ifdef HAVE_GNUTLS_GNUTLS_H CRM_CLIENT_TLS = 3, # endif }; struct crm_remote_s { /* Shared */ char *buffer; size_t buffer_size; size_t buffer_offset; int auth_timeout; int tcp_socket; mainloop_io_t *source; /* CIB-only */ bool authenticated; char *token; /* TLS only */ # ifdef HAVE_GNUTLS_GNUTLS_H gnutls_session_t *tls_session; bool tls_handshake_complete; # endif }; enum crm_client_flags { crm_client_flag_ipc_proxied = 0x00001, /* ipc_proxy code only */ crm_client_flag_ipc_privileged = 0x00002, /* root or cluster user */ }; struct crm_client_s { uint pid; uid_t uid; gid_t gid; char *id; char *name; char *user; /* Provided for server use (not used by library) */ /* @TODO merge options, flags, and kind (reserving lower bits for server) */ long long options; int request_id; uint32_t flags; void *userdata; int event_timer; GList *event_queue; /* @TODO use GQueue instead */ /* Depending on the value of kind, only some of the following * will be populated/valid */ enum client_type kind; qb_ipcs_connection_t *ipcs; /* IPC */ struct crm_remote_s *remote; /* TCP/TLS */ unsigned int queue_backlog; /* IPC queue length after last flush */ unsigned int queue_max; /* Evict client whose queue grows this big */ }; extern GHashTable *client_connections; void crm_client_init(void); void crm_client_cleanup(void); crm_client_t *crm_client_get(qb_ipcs_connection_t * c); crm_client_t *crm_client_get_by_id(const char *id); const char *crm_client_name(crm_client_t * c); +const char *crm_client_type_text(enum client_type client_type); crm_client_t *crm_client_alloc(void *key); crm_client_t 
*crm_client_new(qb_ipcs_connection_t * c, uid_t uid, gid_t gid); void crm_client_destroy(crm_client_t * c); void crm_client_disconnect_all(qb_ipcs_service_t *s); bool crm_set_client_queue_max(crm_client_t *client, const char *qmax); void crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, const char *tag, const char *function, int line); /* when max_send_size is 0, default ipc buffer size is used */ ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size); ssize_t crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message, enum crm_ipc_flags flags); ssize_t crm_ipcs_sendv(crm_client_t * c, struct iovec *iov, enum crm_ipc_flags flags); xmlNode *crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags); int crm_ipcs_client_pid(qb_ipcs_connection_t * c); #ifdef __cplusplus } #endif #endif diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h index 0d2d9fc27d..9762f47fb3 100644 --- a/include/crm/lrmd.h +++ b/include/crm/lrmd.h @@ -1,530 +1,532 @@ /* * Copyright (c) 2012 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #ifndef LRMD__H # define LRMD__H #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Local Resource Manager * \ingroup lrmd */ #include #include #include typedef struct lrmd_s lrmd_t; typedef struct lrmd_key_value_s { char *key; char *value; struct lrmd_key_value_s *next; } lrmd_key_value_t; /* This should be bumped every time there is an incompatible change that * prevents older clients from connecting to this version of the server. */ #define LRMD_PROTOCOL_VERSION "1.1" /* This is the version that the client version will actually be compared * against. This should be identical to LRMD_PROTOCOL_VERSION. However, we * accidentally bumped LRMD_PROTOCOL_VERSION in 6424a647 (1.1.15) when we didn't * need to, so for now it's different. If we ever have a truly incompatible * bump, we can drop this and compare against LRMD_PROTOCOL_VERSION. 
*/ #define LRMD_MIN_PROTOCOL_VERSION "1.0" /* *INDENT-OFF* */ #define DEFAULT_REMOTE_KEY_LOCATION PACEMAKER_CONFIG_DIR "/authkey" #define ALT_REMOTE_KEY_LOCATION "/etc/corosync/authkey" #define DEFAULT_REMOTE_PORT 3121 #define DEFAULT_REMOTE_USERNAME "lrmd" #define F_LRMD_OPERATION "lrmd_op" #define F_LRMD_CLIENTNAME "lrmd_clientname" #define F_LRMD_IS_IPC_PROVIDER "lrmd_is_ipc_provider" #define F_LRMD_CLIENTID "lrmd_clientid" #define F_LRMD_PROTOCOL_VERSION "lrmd_protocol_version" #define F_LRMD_REMOTE_MSG_TYPE "lrmd_remote_msg_type" #define F_LRMD_REMOTE_MSG_ID "lrmd_remote_msg_id" #define F_LRMD_CALLBACK_TOKEN "lrmd_async_id" #define F_LRMD_CALLID "lrmd_callid" #define F_LRMD_CALLOPTS "lrmd_callopt" #define F_LRMD_CALLDATA "lrmd_calldata" #define F_LRMD_RC "lrmd_rc" #define F_LRMD_EXEC_RC "lrmd_exec_rc" #define F_LRMD_OP_STATUS "lrmd_exec_op_status" #define F_LRMD_TIMEOUT "lrmd_timeout" #define F_LRMD_WATCHDOG "lrmd_watchdog" #define F_LRMD_CLASS "lrmd_class" #define F_LRMD_PROVIDER "lrmd_provider" #define F_LRMD_TYPE "lrmd_type" #define F_LRMD_ORIGIN "lrmd_origin" #define F_LRMD_RSC_RUN_TIME "lrmd_run_time" #define F_LRMD_RSC_RCCHANGE_TIME "lrmd_rcchange_time" #define F_LRMD_RSC_EXEC_TIME "lrmd_exec_time" #define F_LRMD_RSC_QUEUE_TIME "lrmd_queue_time" #define F_LRMD_RSC_ID "lrmd_rsc_id" #define F_LRMD_RSC_ACTION "lrmd_rsc_action" #define F_LRMD_RSC_USERDATA_STR "lrmd_rsc_userdata_str" #define F_LRMD_RSC_OUTPUT "lrmd_rsc_output" #define F_LRMD_RSC_EXIT_REASON "lrmd_rsc_exit_reason" #define F_LRMD_RSC_START_DELAY "lrmd_rsc_start_delay" #define F_LRMD_RSC_INTERVAL "lrmd_rsc_interval" #define F_LRMD_RSC_DELETED "lrmd_rsc_deleted" #define F_LRMD_RSC "lrmd_rsc" #define F_LRMD_ALERT_ID "lrmd_alert_id" #define F_LRMD_ALERT_PATH "lrmd_alert_path" #define F_LRMD_ALERT "lrmd_alert" #define LRMD_OP_RSC_REG "lrmd_rsc_register" #define LRMD_OP_RSC_EXEC "lrmd_rsc_exec" #define LRMD_OP_RSC_CANCEL "lrmd_rsc_cancel" #define LRMD_OP_RSC_UNREG "lrmd_rsc_unregister" #define LRMD_OP_RSC_INFO "lrmd_rsc_info" #define LRMD_OP_RSC_METADATA "lrmd_rsc_metadata" #define LRMD_OP_POKE "lrmd_rsc_poke" #define LRMD_OP_NEW_CLIENT "lrmd_rsc_new_client" #define LRMD_OP_CHECK "lrmd_check" #define LRMD_OP_ALERT_EXEC "lrmd_alert_exec" #define LRMD_IPC_OP_NEW "new" #define LRMD_IPC_OP_DESTROY "destroy" #define LRMD_IPC_OP_EVENT "event" #define LRMD_IPC_OP_REQUEST "request" #define LRMD_IPC_OP_RESPONSE "response" #define LRMD_IPC_OP_SHUTDOWN_REQ "shutdown_req" #define LRMD_IPC_OP_SHUTDOWN_ACK "shutdown_ack" #define LRMD_IPC_OP_SHUTDOWN_NACK "shutdown_nack" #define F_LRMD_IPC_OP "lrmd_ipc_op" #define F_LRMD_IPC_IPC_SERVER "lrmd_ipc_server" #define F_LRMD_IPC_SESSION "lrmd_ipc_session" #define F_LRMD_IPC_CLIENT "lrmd_ipc_client" #define F_LRMD_IPC_USER "lrmd_ipc_user" #define F_LRMD_IPC_MSG "lrmd_ipc_msg" #define F_LRMD_IPC_MSG_ID "lrmd_ipc_msg_id" #define F_LRMD_IPC_MSG_FLAGS "lrmd_ipc_msg_flags" #define T_LRMD "lrmd" #define T_LRMD_REPLY "lrmd_reply" #define T_LRMD_NOTIFY "lrmd_notify" #define T_LRMD_IPC_PROXY "lrmd_ipc_proxy" /* *INDENT-ON* */ /*! * \brief Create a new local lrmd connection */ lrmd_t *lrmd_api_new(void); /*! * \brief Create a new remote lrmd connection using tls backend * * \param nodename name of remote node identified with this connection * \param server name of server to connect to * \param port port number to connect to * * \note nodename and server may be the same value. */ lrmd_t *lrmd_remote_api_new(const char *nodename, const char *server, int port); /*! 
* \brief Use after lrmd_poll returns 1 to read and dispatch a message * * \param[in,out] lrmd lrmd connection object * * \return TRUE if connection is still up, FALSE if disconnected */ bool lrmd_dispatch(lrmd_t * lrmd); /*! * \brief Poll for a specified timeout period to determine if a message * is ready for dispatch. * \retval 1 msg is ready * \retval 0 timeout occurred * \retval negative error code */ int lrmd_poll(lrmd_t * lrmd, int timeout); /*! * \brief Destroy lrmd object */ void lrmd_api_delete(lrmd_t * lrmd); lrmd_key_value_t *lrmd_key_value_add(lrmd_key_value_t * kvp, const char *key, const char *value); /* *INDENT-OFF* */ /* Reserved for future use */ enum lrmd_call_options { lrmd_opt_none = 0x00000000, /* lrmd_opt_sync_call = 0x00000001, //Not implemented, patches welcome. */ /*! Only notify the client originating a exec() the results */ lrmd_opt_notify_orig_only = 0x00000002, /*! Drop recurring operations initiated by a client when client disconnects. * This call_option is only valid when registering a resource. When used * remotely with the pacemaker_remote daemon, this option means that recurring * operations will be dropped once all the remote connections disconnect. */ lrmd_opt_drop_recurring = 0x00000003, /*! Send notifications for recurring operations only when the result changes */ lrmd_opt_notify_changes_only = 0x00000004, }; enum lrmd_callback_event { lrmd_event_register, lrmd_event_unregister, lrmd_event_exec_complete, lrmd_event_disconnect, lrmd_event_connect, lrmd_event_poke, lrmd_event_new_client, }; /* *INDENT-ON* */ typedef struct lrmd_event_data_s { /*! Type of event, register, unregister, call_completed... */ enum lrmd_callback_event type; /*! The resource this event occurred on. */ const char *rsc_id; /*! The action performed, start, stop, monitor... */ const char *op_type; /*! The userdata string given do exec() api function */ const char *user_data; /*! The client api call id associated with this event */ int call_id; /*! The operation's timeout period in ms. */ int timeout; /*! The operation's recurring interval in ms. */ int interval; /*! The operation's start delay value in ms. */ int start_delay; /*! This operation that just completed is on a deleted rsc. */ int rsc_deleted; /*! The executed ra return code mapped to OCF */ enum ocf_exitcode rc; /*! The lrmd status returned for exec_complete events */ int op_status; /*! stdout from resource agent operation */ const char *output; /*! Timestamp of when op ran */ unsigned int t_run; /*! Timestamp of last rc change */ unsigned int t_rcchange; /*! Time in length op took to execute */ unsigned int exec_time; /*! Time in length spent in queue */ unsigned int queue_time; /*! int connection result. Used for connection and poke events */ int connection_rc; /* This is a GHashTable containing the * parameters given to the operation */ void *params; /*! client node name associated with this connection * (used to match actions to the proper client when there are multiple) */ const char *remote_nodename; /*! 
exit failure reason string from resource agent operation */ const char *exit_reason; } lrmd_event_data_t; lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t * event); void lrmd_free_event(lrmd_event_data_t * event); typedef struct lrmd_rsc_info_s { char *id; char *type; char *standard; char *provider; } lrmd_rsc_info_t; +lrmd_rsc_info_t *lrmd_new_rsc_info(const char *rsc_id, const char *standard, + const char *provider, const char *type); lrmd_rsc_info_t *lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info); void lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info); typedef void (*lrmd_event_callback) (lrmd_event_data_t * event); typedef struct lrmd_list_s { const char *val; struct lrmd_list_s *next; } lrmd_list_t; void lrmd_list_freeall(lrmd_list_t * head); void lrmd_key_value_freeall(lrmd_key_value_t * head); typedef struct lrmd_api_operations_s { /*! * \brief Connect from the lrmd. * * \retval 0, success * \retval negative error code on failure */ int (*connect) (lrmd_t * lrmd, const char *client_name, int *fd); /*! * \brief Establish an connection to lrmd, don't block while connecting. * \note this function requires the use of mainloop. * * \note The is returned using the event callback. * \note When this function returns 0, the callback will be invoked * to report the final result of the connect. * \retval 0, connect in progress, wait for event callback * \retval -1, failure. */ int (*connect_async) (lrmd_t * lrmd, const char *client_name, int timeout /*ms */ ); /*! * \brief Is connected to lrmd daemon? * * \retval 0, false * \retval 1, true */ int (*is_connected) (lrmd_t * lrmd); /*! * \brief Poke lrmd connection to verify it is still capable of serving requests * \note The response comes in the form of a poke event to the callback. * * \retval 0, wait for response in callback * \retval -1, connection failure, callback may not be invoked */ int (*poke_connection) (lrmd_t * lrmd); /*! * \brief Disconnect from the lrmd. * * \retval 0, success * \retval negative error code on failure */ int (*disconnect) (lrmd_t * lrmd); /*! * \brief Register a resource with the lrmd. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*register_rsc) (lrmd_t * lrmd, const char *rsc_id, const char *standard, const char *provider, const char *agent, enum lrmd_call_options options); /*! * \brief Retrieve registration info for a rsc * * \retval info on success * \retval NULL on failure */ lrmd_rsc_info_t *(*get_rsc_info) (lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options); /*! * \brief Unregister a resource from the lrmd. * * \note All pending and recurring operations will be cancelled * automatically. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval -1, success, but operations are currently executing on the rsc which will * return once they are completed. * \retval negative error code on failure * */ int (*unregister_rsc) (lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options); /*! * \brief Sets the callback to receive lrmd events on. */ void (*set_callback) (lrmd_t * lrmd, lrmd_event_callback callback); /*! * \brief Issue a command on a resource * * \note Asynchronous, command is queued in daemon on function return, but * execution of command is not synced. * * \note Operations on individual resources are guaranteed to occur * in the order the client api calls them in. 
* * \note Operations between different resources are not guaranteed * to occur in any specific order in relation to one another * regardless of what order the client api is called in. * \retval call_id to track async event result on success * \retval negative error code on failure */ int (*exec) (lrmd_t * lrmd, const char *rsc_id, const char *action, const char *userdata, /* userdata string given back in event notification */ int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ enum lrmd_call_options options, lrmd_key_value_t * params); /* ownership of params is given up to api here */ /*! * \brief Cancel a recurring command. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \note The cancel is completed async from this call. * We can be guaranteed the cancel has completed once * the callback receives an exec_complete event with * the lrmd_op_status signifying that the operation is * cancelled. * \note For each resource, cancel operations and exec operations * are processed in the order they are received. * It is safe to assume that for a single resource, a cancel * will occur in the lrmd before an exec if the client's cancel * api call occurs before the exec api call. * * It is not however safe to assume any operation on one resource will * occur before an operation on another resource regardless of * the order the client api is called in. * * \retval 0, cancel command sent. * \retval negative error code on failure */ int (*cancel) (lrmd_t * lrmd, const char *rsc_id, const char *action, int interval); /*! * \brief Get resource metadata for a specified resource agent * * \param[in] lrmd LRMD connection (unused) * \param[in] class Resource agent class * \param[in] provider Resource agent provider * \param[in] agent Resource agent type * \param[out] output Metadata will be stored here (must not be NULL) * \param[in] options Options to use with any LRMD API calls (unused) * * \note Caller is responsible for freeing output. This call is currently * always synchronous (blocking), and always done directly by the * library (not via the LRMD connection). This means that it is based * on the local host environment, even if the lrmd connection is to a * remote node, so (for most resource agent classes) this will fail if * the agent is not installed locally. This also means that, if an * external agent must be executed, it will be executed by the * caller's user, not the lrmd's. * \todo Add a metadata call to the LRMD API and let the server handle this. * * \retval lrmd_ok success * \retval negative error code on failure */ int (*get_metadata) (lrmd_t * lrmd, const char *standard, const char *provider, const char *agent, char **output, enum lrmd_call_options options); /*! * \brief Retrieve a list of installed resource agents. * * \note if class is not provided, all known agents will be returned * \note list must be freed using lrmd_list_freeall() * * \retval num items in list on success * \retval negative error code on failure */ int (*list_agents) (lrmd_t * lrmd, lrmd_list_t ** agents, const char *standard, const char *provider); /*! * \brief Retrieve a list of resource agent providers * * \note When the agent is provided, only the agent's provider will be returned * \note When no agent is supplied, all providers will be returned. 
* \note List must be freed using lrmd_list_freeall() * * \retval num items in list on success * \retval negative error code on failure */ int (*list_ocf_providers) (lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers); /*! * \brief Retrieve a list of standards supported by this machine/installation * * \note List must be freed using lrmd_list_freeall() * * \retval num items in list on success * \retval negative error code on failure */ int (*list_standards) (lrmd_t * lrmd, lrmd_list_t ** standards); /*! * \brief Execute an alert agent * * \note Asynchronous, command is queued in daemon on function return, but * execution of command is not synced. * * \note Operations on individual alerts are guaranteed to occur * in the order the client api calls them in. * * \note Operations between different alerts are not guaranteed * to occur in any specific order in relation to one another * regardless of what order the client api is called in. * \retval call_id to track async event result on success * \retval negative error code on failure */ int (*exec_alert) (lrmd_t *lrmd, const char *alert_id, const char *alert_path, int timeout, /* ms */ lrmd_key_value_t *params); /* ownership of params is given up to api here */ } lrmd_api_operations_t; struct lrmd_s { lrmd_api_operations_t *cmds; void *lrmd_private; }; static inline const char * lrmd_event_type2str(enum lrmd_callback_event type) { switch (type) { case lrmd_event_register: return "register"; case lrmd_event_unregister: return "unregister"; case lrmd_event_exec_complete: return "exec_complete"; case lrmd_event_disconnect: return "disconnect"; case lrmd_event_connect: return "connect"; case lrmd_event_poke: return "poke"; case lrmd_event_new_client: return "new_client"; } return "unknown"; } #ifdef __cplusplus } #endif #endif diff --git a/lib/common/ipc.c b/lib/common/ipc.c index ff38d05893..8278dba0d1 100644 --- a/lib/common/ipc.c +++ b/lib/common/ipc.c @@ -1,1341 +1,1358 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PCMK_IPC_VERSION 1 /* Evict clients whose event queue grows this large (by default) */ #define PCMK_IPC_DEFAULT_QUEUE_MAX 500 struct crm_ipc_response_header { struct qb_ipc_response_header qb; uint32_t size_uncompressed; uint32_t size_compressed; uint32_t flags; uint8_t version; /* Protect against version changes for anyone that might bother to statically link us */ }; static int hdr_offset = 0; static unsigned int ipc_buffer_max = 0; static unsigned int pick_ipc_buffer(unsigned int max); static inline void crm_ipc_init(void) { if (hdr_offset == 0) { hdr_offset = sizeof(struct crm_ipc_response_header); } if (ipc_buffer_max == 0) { ipc_buffer_max = pick_ipc_buffer(0); } } unsigned int crm_ipc_default_buffer_size(void) { return pick_ipc_buffer(0); } static char * generateReference(const char *custom1, const char *custom2) { static uint ref_counter = 0; return crm_strdup_printf("%s-%s-%lu-%u", (custom1? custom1 : "_empty_"), (custom2? custom2 : "_empty_"), (unsigned long)time(NULL), ref_counter++); } xmlNode * create_request_adv(const char *task, xmlNode * msg_data, const char *host_to, const char *sys_to, const char *sys_from, const char *uuid_from, const char *origin) { char *true_from = NULL; xmlNode *request = NULL; char *reference = generateReference(task, sys_from); if (uuid_from != NULL) { true_from = generate_hash_key(sys_from, uuid_from); } else if (sys_from != NULL) { true_from = strdup(sys_from); } else { crm_err("No sys from specified"); } /* host_from will get set for us if necessary by CRMd when routed */ request = create_xml_node(NULL, __FUNCTION__); crm_xml_add(request, F_CRM_ORIGIN, origin); crm_xml_add(request, F_TYPE, T_CRM); crm_xml_add(request, F_CRM_VERSION, CRM_FEATURE_SET); crm_xml_add(request, F_CRM_MSG_TYPE, XML_ATTR_REQUEST); crm_xml_add(request, F_CRM_REFERENCE, reference); crm_xml_add(request, F_CRM_TASK, task); crm_xml_add(request, F_CRM_SYS_TO, sys_to); crm_xml_add(request, F_CRM_SYS_FROM, true_from); /* HOSTTO will be ignored if it is to the DC anyway. 
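
   As an illustration (a sketch only; the task and subsystem names here
   are examples, not required values), a caller might build a request
   for the CRM daemon with:

       xmlNode *req = create_request_adv("noop", NULL, NULL, "crmd",
                                         "tengine", NULL, __FUNCTION__);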
*/ if (host_to != NULL && strlen(host_to) > 0) { crm_xml_add(request, F_CRM_HOST_TO, host_to); } if (msg_data != NULL) { add_message_xml(request, F_CRM_DATA, msg_data); } free(reference); free(true_from); return request; } /* * This method adds a copy of xml_response_data */ xmlNode * create_reply_adv(xmlNode * original_request, xmlNode * xml_response_data, const char *origin) { xmlNode *reply = NULL; const char *host_from = crm_element_value(original_request, F_CRM_HOST_FROM); const char *sys_from = crm_element_value(original_request, F_CRM_SYS_FROM); const char *sys_to = crm_element_value(original_request, F_CRM_SYS_TO); const char *type = crm_element_value(original_request, F_CRM_MSG_TYPE); const char *operation = crm_element_value(original_request, F_CRM_TASK); const char *crm_msg_reference = crm_element_value(original_request, F_CRM_REFERENCE); if (type == NULL) { crm_err("Cannot create new_message, no message type in original message"); CRM_ASSERT(type != NULL); return NULL; #if 0 } else if (strcasecmp(XML_ATTR_REQUEST, type) != 0) { crm_err("Cannot create new_message, original message was not a request"); return NULL; #endif } reply = create_xml_node(NULL, __FUNCTION__); if (reply == NULL) { crm_err("Cannot create new_message, malloc failed"); return NULL; } crm_xml_add(reply, F_CRM_ORIGIN, origin); crm_xml_add(reply, F_TYPE, T_CRM); crm_xml_add(reply, F_CRM_VERSION, CRM_FEATURE_SET); crm_xml_add(reply, F_CRM_MSG_TYPE, XML_ATTR_RESPONSE); crm_xml_add(reply, F_CRM_REFERENCE, crm_msg_reference); crm_xml_add(reply, F_CRM_TASK, operation); /* since this is a reply, we reverse the from and to */ crm_xml_add(reply, F_CRM_SYS_TO, sys_from); crm_xml_add(reply, F_CRM_SYS_FROM, sys_to); /* HOSTTO will be ignored if it is to the DC anyway. */ if (host_from != NULL && strlen(host_from) > 0) { crm_xml_add(reply, F_CRM_HOST_TO, host_from); } if (xml_response_data != NULL) { add_message_xml(reply, F_CRM_DATA, xml_response_data); } return reply; } /* Libqb based IPC */ /* Server... 
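
   Each server-side connection is tracked as a crm_client_t in the
   client_connections hash table below, keyed by its
   qb_ipcs_connection_t pointer (or, for remote clients, by the
   generated client ID).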
*/ GHashTable *client_connections = NULL; crm_client_t * crm_client_get(qb_ipcs_connection_t * c) { if (client_connections) { return g_hash_table_lookup(client_connections, c); } crm_trace("No client found for %p", c); return NULL; } crm_client_t * crm_client_get_by_id(const char *id) { gpointer key; crm_client_t *client; GHashTableIter iter; if (client_connections && id) { g_hash_table_iter_init(&iter, client_connections); while (g_hash_table_iter_next(&iter, &key, (gpointer *) & client)) { if (strcmp(client->id, id) == 0) { return client; } } } crm_trace("No client found with id=%s", id); return NULL; } const char * crm_client_name(crm_client_t * c) { if (c == NULL) { return "null"; } else if (c->name == NULL && c->id == NULL) { return "unknown"; } else if (c->name == NULL) { return c->id; } else { return c->name; } } +const char * +crm_client_type_text(enum client_type client_type) +{ + switch (client_type) { + case CRM_CLIENT_IPC: + return "IPC"; + case CRM_CLIENT_TCP: + return "TCP"; +#ifdef HAVE_GNUTLS_GNUTLS_H + case CRM_CLIENT_TLS: + return "TLS"; +#endif + default: + return "unknown"; + } +} + void crm_client_init(void) { if (client_connections == NULL) { crm_trace("Creating client hash table"); client_connections = g_hash_table_new(g_direct_hash, g_direct_equal); } } void crm_client_cleanup(void) { if (client_connections != NULL) { int active = g_hash_table_size(client_connections); if (active) { crm_err("Exiting with %d active connections", active); } g_hash_table_destroy(client_connections); client_connections = NULL; } } void crm_client_disconnect_all(qb_ipcs_service_t *service) { qb_ipcs_connection_t *c = NULL; if (service == NULL) { return; } c = qb_ipcs_connection_first_get(service); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(service, last); /* There really shouldn't be anyone connected at this point */ crm_notice("Disconnecting client %p, pid=%d...", last, crm_ipcs_client_pid(last)); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); } } /*! * \brief Allocate a new crm_client_t object and generate its ID * * \param[in] key What to use as connections hash table key (NULL to use ID) * * \return Pointer to new crm_client_t (asserts on failure) */ crm_client_t * crm_client_alloc(void *key) { crm_client_t *client = calloc(1, sizeof(crm_client_t)); CRM_ASSERT(client != NULL); client->id = crm_generate_uuid(); g_hash_table_insert(client_connections, (key? 
key : client->id), client); return client; } crm_client_t * crm_client_new(qb_ipcs_connection_t * c, uid_t uid_client, gid_t gid_client) { static gid_t uid_cluster = 0; static gid_t gid_cluster = 0; crm_client_t *client = NULL; CRM_LOG_ASSERT(c); if (c == NULL) { return NULL; } if (uid_cluster == 0) { if (crm_user_lookup(CRM_DAEMON_USER, &uid_cluster, &gid_cluster) < 0) { static bool have_error = FALSE; if(have_error == FALSE) { crm_warn("Could not find user and group IDs for user %s", CRM_DAEMON_USER); have_error = TRUE; } } } if (uid_client != 0) { crm_trace("Giving access to group %u", gid_cluster); /* Passing -1 to chown(2) means don't change */ qb_ipcs_connection_auth_set(c, -1, gid_cluster, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); } crm_client_init(); /* TODO: Do our own auth checking, return NULL if unauthorized */ client = crm_client_alloc(c); client->ipcs = c; client->kind = CRM_CLIENT_IPC; client->pid = crm_ipcs_client_pid(c); if ((uid_client == 0) || (uid_client == uid_cluster)) { /* Remember when a connection came from root or hacluster */ set_bit(client->flags, crm_client_flag_ipc_privileged); } crm_debug("Connecting %p for uid=%d gid=%d pid=%u id=%s", c, uid_client, gid_client, client->pid, client->id); #if ENABLE_ACL client->user = uid2username(uid_client); #endif return client; } void crm_client_destroy(crm_client_t * c) { if (c == NULL) { return; } if (client_connections) { if (c->ipcs) { crm_trace("Destroying %p/%p (%d remaining)", c, c->ipcs, crm_hash_table_size(client_connections) - 1); g_hash_table_remove(client_connections, c->ipcs); } else { crm_trace("Destroying remote connection %p (%d remaining)", c, crm_hash_table_size(client_connections) - 1); g_hash_table_remove(client_connections, c->id); } } if (c->event_timer) { g_source_remove(c->event_timer); } crm_debug("Destroying %d events", g_list_length(c->event_queue)); while (c->event_queue) { struct iovec *event = c->event_queue->data; c->event_queue = g_list_remove(c->event_queue, event); free(event[0].iov_base); free(event[1].iov_base); free(event); } free(c->id); free(c->name); free(c->user); if (c->remote) { if (c->remote->auth_timeout) { g_source_remove(c->remote->auth_timeout); } free(c->remote->buffer); free(c->remote); } free(c); } /*! * \brief Raise IPC eviction threshold for a client, if allowed * * \param[in,out] client Client to modify * \param[in] queue_max New threshold (as string) * * \return TRUE if change was allowed, FALSE otherwise */ bool crm_set_client_queue_max(crm_client_t *client, const char *qmax) { if (is_set(client->flags, crm_client_flag_ipc_privileged)) { int qmax_int = crm_int_helper(qmax, NULL); if ((errno == 0) && (qmax_int > 0)) { client->queue_max = qmax_int; return TRUE; } } return FALSE; } int crm_ipcs_client_pid(qb_ipcs_connection_t * c) { struct qb_ipcs_connection_stats stats; stats.client_pid = 0; qb_ipcs_connection_stats_get(c, &stats, 0); return stats.client_pid; } xmlNode * crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags) { xmlNode *xml = NULL; char *uncompressed = NULL; char *text = ((char *)data) + sizeof(struct crm_ipc_response_header); struct crm_ipc_response_header *header = data; if (id) { *id = ((struct qb_ipc_response_header *)data)->id; } if (flags) { *flags = header->flags; } if (is_set(header->flags, crm_ipc_proxied)) { /* mark this client as being the endpoint of a proxy connection. 
* Proxy connections responses are sent on the event channel to avoid * blocking the proxy daemon (crmd) */ c->flags |= crm_client_flag_ipc_proxied; } if(header->version > PCMK_IPC_VERSION) { crm_err("Filtering incompatible v%d IPC message, we only support versions <= %d", header->version, PCMK_IPC_VERSION); return NULL; } if (header->size_compressed) { int rc = 0; unsigned int size_u = 1 + header->size_uncompressed; uncompressed = calloc(1, size_u); crm_trace("Decompressing message data %u bytes into %u bytes", header->size_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0); text = uncompressed; if (rc != BZ_OK) { crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", bz2_strerror(rc), rc); free(uncompressed); return NULL; } } CRM_ASSERT(text[header->size_uncompressed - 1] == 0); crm_trace("Received %.200s", text); xml = string2xml(text); free(uncompressed); return xml; } ssize_t crm_ipcs_flush_events(crm_client_t * c); static gboolean crm_ipcs_flush_events_cb(gpointer data) { crm_client_t *c = data; c->event_timer = 0; crm_ipcs_flush_events(c); return FALSE; } /*! * \internal * \brief Add progressive delay before next event queue flush * * \param[in,out] c Client connection to add delay to * \param[in] queue_len Current event queue length */ static inline void delay_next_flush(crm_client_t *c, unsigned int queue_len) { /* Delay a maximum of 5 seconds */ guint delay = (queue_len < 40)? (1000 + 100 * queue_len) : 5000; c->event_timer = g_timeout_add(delay, crm_ipcs_flush_events_cb, c); } ssize_t crm_ipcs_flush_events(crm_client_t * c) { ssize_t rc = 0; unsigned int sent = 0; unsigned int queue_len = 0; if (c == NULL) { return pcmk_ok; } else if (c->event_timer) { /* There is already a timer, wait until it goes off */ crm_trace("Timer active for %p - %d", c->ipcs, c->event_timer); return pcmk_ok; } queue_len = g_list_length(c->event_queue); while (c->event_queue && sent < 100) { struct crm_ipc_response_header *header = NULL; struct iovec *event = c->event_queue->data; rc = qb_ipcs_event_sendv(c->ipcs, event, 2); if (rc < 0) { break; } sent++; header = event[0].iov_base; if (header->size_compressed) { crm_trace("Event %d to %p[%d] (%lld compressed bytes) sent", header->qb.id, c->ipcs, c->pid, (long long) rc); } else { crm_trace("Event %d to %p[%d] (%lld bytes) sent: %.120s", header->qb.id, c->ipcs, c->pid, (long long) rc, (char *) (event[1].iov_base)); } c->event_queue = g_list_remove(c->event_queue, event); free(event[0].iov_base); free(event[1].iov_base); free(event); } queue_len -= sent; if (sent > 0 || queue_len) { crm_trace("Sent %d events (%d remaining) for %p[%d]: %s (%lld)", sent, queue_len, c->ipcs, c->pid, pcmk_strerror(rc < 0 ? rc : 0), (long long) rc); } if (queue_len) { /* Allow clients to briefly fall behind on processing incoming messages, * but drop completely unresponsive clients so the connection doesn't * consume resources indefinitely. 
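         *
         * For example, with the default threshold of
         * PCMK_IPC_DEFAULT_QUEUE_MAX (500) events: a backlog that grows
         * from 600 to 700 between flushes gets the client evicted, while
         * one that shrinks from 700 to 600 only draws a warning, and
         * delay_next_flush() schedules the next flush attempt after
         * 1000 + (100 * queue_len) ms for short queues, capped at 5000 ms.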
*/ if (queue_len > QB_MAX(c->queue_max, PCMK_IPC_DEFAULT_QUEUE_MAX)) { if ((c->queue_backlog <= 1) || (queue_len < c->queue_backlog)) { /* Don't evict for a new or shrinking backlog */ crm_warn("Client with process ID %u has a backlog of %u messages " CRM_XS " %p", c->pid, queue_len, c->ipcs); } else { crm_err("Evicting client with process ID %u due to backlog of %u messages " CRM_XS " %p", c->pid, queue_len, c->ipcs); c->queue_backlog = 0; qb_ipcs_disconnect(c->ipcs); return rc; } } c->queue_backlog = queue_len; delay_next_flush(c, queue_len); } else { /* Event queue is empty, there is no backlog */ c->queue_backlog = 0; } return rc; } ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size) { static unsigned int biggest = 0; struct iovec *iov; unsigned int total = 0; char *compressed = NULL; char *buffer = dump_xml_unformatted(message); struct crm_ipc_response_header *header = calloc(1, sizeof(struct crm_ipc_response_header)); CRM_ASSERT(result != NULL); crm_ipc_init(); if (max_send_size == 0) { max_send_size = ipc_buffer_max; } CRM_LOG_ASSERT(max_send_size != 0); *result = NULL; iov = calloc(2, sizeof(struct iovec)); iov[0].iov_len = hdr_offset; iov[0].iov_base = header; header->version = PCMK_IPC_VERSION; header->size_uncompressed = 1 + strlen(buffer); total = iov[0].iov_len + header->size_uncompressed; if (total < max_send_size) { iov[1].iov_base = buffer; iov[1].iov_len = header->size_uncompressed; } else { unsigned int new_size = 0; if (crm_compress_string (buffer, header->size_uncompressed, max_send_size, &compressed, &new_size)) { header->flags |= crm_ipc_compressed; header->size_compressed = new_size; iov[1].iov_len = header->size_compressed; iov[1].iov_base = compressed; free(buffer); biggest = QB_MAX(header->size_compressed, biggest); } else { ssize_t rc = -EMSGSIZE; crm_log_xml_trace(message, "EMSGSIZE"); biggest = QB_MAX(header->size_uncompressed, biggest); crm_err ("Could not compress the message (%u bytes) into less than the configured ipc limit (%u bytes). " "Set PCMK_ipc_buffer to a higher value (%u bytes suggested)", header->size_uncompressed, max_send_size, 4 * biggest); free(compressed); free(buffer); free(header); free(iov); return rc; } } header->qb.size = iov[0].iov_len + iov[1].iov_len; header->qb.id = (int32_t)request; /* Replying to a specific request */ *result = iov; CRM_ASSERT(header->qb.size > 0); return header->qb.size; } ssize_t crm_ipcs_sendv(crm_client_t * c, struct iovec * iov, enum crm_ipc_flags flags) { ssize_t rc; static uint32_t id = 1; struct crm_ipc_response_header *header = iov[0].iov_base; if (c->flags & crm_client_flag_ipc_proxied) { /* _ALL_ replies to proxied connections need to be sent as events */ if (is_not_set(flags, crm_ipc_server_event)) { flags |= crm_ipc_server_event; /* this flag lets us know this was originally meant to be a response. * even though we're sending it over the event channel. 
*/ flags |= crm_ipc_proxied_relay_response; } } header->flags |= flags; if (flags & crm_ipc_server_event) { header->qb.id = id++; /* We don't really use it, but doesn't hurt to set one */ if (flags & crm_ipc_server_free) { crm_trace("Sending the original to %p[%d]", c->ipcs, c->pid); c->event_queue = g_list_append(c->event_queue, iov); } else { struct iovec *iov_copy = calloc(2, sizeof(struct iovec)); crm_trace("Sending a copy to %p[%d]", c->ipcs, c->pid); iov_copy[0].iov_len = iov[0].iov_len; iov_copy[0].iov_base = malloc(iov[0].iov_len); memcpy(iov_copy[0].iov_base, iov[0].iov_base, iov[0].iov_len); iov_copy[1].iov_len = iov[1].iov_len; iov_copy[1].iov_base = malloc(iov[1].iov_len); memcpy(iov_copy[1].iov_base, iov[1].iov_base, iov[1].iov_len); c->event_queue = g_list_append(c->event_queue, iov_copy); } } else { CRM_LOG_ASSERT(header->qb.id != 0); /* Replying to a specific request */ rc = qb_ipcs_response_sendv(c->ipcs, iov, 2); if (rc < header->qb.size) { crm_notice("Response %d to %p[%d] (%u bytes) failed: %s (%d)", header->qb.id, c->ipcs, c->pid, header->qb.size, pcmk_strerror(rc), rc); } else { crm_trace("Response %d sent, %lld bytes to %p[%d]", header->qb.id, (long long) rc, c->ipcs, c->pid); } if (flags & crm_ipc_server_free) { free(iov[0].iov_base); free(iov[1].iov_base); free(iov); } } if (flags & crm_ipc_server_event) { rc = crm_ipcs_flush_events(c); } else { crm_ipcs_flush_events(c); } if (rc == -EPIPE || rc == -ENOTCONN) { crm_trace("Client %p disconnected", c->ipcs); } return rc; } ssize_t crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message, enum crm_ipc_flags flags) { struct iovec *iov = NULL; ssize_t rc = 0; if(c == NULL) { return -EDESTADDRREQ; } crm_ipc_init(); rc = crm_ipc_prepare(request, message, &iov, ipc_buffer_max); if (rc > 0) { rc = crm_ipcs_sendv(c, iov, flags | crm_ipc_server_free); } else { free(iov); crm_notice("Message to %p[%d] failed: %s (%d)", c->ipcs, c->pid, pcmk_strerror(rc), rc); } return rc; } void crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, const char *tag, const char *function, int line) { if (flags & crm_ipc_client_response) { xmlNode *ack = create_xml_node(NULL, tag); crm_trace("Ack'ing msg from %s (%p)", crm_client_name(c), c); c->request_id = 0; crm_xml_add(ack, "function", function); crm_xml_add_int(ack, "line", line); crm_ipcs_send(c, request, ack, flags); free_xml(ack); } } /* Client... */ #define MIN_MSG_SIZE 12336 /* sizeof(struct qb_ipc_connection_response) */ #define MAX_MSG_SIZE 128*1024 /* 128k default */ struct crm_ipc_s { struct pollfd pfd; /* the max size we can send/receive over ipc */ unsigned int max_buf_size; /* Size of the allocated 'buffer' */ unsigned int buf_size; int msg_size; int need_reply; char *buffer; char *name; uint32_t buffer_flags; qb_ipcc_connection_t *ipc; }; static unsigned int pick_ipc_buffer(unsigned int max) { static unsigned int global_max = 0; if (global_max == 0) { const char *env = getenv("PCMK_ipc_buffer"); if (env) { int env_max = crm_parse_int(env, "0"); global_max = (env_max > 0)? 
QB_MAX(MIN_MSG_SIZE, env_max) : MAX_MSG_SIZE; } else { global_max = MAX_MSG_SIZE; } } return QB_MAX(max, global_max); } crm_ipc_t * crm_ipc_new(const char *name, size_t max_size) { crm_ipc_t *client = NULL; client = calloc(1, sizeof(crm_ipc_t)); client->name = strdup(name); client->buf_size = pick_ipc_buffer(max_size); client->buffer = malloc(client->buf_size); /* Clients initiating connection pick the max buf size */ client->max_buf_size = client->buf_size; client->pfd.fd = -1; client->pfd.events = POLLIN; client->pfd.revents = 0; return client; } /*! * \brief Establish an IPC connection to a Pacemaker component * * \param[in] client Connection instance obtained from crm_ipc_new() * * \return TRUE on success, FALSE otherwise (in which case errno will be set) */ bool crm_ipc_connect(crm_ipc_t * client) { client->need_reply = FALSE; client->ipc = qb_ipcc_connect(client->name, client->buf_size); if (client->ipc == NULL) { crm_debug("Could not establish %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); return FALSE; } client->pfd.fd = crm_ipc_get_fd(client); if (client->pfd.fd < 0) { crm_debug("Could not obtain file descriptor for %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); return FALSE; } qb_ipcc_context_set(client->ipc, client); #ifdef HAVE_IPCS_GET_BUFFER_SIZE client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc); if (client->max_buf_size > client->buf_size) { free(client->buffer); client->buffer = calloc(1, client->max_buf_size); client->buf_size = client->max_buf_size; } #endif return TRUE; } void crm_ipc_close(crm_ipc_t * client) { if (client) { crm_trace("Disconnecting %s IPC connection %p (%p)", client->name, client, client->ipc); if (client->ipc) { qb_ipcc_connection_t *ipc = client->ipc; client->ipc = NULL; qb_ipcc_disconnect(ipc); } } } void crm_ipc_destroy(crm_ipc_t * client) { if (client) { if (client->ipc && qb_ipcc_is_connected(client->ipc)) { crm_notice("Destroying an active IPC connection to %s", client->name); /* The next line is basically unsafe * * If this connection was attached to mainloop and mainloop is active, * the 'disconnected' callback will end up back here and we'll end * up free'ing the memory twice - something that can still happen * even without this if we destroy a connection and it closes before * we call exit */ /* crm_ipc_close(client); */ } crm_trace("Destroying IPC connection to %s: %p", client->name, client); free(client->buffer); free(client->name); free(client); } } int crm_ipc_get_fd(crm_ipc_t * client) { int fd = 0; if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) { return fd; } errno = EINVAL; crm_perror(LOG_ERR, "Could not obtain file IPC descriptor for %s", (client? client->name : "unspecified client")); return -errno; } bool crm_ipc_connected(crm_ipc_t * client) { bool rc = FALSE; if (client == NULL) { crm_trace("No client"); return FALSE; } else if (client->ipc == NULL) { crm_trace("No connection"); return FALSE; } else if (client->pfd.fd < 0) { crm_trace("Bad descriptor"); return FALSE; } rc = qb_ipcc_is_connected(client->ipc); if (rc == FALSE) { client->pfd.fd = -EINVAL; } return rc; } /*! 
* \brief Check whether an IPC connection is ready to be read * * \param[in] client Connection to check * * \return Positive value if ready to be read, 0 if not ready, -errno on error */ int crm_ipc_ready(crm_ipc_t *client) { int rc; CRM_ASSERT(client != NULL); if (crm_ipc_connected(client) == FALSE) { return -ENOTCONN; } client->pfd.revents = 0; rc = poll(&(client->pfd), 1, 0); return (rc < 0)? -errno : rc; } static int crm_ipc_decompress(crm_ipc_t * client) { struct crm_ipc_response_header *header = (struct crm_ipc_response_header *)(void*)client->buffer; if (header->size_compressed) { int rc = 0; unsigned int size_u = 1 + header->size_uncompressed; /* never let buf size fall below our max size required for ipc reads. */ unsigned int new_buf_size = QB_MAX((hdr_offset + size_u), client->max_buf_size); char *uncompressed = calloc(1, new_buf_size); crm_trace("Decompressing message data %u bytes into %u bytes", header->size_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed + hdr_offset, &size_u, client->buffer + hdr_offset, header->size_compressed, 1, 0); if (rc != BZ_OK) { crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", bz2_strerror(rc), rc); free(uncompressed); return -EILSEQ; } /* * This assert no longer holds true. For an identical msg, some clients may * require compression, and others may not. If that same msg (event) is sent * to multiple clients, it could result in some clients receiving a compressed * msg even though compression was not explicitly required for them. * * CRM_ASSERT((header->size_uncompressed + hdr_offset) >= ipc_buffer_max); */ CRM_ASSERT(size_u == header->size_uncompressed); memcpy(uncompressed, client->buffer, hdr_offset); /* Preserve the header */ header = (struct crm_ipc_response_header *)(void*)uncompressed; free(client->buffer); client->buf_size = new_buf_size; client->buffer = uncompressed; } CRM_ASSERT(client->buffer[hdr_offset + header->size_uncompressed - 1] == 0); return pcmk_ok; } long crm_ipc_read(crm_ipc_t * client) { struct crm_ipc_response_header *header = NULL; CRM_ASSERT(client != NULL); CRM_ASSERT(client->ipc != NULL); CRM_ASSERT(client->buffer != NULL); crm_ipc_init(); client->buffer[0] = 0; client->msg_size = qb_ipcc_event_recv(client->ipc, client->buffer, client->buf_size - 1, 0); if (client->msg_size >= 0) { int rc = crm_ipc_decompress(client); if (rc != pcmk_ok) { return rc; } header = (struct crm_ipc_response_header *)(void*)client->buffer; if(header->version > PCMK_IPC_VERSION) { crm_err("Filtering incompatible v%d IPC message, we only support versions <= %d", header->version, PCMK_IPC_VERSION); return -EBADMSG; } crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s", client->name, header->qb.id, header->qb.size, client->msg_size, client->buffer + hdr_offset); } else { crm_trace("No message from %s received: %s", client->name, pcmk_strerror(client->msg_size)); } if (crm_ipc_connected(client) == FALSE || client->msg_size == -ENOTCONN) { crm_err("Connection to %s failed", client->name); } if (header) { /* Data excluding the header */ return header->size_uncompressed; } return -ENOMSG; } const char * crm_ipc_buffer(crm_ipc_t * client) { CRM_ASSERT(client != NULL); return client->buffer + sizeof(struct crm_ipc_response_header); } uint32_t crm_ipc_buffer_flags(crm_ipc_t * client) { struct crm_ipc_response_header *header = NULL; CRM_ASSERT(client != NULL); if (client->buffer == NULL) { return 0; } header = (struct crm_ipc_response_header *)(void*)client->buffer; return header->flags; } const char * 
crm_ipc_name(crm_ipc_t * client) { CRM_ASSERT(client != NULL); return client->name; } static int internal_ipc_send_recv(crm_ipc_t * client, const void *iov) { int rc = 0; do { rc = qb_ipcc_sendv_recv(client->ipc, iov, 2, client->buffer, client->buf_size, -1); } while (rc == -EAGAIN && crm_ipc_connected(client)); return rc; } static int internal_ipc_send_request(crm_ipc_t * client, const void *iov, int ms_timeout) { int rc = 0; time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); do { rc = qb_ipcc_sendv(client->ipc, iov, 2); } while (rc == -EAGAIN && time(NULL) < timeout && crm_ipc_connected(client)); return rc; } static int internal_ipc_get_reply(crm_ipc_t * client, int request_id, int ms_timeout) { time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); int rc = 0; crm_ipc_init(); /* get the reply */ crm_trace("client %s waiting on reply to msg id %d", client->name, request_id); do { rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 1000); if (rc > 0) { struct crm_ipc_response_header *hdr = NULL; int rc = crm_ipc_decompress(client); if (rc != pcmk_ok) { return rc; } hdr = (struct crm_ipc_response_header *)(void*)client->buffer; if (hdr->qb.id == request_id) { /* Got it */ break; } else if (hdr->qb.id < request_id) { xmlNode *bad = string2xml(crm_ipc_buffer(client)); crm_err("Discarding old reply %d (need %d)", hdr->qb.id, request_id); crm_log_xml_notice(bad, "OldIpcReply"); } else { xmlNode *bad = string2xml(crm_ipc_buffer(client)); crm_err("Discarding newer reply %d (need %d)", hdr->qb.id, request_id); crm_log_xml_notice(bad, "ImpossibleReply"); CRM_ASSERT(hdr->qb.id <= request_id); } } else if (crm_ipc_connected(client) == FALSE) { crm_err("Server disconnected client %s while waiting for msg id %d", client->name, request_id); break; } } while (time(NULL) < timeout); return rc; } int crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode ** reply) { long rc = 0; struct iovec *iov; static uint32_t id = 0; static int factor = 8; struct crm_ipc_response_header *header; crm_ipc_init(); if (client == NULL) { crm_notice("Invalid connection"); return -ENOTCONN; } else if (crm_ipc_connected(client) == FALSE) { /* Don't even bother */ crm_notice("Connection to %s closed", client->name); return -ENOTCONN; } if (ms_timeout == 0) { ms_timeout = 5000; } if (client->need_reply) { crm_trace("Trying again to obtain pending reply from %s", client->name); rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, ms_timeout); if (rc < 0) { crm_warn("Sending to %s (%p) is disabled until pending reply is received", client->name, client->ipc); return -EALREADY; } else { crm_notice("Lost reply from %s (%p) finally arrived, sending re-enabled", client->name, client->ipc); client->need_reply = FALSE; } } id++; CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */ rc = crm_ipc_prepare(id, message, &iov, client->max_buf_size); if(rc < 0) { return rc; } header = iov[0].iov_base; header->flags |= flags; if(is_set(flags, crm_ipc_proxied)) { /* Don't look for a synchronous response */ clear_bit(flags, crm_ipc_client_response); } if(header->size_compressed) { if(factor < 10 && (client->max_buf_size / 10) < (rc / factor)) { crm_notice("Compressed message exceeds %d0%% of the configured ipc limit (%u bytes), " "consider setting PCMK_ipc_buffer to %u or higher", factor, client->max_buf_size, 2 * client->max_buf_size); factor++; } } crm_trace("Sending from client: %s request id: %d bytes: %u timeout:%d msg...", client->name, header->qb.id, 
header->qb.size, ms_timeout); if (ms_timeout > 0 || is_not_set(flags, crm_ipc_client_response)) { rc = internal_ipc_send_request(client, iov, ms_timeout); if (rc <= 0) { crm_trace("Failed to send from client %s request %d with %u bytes...", client->name, header->qb.id, header->qb.size); goto send_cleanup; } else if (is_not_set(flags, crm_ipc_client_response)) { crm_trace("Message sent, not waiting for reply to %d from %s to %u bytes...", header->qb.id, client->name, header->qb.size); goto send_cleanup; } rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout); if (rc < 0) { /* No reply, for now, disable sending * * The alternative is to close the connection since we don't know * how to detect and discard out-of-sequence replies * * TODO - implement the above */ client->need_reply = TRUE; } } else { rc = internal_ipc_send_recv(client, iov); } if (rc > 0) { struct crm_ipc_response_header *hdr = (struct crm_ipc_response_header *)(void*)client->buffer; crm_trace("Received response %d, size=%u, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size, rc, crm_ipc_buffer(client)); if (reply) { *reply = string2xml(crm_ipc_buffer(client)); } } else { crm_trace("Response not received: rc=%ld, errno=%d", rc, errno); } send_cleanup: if (crm_ipc_connected(client) == FALSE) { crm_notice("Connection to %s closed: %s (%ld)", client->name, pcmk_strerror(rc), rc); } else if (rc == -ETIMEDOUT) { crm_warn("Request %d to %s (%p) failed: %s (%ld) after %dms", header->qb.id, client->name, client->ipc, pcmk_strerror(rc), rc, ms_timeout); crm_write_blackbox(0, NULL); } else if (rc <= 0) { crm_warn("Request %d to %s (%p) failed: %s (%ld)", header->qb.id, client->name, client->ipc, pcmk_strerror(rc), rc); } free(header); free(iov[1].iov_base); free(iov); return rc; } /* Utils */ xmlNode * create_hello_message(const char *uuid, const char *client_name, const char *major_version, const char *minor_version) { xmlNode *hello_node = NULL; xmlNode *hello = NULL; if (uuid == NULL || strlen(uuid) == 0 || client_name == NULL || strlen(client_name) == 0 || major_version == NULL || strlen(major_version) == 0 || minor_version == NULL || strlen(minor_version) == 0) { crm_err("Missing fields, Hello message will not be valid."); return NULL; } hello_node = create_xml_node(NULL, XML_TAG_OPTIONS); crm_xml_add(hello_node, "major_version", major_version); crm_xml_add(hello_node, "minor_version", minor_version); crm_xml_add(hello_node, "client_name", client_name); crm_xml_add(hello_node, "client_uuid", uuid); crm_trace("creating hello message"); hello = create_request(CRM_OP_HELLO, hello_node, NULL, NULL, client_name, uuid); free_xml(hello_node); return hello; } diff --git a/lib/common/remote.c b/lib/common/remote.c index b228d10c0d..09598adc03 100644 --- a/lib/common/remote.c +++ b/lib/common/remote.c @@ -1,1054 +1,1054 @@ /* * Copyright (c) 2008 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#ifdef HAVE_GNUTLS_GNUTLS_H
#  undef KEYFILE
#  include
const int psk_tls_kx_order[] = {
    GNUTLS_KX_DHE_PSK,
    GNUTLS_KX_PSK,
    0
};

const int anon_tls_kx_order[] = {
    GNUTLS_KX_ANON_DH,
    GNUTLS_KX_DHE_RSA,
    GNUTLS_KX_DHE_DSS,
    GNUTLS_KX_RSA,
    0
};
#endif

/* Swab macros from linux/swab.h */
#ifdef HAVE_LINUX_SWAB_H
#  include
#else
/*
 * casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define __swab16(x) ((uint16_t)( \
        (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \
        (((uint16_t)(x) & (uint16_t)0xff00U) >> 8)))

#define __swab32(x) ((uint32_t)( \
        (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
        (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
        (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
        (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

#define __swab64(x) ((uint64_t)( \
        (((uint64_t)(x) & (uint64_t)0x00000000000000ffULL) << 56) | \
        (((uint64_t)(x) & (uint64_t)0x000000000000ff00ULL) << 40) | \
        (((uint64_t)(x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \
        (((uint64_t)(x) & (uint64_t)0x00000000ff000000ULL) << 8) | \
        (((uint64_t)(x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \
        (((uint64_t)(x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \
        (((uint64_t)(x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \
        (((uint64_t)(x) & (uint64_t)0xff00000000000000ULL) >> 56)))
#endif

#define REMOTE_MSG_VERSION 1
#define ENDIAN_LOCAL 0xBADADBBD

struct crm_remote_header_v0 {
    uint32_t endian;    /* Detect messages from hosts with different endian-ness */
    uint32_t version;
    uint64_t id;
    uint64_t flags;
    uint32_t size_total;
    uint32_t payload_offset;
    uint32_t payload_compressed;
    uint32_t payload_uncompressed;

    /* New fields get added here */

} __attribute__ ((packed));

static struct crm_remote_header_v0 *
crm_remote_header(crm_remote_t * remote)
{
    struct crm_remote_header_v0 *header = (struct crm_remote_header_v0 *)remote->buffer;

    if(remote->buffer_offset < sizeof(struct crm_remote_header_v0)) {
        return NULL;

    } else if(header->endian != ENDIAN_LOCAL) {
        uint32_t endian = __swab32(header->endian);

        CRM_LOG_ASSERT(endian == ENDIAN_LOCAL);
        if(endian != ENDIAN_LOCAL) {
            crm_err("Invalid message detected, endian mismatch: expected %lx, got %lx (%lx after swab)",
                    ENDIAN_LOCAL, header->endian, endian);
            return NULL;
        }

        header->id = __swab64(header->id);
        header->flags = __swab64(header->flags);
        header->endian = __swab32(header->endian);

        header->version = __swab32(header->version);
        header->size_total = __swab32(header->size_total);
        header->payload_offset = __swab32(header->payload_offset);
        header->payload_compressed = __swab32(header->payload_compressed);
        header->payload_uncompressed = __swab32(header->payload_uncompressed);
    }

    return header;
}

#ifdef HAVE_GNUTLS_GNUTLS_H

int
crm_initiate_client_tls_handshake(crm_remote_t * remote, int timeout_ms)
{
    int rc = 0;
    int pollrc = 0;
    time_t start = time(NULL);

    do {
        rc = gnutls_handshake(*remote->tls_session);
        if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) {
            pollrc = crm_remote_ready(remote, 1000);
            if (pollrc < 0) {
                /* poll returned error, there is no hope */
                rc = -1;
            }
        }
    } while (((time(NULL) - start) <
(timeout_ms / 1000)) && (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN)); if (rc < 0) { crm_trace("gnutls_handshake() failed with %d", rc); } return rc; } void * crm_create_anon_tls_session(int csock, int type /* GNUTLS_SERVER, GNUTLS_CLIENT */ , void *credentials) { gnutls_session_t *session = gnutls_malloc(sizeof(gnutls_session_t)); gnutls_init(session, type); # ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT /* http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication */ gnutls_priority_set_direct(*session, "NORMAL:+ANON-DH", NULL); /* gnutls_priority_set_direct (*session, "NONE:+VERS-TLS-ALL:+CIPHER-ALL:+MAC-ALL:+SIGN-ALL:+COMP-ALL:+ANON-DH", NULL); */ # else gnutls_set_default_priority(*session); gnutls_kx_set_priority(*session, anon_tls_kx_order); # endif gnutls_transport_set_ptr(*session, (gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); switch (type) { case GNUTLS_SERVER: gnutls_credentials_set(*session, GNUTLS_CRD_ANON, (gnutls_anon_server_credentials_t) credentials); break; case GNUTLS_CLIENT: gnutls_credentials_set(*session, GNUTLS_CRD_ANON, (gnutls_anon_client_credentials_t) credentials); break; } return session; } void * create_psk_tls_session(int csock, int type /* GNUTLS_SERVER, GNUTLS_CLIENT */ , void *credentials) { gnutls_session_t *session = gnutls_malloc(sizeof(gnutls_session_t)); gnutls_init(session, type); # ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT gnutls_priority_set_direct(*session, "NORMAL:+DHE-PSK:+PSK", NULL); # else gnutls_set_default_priority(*session); gnutls_kx_set_priority(*session, psk_tls_kx_order); # endif gnutls_transport_set_ptr(*session, (gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); switch (type) { case GNUTLS_SERVER: gnutls_credentials_set(*session, GNUTLS_CRD_PSK, (gnutls_psk_server_credentials_t) credentials); break; case GNUTLS_CLIENT: gnutls_credentials_set(*session, GNUTLS_CRD_PSK, (gnutls_psk_client_credentials_t) credentials); break; } return session; } static int crm_send_tls(gnutls_session_t * session, const char *buf, size_t len) { const char *unsent = buf; int rc = 0; int total_send; if (buf == NULL) { return -EINVAL; } total_send = len; crm_trace("Message size: %llu", (unsigned long long) len); while (TRUE) { rc = gnutls_record_send(*session, unsent, len); if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) { crm_trace("Retrying to send %llu bytes", (unsigned long long) len); } else if (rc < 0) { - crm_err("Connection terminated: %s " CRM_XS " rc=%d", - gnutls_strerror(rc), rc); + // Caller can log as error if necessary + crm_info("TLS connection terminated: %s " CRM_XS " rc=%d", + gnutls_strerror(rc), rc); rc = -ECONNABORTED; break; } else if (rc < len) { crm_debug("Sent %d of %llu bytes", rc, (unsigned long long) len); len -= rc; unsent += rc; } else { crm_trace("Sent all %d bytes", rc); break; } } return rc < 0 ? 
rc : total_send;
}
#endif

static int
crm_send_plaintext(int sock, const char *buf, size_t len)
{
    int rc = 0;
    const char *unsent = buf;
    int total_send;

    if (buf == NULL) {
        return -EINVAL;
    }
    total_send = len;

    crm_trace("Message on socket %d: size=%llu", sock, (unsigned long long) len);
  retry:
    rc = write(sock, unsent, len);
    if (rc < 0) {
        rc = -errno;
        switch (errno) {
            case EINTR:
            case EAGAIN:
                crm_trace("Retry");
                goto retry;
            default:
-                crm_perror(LOG_ERR, "Could only write %d of the remaining %d bytes", rc, (int)len);
+                crm_perror(LOG_INFO,
+                           "Could only write %d of the remaining %llu bytes",
+                           rc, (unsigned long long) len);
                break;
        }

    } else if (rc < len) {
        crm_trace("Only sent %d of %llu remaining bytes", rc, (unsigned long long) len);
        len -= rc;
        unsent += rc;
        goto retry;

    } else {
        crm_trace("Sent %d bytes: %.100s", rc, buf);
    }

    return rc < 0 ? rc : total_send;
}

static int
crm_remote_sendv(crm_remote_t * remote, struct iovec * iov, int iovs)
{
-    int lpc = 0;
-    int rc = -ESOCKTNOSUPPORT;
-
-    for(; lpc < iovs; lpc++) {
+    int rc = 0;
+
+    for (int lpc = 0; (lpc < iovs) && (rc >= 0); lpc++) {
#ifdef HAVE_GNUTLS_GNUTLS_H
        if (remote->tls_session) {
            rc = crm_send_tls(remote->tls_session, iov[lpc].iov_base, iov[lpc].iov_len);
-        } else if (remote->tcp_socket) {
-#else
-        if (remote->tcp_socket) {
+            continue;
+        }
#endif
+        if (remote->tcp_socket) {
            rc = crm_send_plaintext(remote->tcp_socket, iov[lpc].iov_base, iov[lpc].iov_len);
        } else {
-            crm_err("Unsupported connection type");
+            rc = -ESOCKTNOSUPPORT;
        }
    }
    return rc;
}

int
crm_remote_send(crm_remote_t * remote, xmlNode * msg)
{
    int rc = pcmk_ok;
    static uint64_t id = 0;
    char *xml_text = dump_xml_unformatted(msg);

    struct iovec iov[2];
    struct crm_remote_header_v0 *header;

    if (xml_text == NULL) {
        crm_err("Could not send remote message: no message provided");
        return -EINVAL;
    }

    header = calloc(1, sizeof(struct crm_remote_header_v0));
    iov[0].iov_base = header;
    iov[0].iov_len = sizeof(struct crm_remote_header_v0);

    iov[1].iov_base = xml_text;
    iov[1].iov_len = 1 + strlen(xml_text);

    id++;
    header->id = id;
    header->endian = ENDIAN_LOCAL;
    header->version = REMOTE_MSG_VERSION;
    header->payload_offset = iov[0].iov_len;
    header->payload_uncompressed = iov[1].iov_len;
    header->size_total = iov[0].iov_len + iov[1].iov_len;

    crm_trace("Sending len[0]=%d, start=%x",
              (int)iov[0].iov_len, *(int*)(void*)xml_text);
    rc = crm_remote_sendv(remote, iov, 2);
    if (rc < 0) {
        crm_err("Could not send remote message: %s " CRM_XS " rc=%d",
                pcmk_strerror(rc), rc);
    }

    free(iov[0].iov_base);
    free(iov[1].iov_base);
    return rc;
}

/*!
 * \internal
 * \brief Handle the receive buffer and parse out complete messages
 * \note Data read into the buffer is owned by this function once it is passed in.
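 *
 * The wire format produced by crm_remote_send() above is a packed
 * struct crm_remote_header_v0 followed immediately by the
 * NUL-terminated XML text, so for an uncompressed message:
 *
 *     [ endian | version | id | flags | size_total | payload_offset |
 *       payload_compressed | payload_uncompressed ] [ "<...xml...>\0" ]
 *
 * and header->payload_offset is both the header's size and the offset
 * of the XML text within remote->buffer.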
*/ xmlNode * crm_remote_parse_buffer(crm_remote_t * remote) { xmlNode *xml = NULL; struct crm_remote_header_v0 *header = crm_remote_header(remote); if (remote->buffer == NULL || header == NULL) { return NULL; } /* Support compression on the receiving end now, in case we ever want to add it later */ if (header->payload_compressed) { int rc = 0; unsigned int size_u = 1 + header->payload_uncompressed; char *uncompressed = calloc(1, header->payload_offset + size_u); crm_trace("Decompressing message data %d bytes into %d bytes", header->payload_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed + header->payload_offset, &size_u, remote->buffer + header->payload_offset, header->payload_compressed, 1, 0); if (rc != BZ_OK && header->version > REMOTE_MSG_VERSION) { crm_warn("Couldn't decompress v%d message, we only understand v%d", header->version, REMOTE_MSG_VERSION); free(uncompressed); return NULL; } else if (rc != BZ_OK) { crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", bz2_strerror(rc), rc); free(uncompressed); return NULL; } CRM_ASSERT(size_u == header->payload_uncompressed); memcpy(uncompressed, remote->buffer, header->payload_offset); /* Preserve the header */ remote->buffer_size = header->payload_offset + size_u; free(remote->buffer); remote->buffer = uncompressed; header = crm_remote_header(remote); } /* take ownership of the buffer */ remote->buffer_offset = 0; CRM_LOG_ASSERT(remote->buffer[sizeof(struct crm_remote_header_v0) + header->payload_uncompressed - 1] == 0); xml = string2xml(remote->buffer + header->payload_offset); if (xml == NULL && header->version > REMOTE_MSG_VERSION) { crm_warn("Couldn't parse v%d message, we only understand v%d", header->version, REMOTE_MSG_VERSION); } else if (xml == NULL) { crm_err("Couldn't parse: '%.120s'", remote->buffer + header->payload_offset); } return xml; } /*! * \internal * \brief Wait for a remote session to have data to read * * \param[in] remote Connection to check * \param[in] total_timeout Maximum time (in ms) to wait * * \return Positive value if ready to be read, 0 on timeout, -errno on error */ int crm_remote_ready(crm_remote_t *remote, int total_timeout) { struct pollfd fds = { 0, }; int sock = 0; int rc = 0; time_t start; int timeout = total_timeout; #ifdef HAVE_GNUTLS_GNUTLS_H if (remote->tls_session) { void *sock_ptr = gnutls_transport_get_ptr(*remote->tls_session); sock = GPOINTER_TO_INT(sock_ptr); } else if (remote->tcp_socket) { #else if (remote->tcp_socket) { #endif sock = remote->tcp_socket; } else { crm_err("Unsupported connection type"); } if (sock <= 0) { crm_trace("No longer connected"); return -ENOTCONN; } start = time(NULL); errno = 0; do { fds.fd = sock; fds.events = POLLIN; /* If we got an EINTR while polling, and we have a * specific timeout we are trying to honor, attempt * to adjust the timeout to the closest second. */ if (errno == EINTR && (timeout > 0)) { timeout = total_timeout - ((time(NULL) - start) * 1000); if (timeout < 1000) { timeout = 1000; } } rc = poll(&fds, 1, timeout); } while (rc < 0 && errno == EINTR); return (rc < 0)? -errno : rc; } /*! * \internal * \brief Read bytes off non blocking remote connection. * * \note only use with NON-Blocking sockets. Should only be used after polling socket. * This function will return once max_size is met, the socket read buffer * is empty, or an error is encountered. 
 *
 * \retval number of bytes received
 */
static size_t
crm_remote_recv_once(crm_remote_t * remote)
{
    int rc = 0;
    size_t read_len = sizeof(struct crm_remote_header_v0);
    struct crm_remote_header_v0 *header = crm_remote_header(remote);

    if(header) {
        /* Stop at the end of the current message */
        read_len = header->size_total;
    }

    /* automatically grow the buffer when needed */
    if(remote->buffer_size < read_len) {
        remote->buffer_size = 2 * read_len;
        crm_trace("Expanding buffer to %llu bytes",
                  (unsigned long long) remote->buffer_size);

        remote->buffer = realloc_safe(remote->buffer, remote->buffer_size + 1);
        CRM_ASSERT(remote->buffer != NULL);
    }

#ifdef HAVE_GNUTLS_GNUTLS_H
    if (remote->tls_session) {
        rc = gnutls_record_recv(*(remote->tls_session),
                                remote->buffer + remote->buffer_offset,
                                remote->buffer_size - remote->buffer_offset);
        if (rc == GNUTLS_E_INTERRUPTED) {
            rc = -EINTR;

        } else if (rc == GNUTLS_E_AGAIN) {
            rc = -EAGAIN;

        } else if (rc < 0) {
            crm_debug("TLS receive failed: %s (%d)", gnutls_strerror(rc), rc);
            rc = -pcmk_err_generic;
        }
    } else if (remote->tcp_socket) {
#else
    if (remote->tcp_socket) {
#endif
        errno = 0;
        rc = read(remote->tcp_socket,
                  remote->buffer + remote->buffer_offset,
                  remote->buffer_size - remote->buffer_offset);
        if(rc < 0) {
            rc = -errno;
        }

    } else {
        crm_err("Unsupported connection type");
        return -ESOCKTNOSUPPORT;
    }

    /* process any errors. */
    if (rc > 0) {
        remote->buffer_offset += rc;
        /* always NUL-terminate the buffer; the +1 in the allocation above allows for this */
        remote->buffer[remote->buffer_offset] = '\0';
        crm_trace("Received %u more bytes, %llu total",
                  rc, (unsigned long long) remote->buffer_offset);

    } else if (rc == -EINTR || rc == -EAGAIN) {
        crm_trace("non-blocking, exiting read: %s (%d)", pcmk_strerror(rc), rc);

    } else if (rc == 0) {
        crm_debug("EOF encountered after %llu bytes",
                  (unsigned long long) remote->buffer_offset);
        return -ENOTCONN;

    } else {
        crm_debug("Error receiving message after %llu bytes: %s (%d)",
                  (unsigned long long) remote->buffer_offset,
                  pcmk_strerror(rc), rc);
        return -ENOTCONN;
    }

    header = crm_remote_header(remote);
    if(header) {
        if(remote->buffer_offset < header->size_total) {
            crm_trace("Read less than the advertised length: %llu < %u bytes",
                      (unsigned long long) remote->buffer_offset, header->size_total);
        } else {
            crm_trace("Read full message of %llu bytes",
                      (unsigned long long) remote->buffer_offset);
            return remote->buffer_offset;
        }
    }

    return -EAGAIN;
}

/*!
* \internal * \brief Read message(s) from a remote connection * * \param[in] remote Remote connection to read * \param[in] total_timeout Fail if message not read in this time (ms) * \param[out] disconnected Will be set to 1 if disconnect detected * * \return TRUE if at least one full message read, FALSE otherwise */ gboolean crm_remote_recv(crm_remote_t *remote, int total_timeout, int *disconnected) { int rc; time_t start = time(NULL); int remaining_timeout = 0; if (total_timeout == 0) { total_timeout = 10000; } else if (total_timeout < 0) { total_timeout = 60000; } *disconnected = 0; remaining_timeout = total_timeout; while ((remaining_timeout > 0) && !(*disconnected)) { crm_trace("Waiting for remote data (%d of %d ms timeout remaining)", remaining_timeout, total_timeout); rc = crm_remote_ready(remote, remaining_timeout); if (rc == 0) { crm_err("Timed out (%d ms) while waiting for remote data", remaining_timeout); return FALSE; } else if (rc < 0) { crm_debug("Wait for remote data aborted, will try again: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } else { rc = crm_remote_recv_once(remote); if (rc > 0) { return TRUE; } else if (rc == -EAGAIN) { crm_trace("Still waiting for remote data"); } else if (rc < 0) { crm_debug("Could not receive remote data: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); } } if (rc == -ENOTCONN) { *disconnected = 1; return FALSE; } remaining_timeout = total_timeout - ((time(NULL) - start) * 1000); } return FALSE; } struct tcp_async_cb_data { gboolean success; int sock; void *userdata; void (*callback) (void *userdata, int sock); int timeout; /*ms */ time_t start; }; static gboolean check_connect_finished(gpointer userdata) { struct tcp_async_cb_data *cb_data = userdata; int cb_arg = 0; // socket fd on success, -errno on error int sock = cb_data->sock; int error = 0; fd_set rset, wset; socklen_t len = sizeof(error); struct timeval ts = { 0, }; if (cb_data->success == TRUE) { goto dispatch_done; } FD_ZERO(&rset); FD_SET(sock, &rset); wset = rset; crm_trace("fd %d: checking to see if connect finished", sock); cb_arg = select(sock + 1, &rset, &wset, NULL, &ts); if (cb_arg < 0) { cb_arg = -errno; if ((errno == EINPROGRESS) || (errno == EAGAIN)) { /* reschedule if there is still time left */ if ((time(NULL) - cb_data->start) < (cb_data->timeout / 1000)) { goto reschedule; } else { cb_arg = -ETIMEDOUT; } } crm_trace("fd %d: select failed %d connect dispatch ", sock, cb_arg); goto dispatch_done; } else if (cb_arg == 0) { if ((time(NULL) - cb_data->start) < (cb_data->timeout / 1000)) { goto reschedule; } crm_debug("fd %d: timeout during select", sock); cb_arg = -ETIMEDOUT; goto dispatch_done; } else { crm_trace("fd %d: select returned success", sock); cb_arg = 0; } /* can we read or write to the socket now? 
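     * Note that being readable/writable alone does not mean the connect()
     * succeeded: a failed non-blocking connect also marks the socket as
     * ready, which is why the getsockopt(SO_ERROR) check below is needed
     * to distinguish success (0) from a pending connection error.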
*/ if (FD_ISSET(sock, &rset) || FD_ISSET(sock, &wset)) { if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &error, &len) < 0) { cb_arg = -errno; crm_trace("fd %d: call to getsockopt failed", sock); goto dispatch_done; } if (error) { crm_trace("fd %d: error returned from getsockopt: %d", sock, error); cb_arg = -error; goto dispatch_done; } } else { crm_trace("neither read nor write set after select"); cb_arg = -EAGAIN; goto dispatch_done; } dispatch_done: if (!cb_arg) { crm_trace("fd %d: connected", sock); /* Success, set the return code to the sock to report to the callback */ cb_arg = cb_data->sock; cb_data->sock = 0; } else { close(sock); } if (cb_data->callback) { cb_data->callback(cb_data->userdata, cb_arg); } free(cb_data); return FALSE; reschedule: /* will check again next interval */ return TRUE; } static int internal_tcp_connect_async(int sock, const struct sockaddr *addr, socklen_t addrlen, int timeout /* ms */ , int *timer_id, void *userdata, void (*callback) (void *userdata, int sock)) { int rc = 0; int interval = 500; int timer; struct tcp_async_cb_data *cb_data = NULL; rc = crm_set_nonblocking(sock); if (rc < 0) { crm_warn("Could not set socket non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); close(sock); return -1; } rc = connect(sock, addr, addrlen); if (rc < 0 && (errno != EINPROGRESS) && (errno != EAGAIN)) { crm_perror(LOG_WARNING, "connect"); return -1; } cb_data = calloc(1, sizeof(struct tcp_async_cb_data)); cb_data->userdata = userdata; cb_data->callback = callback; cb_data->sock = sock; cb_data->timeout = timeout; cb_data->start = time(NULL); if (rc == 0) { /* The connect was successful immediately, we still return to mainloop * and let this callback get called later. This avoids the user of this api * to have to account for the fact the callback could be invoked within this * function before returning. */ cb_data->success = TRUE; interval = 1; } /* Check connect finished is mostly doing a non-block poll on the socket * to see if we can read/write to it. Once we can, the connect has completed. * This method allows us to connect to the server without blocking mainloop. * * This is a poor man's way of polling to see when the connection finished. * At some point we should figure out a way to use a mainloop fd callback for this. * Something about the way mainloop is currently polling prevents this from working at the * moment though. */ crm_trace("Scheduling check in %dms for whether connect to fd %d finished", interval, sock); timer = g_timeout_add(interval, check_connect_finished, cb_data); if (timer_id) { *timer_id = timer; } return 0; } static int internal_tcp_connect(int sock, const struct sockaddr *addr, socklen_t addrlen) { int rc = connect(sock, addr, addrlen); if (rc < 0) { rc = -errno; crm_warn("Could not connect socket: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); return rc; } rc = crm_set_nonblocking(sock); if (rc < 0) { crm_warn("Could not set socket non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); return rc; } return pcmk_ok; } /*! 
* \internal * \brief Connect to server at specified TCP port * * \param[in] host Name of server to connect to * \param[in] port Server port to connect to * \param[in] timeout Report error if not connected in this many milliseconds * \param[out] timer_id If non-NULL, will be set to timer ID, if asynchronous * \param[in] userdata Data to pass to callback, if asynchronous * \param[in] callback If non-NULL, connect asynchronously then call this * * \return File descriptor of connected socket on success, -ENOTCONN otherwise */ int crm_remote_tcp_connect_async(const char *host, int port, int timeout, int *timer_id, void *userdata, void (*callback) (void *userdata, int sock)) { char buffer[INET6_ADDRSTRLEN]; struct addrinfo *res = NULL; struct addrinfo *rp = NULL; struct addrinfo hints; const char *server = host; int ret_ga; int sock = -ENOTCONN; // Get host's IP address(es) memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_CANONNAME; ret_ga = getaddrinfo(server, NULL, &hints, &res); if (ret_ga) { crm_err("Unable to get IP address info for %s: %s", server, gai_strerror(ret_ga)); goto async_cleanup; } if (!res || !res->ai_addr) { crm_err("Unable to get IP address info for %s: no result", server); goto async_cleanup; } // getaddrinfo() returns a list of host's addresses, try them in order for (rp = res; rp != NULL; rp = rp->ai_next) { struct sockaddr *addr = rp->ai_addr; if (!addr) { continue; } if (rp->ai_canonname) { server = res->ai_canonname; } crm_debug("Got canonical name %s for %s", server, host); sock = socket(rp->ai_family, SOCK_STREAM, IPPROTO_TCP); if (sock == -1) { crm_perror(LOG_WARNING, "creating socket for connection to %s", server); sock = -ENOTCONN; continue; } /* Set port appropriately for address family */ /* (void*) casts avoid false-positive compiler alignment warnings */ if (addr->sa_family == AF_INET6) { ((struct sockaddr_in6 *)(void*)addr)->sin6_port = htons(port); } else { ((struct sockaddr_in *)(void*)addr)->sin_port = htons(port); } memset(buffer, 0, DIMOF(buffer)); crm_sockaddr2str(addr, buffer); crm_info("Attempting TCP connection to %s:%d", buffer, port); if (callback) { if (internal_tcp_connect_async (sock, rp->ai_addr, rp->ai_addrlen, timeout, timer_id, userdata, callback) == 0) { goto async_cleanup; /* Success for now, we'll hear back later in the callback */ } } else if (internal_tcp_connect(sock, rp->ai_addr, rp->ai_addrlen) == 0) { break; /* Success */ } close(sock); sock = -ENOTCONN; } async_cleanup: if (res) { freeaddrinfo(res); } return sock; } int crm_remote_tcp_connect(const char *host, int port) { return crm_remote_tcp_connect_async(host, port, -1, NULL, NULL, NULL); } /*! * \brief Convert an IP address (IPv4 or IPv6) to a string for logging * * \param[in] sa Socket address for IP * \param[out] s Storage for at least INET6_ADDRSTRLEN bytes * * \note sa The socket address can be a pointer to struct sockaddr_in (IPv4), * struct sockaddr_in6 (IPv6) or struct sockaddr_storage (either), * as long as its sa_family member is set correctly. 
*/ void crm_sockaddr2str(void *sa, char *s) { switch (((struct sockaddr*)sa)->sa_family) { case AF_INET: inet_ntop(AF_INET, &(((struct sockaddr_in *)sa)->sin_addr), s, INET6_ADDRSTRLEN); break; case AF_INET6: inet_ntop(AF_INET6, &(((struct sockaddr_in6 *)sa)->sin6_addr), s, INET6_ADDRSTRLEN); break; default: strcpy(s, "<invalid>"); } } int crm_remote_accept(int ssock) { int csock = 0; int rc = 0; unsigned laddr = 0; struct sockaddr_storage addr; char addr_str[INET6_ADDRSTRLEN]; #ifdef TCP_USER_TIMEOUT int optval; long sbd_timeout = crm_get_sbd_timeout(); #endif /* accept the connection */ laddr = sizeof(addr); memset(&addr, 0, sizeof(addr)); csock = accept(ssock, (struct sockaddr *)&addr, &laddr); crm_sockaddr2str(&addr, addr_str); crm_info("New remote connection from %s", addr_str); if (csock == -1) { crm_err("accept socket failed"); return -1; } rc = crm_set_nonblocking(csock); if (rc < 0) { crm_err("Could not set socket non-blocking: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); close(csock); return rc; } #ifdef TCP_USER_TIMEOUT if (sbd_timeout > 0) { optval = sbd_timeout / 2; /* time to fail and retry before watchdog */ rc = setsockopt(csock, SOL_TCP, TCP_USER_TIMEOUT, &optval, sizeof(optval)); if (rc < 0) { crm_err("setting TCP_USER_TIMEOUT (%d) on client socket failed", optval); close(csock); return rc; } } #endif return csock; } /*! * \brief Get the default remote connection TCP port on this host * * \return Remote connection TCP port number */ int crm_default_remote_port() { static int port = 0; if (port == 0) { const char *env = getenv("PCMK_remote_port"); if (env) { errno = 0; port = strtol(env, NULL, 10); if (errno || (port < 1) || (port > 65535)) { crm_warn("Environment variable PCMK_remote_port has invalid value '%s', using %d instead", env, DEFAULT_REMOTE_PORT); port = DEFAULT_REMOTE_PORT; } } else { port = DEFAULT_REMOTE_PORT; } } return port; } diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c index 7452f669c5..da5fc569b9 100644 --- a/lib/lrmd/lrmd_client.c +++ b/lib/lrmd/lrmd_client.c @@ -1,1904 +1,1916 @@ /* * Copyright (c) 2012 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include #endif #include #include #include #include #include #define MAX_TLS_RECV_WAIT 10000 CRM_TRACE_INIT_DATA(lrmd); static int lrmd_api_disconnect(lrmd_t * lrmd); static int lrmd_api_is_connected(lrmd_t * lrmd); /* IPC proxy functions */ int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); static void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); #ifdef HAVE_GNUTLS_GNUTLS_H # define LRMD_CLIENT_HANDSHAKE_TIMEOUT 5000 /* 5 seconds */ gnutls_psk_client_credentials_t psk_cred_s; int lrmd_tls_set_key(gnutls_datum_t * key); static void lrmd_tls_disconnect(lrmd_t * lrmd); static int global_remote_msg_id = 0; int lrmd_tls_send_msg(crm_remote_t * session, xmlNode * msg, uint32_t id, const char *msg_type); static void lrmd_tls_connection_destroy(gpointer userdata); #endif typedef struct lrmd_private_s { enum client_type type; char *token; mainloop_io_t *source; /* IPC parameters */ crm_ipc_t *ipc; crm_remote_t *remote; /* Extra TLS parameters */ char *remote_nodename; #ifdef HAVE_GNUTLS_GNUTLS_H char *server; int port; gnutls_psk_client_credentials_t psk_cred_c; /* while the async connection is occurring, this is the id * of the connection timeout timer. */ int async_timer; int sock; /* since tls requires a round trip across the network for a * request/reply, there are times where we just want to be able * to send a request from the client and not wait around (or even care * about) what the reply is. */ int expected_late_replies; GList *pending_notify; crm_trigger_t *process_notify; #endif lrmd_event_callback callback; /* Internal IPC proxy msg passing for remote guests */ void (*proxy_callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg); void *proxy_callback_userdata; char *peer_version; } lrmd_private_t; static lrmd_list_t * lrmd_list_add(lrmd_list_t * head, const char *value) { lrmd_list_t *p, *end; p = calloc(1, sizeof(lrmd_list_t)); p->val = strdup(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void lrmd_list_freeall(lrmd_list_t * head) { lrmd_list_t *p; while (head) { char *val = (char *)head->val; p = head->next; free(val); free(head); head = p; } } lrmd_key_value_t * lrmd_key_value_add(lrmd_key_value_t * head, const char *key, const char *value) { lrmd_key_value_t *p, *end; p = calloc(1, sizeof(lrmd_key_value_t)); p->key = strdup(key); p->value = strdup(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void lrmd_key_value_freeall(lrmd_key_value_t * head) { lrmd_key_value_t *p; while (head) { p = head->next; free(head->key); free(head->value); free(head); head = p; } } lrmd_event_data_t * lrmd_copy_event(lrmd_event_data_t * event) { lrmd_event_data_t *copy = NULL; copy = calloc(1, sizeof(lrmd_event_data_t)); /* This will get all the int values. * we just have to be careful not to leave any * dangling pointers to strings. 
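 * (After the memcpy() below, string members such as copy->rsc_id would still
 * alias event's buffers, which is why each one is strdup()'d immediately
 * afterward.)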
*/ memcpy(copy, event, sizeof(lrmd_event_data_t)); copy->rsc_id = event->rsc_id ? strdup(event->rsc_id) : NULL; copy->op_type = event->op_type ? strdup(event->op_type) : NULL; copy->user_data = event->user_data ? strdup(event->user_data) : NULL; copy->output = event->output ? strdup(event->output) : NULL; copy->exit_reason = event->exit_reason ? strdup(event->exit_reason) : NULL; copy->remote_nodename = event->remote_nodename ? strdup(event->remote_nodename) : NULL; copy->params = crm_str_table_dup(event->params); return copy; } void lrmd_free_event(lrmd_event_data_t * event) { if (!event) { return; } /* free gives me grief if i try to cast */ free((char *)event->rsc_id); free((char *)event->op_type); free((char *)event->user_data); free((char *)event->output); free((char *)event->exit_reason); free((char *)event->remote_nodename); if (event->params) { g_hash_table_destroy(event->params); } free(event); } static int lrmd_dispatch_internal(lrmd_t * lrmd, xmlNode * msg) { const char *type; const char *proxy_session = crm_element_value(msg, F_LRMD_IPC_SESSION); lrmd_private_t *native = lrmd->lrmd_private; lrmd_event_data_t event = { 0, }; if (proxy_session != NULL) { /* this is proxy business */ lrmd_internal_proxy_dispatch(lrmd, msg); return 1; } else if (!native->callback) { /* no callback set */ crm_trace("notify event received but client has not set callback"); return 1; } event.remote_nodename = native->remote_nodename; type = crm_element_value(msg, F_LRMD_OPERATION); crm_element_value_int(msg, F_LRMD_CALLID, &event.call_id); event.rsc_id = crm_element_value(msg, F_LRMD_RSC_ID); if (crm_str_eq(type, LRMD_OP_RSC_REG, TRUE)) { event.type = lrmd_event_register; } else if (crm_str_eq(type, LRMD_OP_RSC_UNREG, TRUE)) { event.type = lrmd_event_unregister; } else if (crm_str_eq(type, LRMD_OP_RSC_EXEC, TRUE)) { crm_element_value_int(msg, F_LRMD_TIMEOUT, &event.timeout); crm_element_value_int(msg, F_LRMD_RSC_INTERVAL, &event.interval); crm_element_value_int(msg, F_LRMD_RSC_START_DELAY, &event.start_delay); crm_element_value_int(msg, F_LRMD_EXEC_RC, (int *)&event.rc); crm_element_value_int(msg, F_LRMD_OP_STATUS, &event.op_status); crm_element_value_int(msg, F_LRMD_RSC_DELETED, &event.rsc_deleted); crm_element_value_int(msg, F_LRMD_RSC_RUN_TIME, (int *)&event.t_run); crm_element_value_int(msg, F_LRMD_RSC_RCCHANGE_TIME, (int *)&event.t_rcchange); crm_element_value_int(msg, F_LRMD_RSC_EXEC_TIME, (int *)&event.exec_time); crm_element_value_int(msg, F_LRMD_RSC_QUEUE_TIME, (int *)&event.queue_time); event.op_type = crm_element_value(msg, F_LRMD_RSC_ACTION); event.user_data = crm_element_value(msg, F_LRMD_RSC_USERDATA_STR); event.output = crm_element_value(msg, F_LRMD_RSC_OUTPUT); event.exit_reason = crm_element_value(msg, F_LRMD_RSC_EXIT_REASON); event.type = lrmd_event_exec_complete; event.params = xml2list(msg); } else if (crm_str_eq(type, LRMD_OP_NEW_CLIENT, TRUE)) { event.type = lrmd_event_new_client; } else if (crm_str_eq(type, LRMD_OP_POKE, TRUE)) { event.type = lrmd_event_poke; } else { return 1; } crm_trace("op %s notify event received", type); native->callback(&event); if (event.params) { g_hash_table_destroy(event.params); } return 1; } static int lrmd_ipc_dispatch(const char *buffer, ssize_t length, gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *msg; int rc; if (!native->callback) { /* no callback set */ return 1; } msg = string2xml(buffer); rc = lrmd_dispatch_internal(lrmd, msg); free_xml(msg); return rc; } #ifdef HAVE_GNUTLS_GNUTLS_H 
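/* Illustrative sketch (not part of this patch): how a client might use the
 * TLS API defined below. The callback and function names and the node name
 * here are hypothetical, and error handling is minimal; guarded out so it
 * is never compiled.
 */
#if 0
static void
example_remote_event_cb(lrmd_event_data_t * event)
{
    if (event->type == lrmd_event_connect) {
        /* connection_rc carries the async connect result; see
         * report_async_connection_result() below */
        crm_info("Connection to %s: %s", event->remote_nodename,
                 pcmk_strerror(event->connection_rc));
    }
}

static int
example_remote_connect(void)
{
    /* NULL server means the node name doubles as the host to contact;
     * port 0 selects crm_default_remote_port() */
    lrmd_t *remote = lrmd_remote_api_new("remote-node-1", NULL, 0);

    if (remote == NULL) {
        return -ENOTCONN; /* e.g. built without GNUTLS support */
    }
    remote->cmds->set_callback(remote, example_remote_event_cb);

    /* Returns immediately; the result arrives via lrmd_event_connect */
    return remote->cmds->connect_async(remote, "example-client", 10000);
}
#endif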
static void lrmd_free_xml(gpointer userdata) { free_xml((xmlNode *) userdata); } static int lrmd_tls_connected(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote->tls_session) { return TRUE; } return FALSE; } static int lrmd_tls_dispatch(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *xml = NULL; int rc = 0; int disconnected = 0; if (lrmd_tls_connected(lrmd) == FALSE) { - crm_trace("tls dispatch triggered after disconnect"); + crm_trace("TLS dispatch triggered after disconnect"); return 0; } - crm_trace("tls_dispatch triggered"); + crm_trace("TLS dispatch triggered"); /* First check if there are any pending notifies to process that came * while we were waiting for replies earlier. */ if (native->pending_notify) { GList *iter = NULL; crm_trace("Processing pending notifies"); for (iter = native->pending_notify; iter; iter = iter->next) { lrmd_dispatch_internal(lrmd, iter->data); } g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } /* Next read the current buffer and see if there are any messages to handle. */ rc = crm_remote_ready(native->remote, 0); if (rc == 0) { /* nothing to read, see if any full messages are already in buffer. */ xml = crm_remote_parse_buffer(native->remote); } else if (rc < 0) { disconnected = 1; } else { crm_remote_recv(native->remote, -1, &disconnected); xml = crm_remote_parse_buffer(native->remote); } while (xml) { const char *msg_type = crm_element_value(xml, F_LRMD_REMOTE_MSG_TYPE); if (safe_str_eq(msg_type, "notify")) { lrmd_dispatch_internal(lrmd, xml); } else if (safe_str_eq(msg_type, "reply")) { if (native->expected_late_replies > 0) { native->expected_late_replies--; } else { int reply_id = 0; crm_element_value_int(xml, F_LRMD_CALLID, &reply_id); /* if this happens, we want to know about it */ - crm_err("Got outdated reply %d", reply_id); + crm_err("Got outdated remote LRM reply %d", reply_id); } } free_xml(xml); xml = crm_remote_parse_buffer(native->remote); } if (disconnected) { - crm_info("Server disconnected while reading remote server msg."); + crm_info("Lost %s LRM connection while reading data", + (native->remote_nodename? 
native->remote_nodename : "local")); lrmd_tls_disconnect(lrmd); return 0; } return 1; } #endif /* Not used with mainloop */ int lrmd_poll(lrmd_t * lrmd, int timeout) { lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case CRM_CLIENT_IPC: return crm_ipc_ready(native->ipc); #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: if (native->pending_notify) { return 1; } return crm_remote_ready(native->remote, 0); #endif default: crm_err("Unsupported connection type: %d", native->type); } return 0; } /* Not used with mainloop */ bool lrmd_dispatch(lrmd_t * lrmd) { lrmd_private_t *private = NULL; CRM_ASSERT(lrmd != NULL); private = lrmd->lrmd_private; switch (private->type) { case CRM_CLIENT_IPC: while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); lrmd_ipc_dispatch(msg, strlen(msg), lrmd); } } break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: lrmd_tls_dispatch(lrmd); break; #endif default: crm_err("Unsupported connection type: %d", private->type); } if (lrmd_api_is_connected(lrmd) == FALSE) { crm_err("Connection closed"); return FALSE; } return TRUE; } static xmlNode * lrmd_create_op(const char *token, const char *op, xmlNode *data, int timeout, enum lrmd_call_options options) { xmlNode *op_msg = create_xml_node(NULL, "lrmd_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "lrmd_command"); crm_xml_add(op_msg, F_TYPE, T_LRMD); crm_xml_add(op_msg, F_LRMD_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_LRMD_OPERATION, op); crm_xml_add_int(op_msg, F_LRMD_TIMEOUT, timeout); crm_xml_add_int(op_msg, F_LRMD_CALLOPTS, options); if (data != NULL) { add_message_xml(op_msg, F_LRMD_CALLDATA, data); } crm_trace("Created lrmd %s command with call options %.8lx (%d)", op, (long)options, options); return op_msg; } static void lrmd_ipc_connection_destroy(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; crm_info("IPC connection destroyed"); /* Prevent these from being cleaned up in lrmd_api_disconnect() */ native->ipc = NULL; native->source = NULL; if (native->callback) { lrmd_event_data_t event = { 0, }; event.type = lrmd_event_disconnect; event.remote_nodename = native->remote_nodename; native->callback(&event); } } #ifdef HAVE_GNUTLS_GNUTLS_H static void lrmd_tls_connection_destroy(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; crm_info("TLS connection destroyed"); if (native->remote->tls_session) { gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(*native->remote->tls_session); gnutls_free(native->remote->tls_session); } if (native->psk_cred_c) { gnutls_psk_free_client_credentials(native->psk_cred_c); } if (native->sock) { close(native->sock); } if (native->process_notify) { mainloop_destroy_trigger(native->process_notify); native->process_notify = NULL; } if (native->pending_notify) { g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } free(native->remote->buffer); native->remote->buffer = NULL; native->source = 0; native->sock = 0; native->psk_cred_c = NULL; native->remote->tls_session = NULL; native->sock = 0; if (native->callback) { lrmd_event_data_t event = { 0, }; event.remote_nodename = native->remote_nodename; event.type = lrmd_event_disconnect; native->callback(&event); } return; } int lrmd_tls_send_msg(crm_remote_t * session, xmlNode * msg, uint32_t id, const char *msg_type) { 
crm_xml_add_int(msg, F_LRMD_REMOTE_MSG_ID, id); crm_xml_add(msg, F_LRMD_REMOTE_MSG_TYPE, msg_type); return crm_remote_send(session, msg); } static xmlNode * lrmd_tls_recv_reply(lrmd_t * lrmd, int total_timeout, int expected_reply_id, int *disconnected) { lrmd_private_t *native = lrmd->lrmd_private; xmlNode *xml = NULL; time_t start = time(NULL); const char *msg_type = NULL; int reply_id = 0; int remaining_timeout = 0; /* A timeout of 0 here makes no sense. We have to wait a period of time * for the response to come back. If -1 or 0, default to 10 seconds. */ if (total_timeout <= 0 || total_timeout > MAX_TLS_RECV_WAIT) { total_timeout = MAX_TLS_RECV_WAIT; } while (!xml) { xml = crm_remote_parse_buffer(native->remote); if (!xml) { /* read some more off the tls buffer if we still have time left. */ if (remaining_timeout) { remaining_timeout = total_timeout - ((time(NULL) - start) * 1000); } else { remaining_timeout = total_timeout; } if (remaining_timeout <= 0) { crm_err("Never received the expected reply during the timeout period, disconnecting."); *disconnected = TRUE; return NULL; } crm_remote_recv(native->remote, remaining_timeout, disconnected); xml = crm_remote_parse_buffer(native->remote); if (!xml) { crm_err("Unable to receive expected reply, disconnecting."); *disconnected = TRUE; return NULL; } else if (*disconnected) { return NULL; } } CRM_ASSERT(xml != NULL); crm_element_value_int(xml, F_LRMD_REMOTE_MSG_ID, &reply_id); msg_type = crm_element_value(xml, F_LRMD_REMOTE_MSG_TYPE); if (!msg_type) { crm_err("Empty msg type received while waiting for reply"); free_xml(xml); xml = NULL; } else if (safe_str_eq(msg_type, "notify")) { /* got a notify while waiting for reply, trigger the notify to be processed later */ crm_info("queueing notify"); native->pending_notify = g_list_append(native->pending_notify, xml); if (native->process_notify) { crm_info("notify trigger set."); mainloop_set_trigger(native->process_notify); } xml = NULL; } else if (safe_str_neq(msg_type, "reply")) { /* msg isn't a reply, make some noise */ crm_err("Expected a reply, got %s", msg_type); free_xml(xml); xml = NULL; } else if (reply_id != expected_reply_id) { if (native->expected_late_replies > 0) { native->expected_late_replies--; } else { crm_err("Got outdated reply, expected id %d got id %d", expected_reply_id, reply_id); } free_xml(xml); xml = NULL; } } if (native->remote->buffer && native->process_notify) { mainloop_set_trigger(native->process_notify); } return xml; } static int lrmd_tls_send(lrmd_t * lrmd, xmlNode * msg) { int rc = 0; lrmd_private_t *native = lrmd->lrmd_private; global_remote_msg_id++; if (global_remote_msg_id <= 0) { global_remote_msg_id = 1; } rc = lrmd_tls_send_msg(native->remote, msg, global_remote_msg_id, "request"); if (rc <= 0) { crm_err("Remote lrmd send failed, disconnecting"); lrmd_tls_disconnect(lrmd); return -ENOTCONN; } return pcmk_ok; } static int lrmd_tls_send_recv(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply) { int rc = 0; int disconnected = 0; xmlNode *xml = NULL; if (lrmd_tls_connected(lrmd) == FALSE) { return -1; } rc = lrmd_tls_send(lrmd, msg); if (rc < 0) { return rc; } xml = lrmd_tls_recv_reply(lrmd, timeout, global_remote_msg_id, &disconnected); if (disconnected) { crm_err("Remote lrmd server disconnected while waiting for reply with id %d. ", global_remote_msg_id); lrmd_tls_disconnect(lrmd); rc = -ENOTCONN; } else if (!xml) { crm_err("Remote lrmd never received reply for request id %d. 
timeout: %dms ", global_remote_msg_id, timeout); rc = -ECOMM; } if (reply) { *reply = xml; } else { free_xml(xml); } return rc; } #endif static int lrmd_send_xml(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply) { int rc = -1; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case CRM_CLIENT_IPC: rc = crm_ipc_send(native->ipc, msg, crm_ipc_client_response, timeout, reply); break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: rc = lrmd_tls_send_recv(lrmd, msg, timeout, reply); break; #endif default: crm_err("Unsupported connection type: %d", native->type); } return rc; } static int lrmd_send_xml_no_reply(lrmd_t * lrmd, xmlNode * msg) { int rc = -1; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case CRM_CLIENT_IPC: rc = crm_ipc_send(native->ipc, msg, crm_ipc_flags_none, 0, NULL); break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: rc = lrmd_tls_send(lrmd, msg); if (rc == pcmk_ok) { /* we don't want to wait around for the reply, but * since the request/reply protocol needs to behave the same * as libqb, a reply will eventually come later anyway. */ native->expected_late_replies++; } break; #endif default: crm_err("Unsupported connection type: %d", native->type); } return rc; } static int lrmd_api_is_connected(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case CRM_CLIENT_IPC: return crm_ipc_connected(native->ipc); break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: return lrmd_tls_connected(lrmd); break; #endif default: crm_err("Unsupported connection type: %d", native->type); } return 0; } /*! * \internal * \brief Send a prepared API command to the lrmd server * * \param[in] lrmd Existing connection to the lrmd server * \param[in] op Name of API command to send * \param[in] data Command data XML to add to the sent command * \param[out] output_data If expecting a reply, it will be stored here * \param[in] timeout Timeout in milliseconds (if 0, defaults to 1000); * will be added to the command XML * \param[in] call_options Call options to pass to server when sending * \param[in] expect_reply If TRUE, wait for a reply from the server; * must be TRUE for IPC (as opposed to TLS) clients * * \return pcmk_ok on success, -errno on error */ static int lrmd_send_command(lrmd_t *lrmd, const char *op, xmlNode *data, xmlNode **output_data, int timeout, enum lrmd_call_options options, gboolean expect_reply) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; if (!lrmd_api_is_connected(lrmd)) { return -ENOTCONN; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } CRM_CHECK(native->token != NULL,; ); crm_trace("sending %s op to lrmd", op); op_msg = lrmd_create_op(native->token, op, data, timeout, options); if (op_msg == NULL) { return -EINVAL; } if (expect_reply) { rc = lrmd_send_xml(lrmd, op_msg, timeout, &op_reply); } else { rc = lrmd_send_xml_no_reply(lrmd, op_msg); goto done; } if (rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%d): %d", op, timeout, rc); rc = -ECOMM; goto done; } else if(op_reply == NULL) { rc = -ENOMSG; goto done; } rc = pcmk_ok; crm_trace("%s op reply received", op); if (crm_element_value_int(op_reply, F_LRMD_RC, &rc) != 0) { rc = -ENOMSG; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (output_data) { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } done: if (lrmd_api_is_connected(lrmd) == FALSE) { crm_err("LRMD disconnected"); } 
free_xml(op_msg); free_xml(op_reply); return rc; } static int lrmd_api_poke_connection(lrmd_t * lrmd) { int rc; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *data = create_xml_node(NULL, F_LRMD_RSC); crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); rc = lrmd_send_command(lrmd, LRMD_OP_POKE, data, NULL, 0, 0, native->type == CRM_CLIENT_IPC ? TRUE : FALSE); free_xml(data); return rc < 0 ? rc : pcmk_ok; } int remote_proxy_check(lrmd_t * lrmd, GHashTable *hash) { int rc; const char *value; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *data = create_xml_node(NULL, F_LRMD_OPERATION); crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); value = g_hash_table_lookup(hash, "stonith-watchdog-timeout"); crm_xml_add(data, F_LRMD_WATCHDOG, value); rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0, native->type == CRM_CLIENT_IPC ? TRUE : FALSE); free_xml(data); return rc < 0 ? rc : pcmk_ok; } static int lrmd_handshake(lrmd_t * lrmd, const char *name) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *reply = NULL; xmlNode *hello = create_xml_node(NULL, "lrmd_command"); crm_xml_add(hello, F_TYPE, T_LRMD); crm_xml_add(hello, F_LRMD_OPERATION, CRM_OP_REGISTER); crm_xml_add(hello, F_LRMD_CLIENTNAME, name); crm_xml_add(hello, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); /* advertise that we are a proxy provider */ if (native->proxy_callback) { crm_xml_add(hello, F_LRMD_IS_IPC_PROVIDER, "true"); } rc = lrmd_send_xml(lrmd, hello, -1, &reply); if (rc < 0) { crm_perror(LOG_DEBUG, "Couldn't complete registration with the lrmd API: %d", rc); rc = -ECOMM; } else if (reply == NULL) { crm_err("Did not receive registration reply"); rc = -EPROTO; } else { const char *version = crm_element_value(reply, F_LRMD_PROTOCOL_VERSION); const char *msg_type = crm_element_value(reply, F_LRMD_OPERATION); const char *tmp_ticket = crm_element_value(reply, F_LRMD_CLIENTID); crm_element_value_int(reply, F_LRMD_RC, &rc); if (rc == -EPROTO) { crm_err("LRMD protocol mismatch client version %s, server version %s", LRMD_PROTOCOL_VERSION, version); crm_log_xml_err(reply, "Protocol Error"); } else if (safe_str_neq(msg_type, CRM_OP_REGISTER)) { crm_err("Invalid registration message: %s", msg_type); crm_log_xml_err(reply, "Bad reply"); rc = -EPROTO; } else if (tmp_ticket == NULL) { crm_err("No registration token provided"); crm_log_xml_err(reply, "Bad reply"); rc = -EPROTO; } else { crm_trace("Obtained registration token: %s", tmp_ticket); native->token = strdup(tmp_ticket); native->peer_version = strdup(version?version:"1.0"); /* Included since 1.1 */ rc = pcmk_ok; } } free_xml(reply); free_xml(hello); if (rc != pcmk_ok) { lrmd_api_disconnect(lrmd); } return rc; } static int lrmd_ipc_connect(lrmd_t * lrmd, int *fd) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; static struct ipc_client_callbacks lrmd_callbacks = { .dispatch = lrmd_ipc_dispatch, .destroy = lrmd_ipc_connection_destroy }; crm_info("Connecting to lrmd"); if (fd) { /* No mainloop */ native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0); if (native->ipc && crm_ipc_connect(native->ipc)) { *fd = crm_ipc_get_fd(native->ipc); } else if (native->ipc) { crm_perror(LOG_ERR, "Connection to local resource manager failed"); rc = -ENOTCONN; } } else { native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { crm_debug("Could not connect to the LRMD API"); rc = -ENOTCONN; } return rc; } #ifdef 
HAVE_GNUTLS_GNUTLS_H static int set_key(gnutls_datum_t * key, const char *location) { FILE *stream; int read_len = 256; int cur_len = 0; int buf_len = read_len; static char *key_cache = NULL; static size_t key_cache_len = 0; static time_t key_cache_updated; if (location == NULL) { return -1; } if (key_cache) { time_t now = time(NULL); if ((now - key_cache_updated) < 60) { key->data = gnutls_malloc(key_cache_len + 1); key->size = key_cache_len; memcpy(key->data, key_cache, key_cache_len); crm_debug("using cached LRMD key"); return 0; } else { key_cache_len = 0; key_cache_updated = 0; free(key_cache); key_cache = NULL; crm_debug("clearing lrmd key cache"); } } stream = fopen(location, "r"); if (!stream) { return -1; } key->data = gnutls_malloc(read_len); while (!feof(stream)) { int next; if (cur_len == buf_len) { buf_len = cur_len + read_len; key->data = gnutls_realloc(key->data, buf_len); } next = fgetc(stream); if (next == EOF && feof(stream)) { break; } key->data[cur_len] = next; cur_len++; } fclose(stream); key->size = cur_len; if (!cur_len) { gnutls_free(key->data); key->data = 0; return -1; } if (!key_cache) { key_cache = calloc(1, key->size + 1); memcpy(key_cache, key->data, key->size); key_cache_len = key->size; key_cache_updated = time(NULL); } return 0; } int lrmd_tls_set_key(gnutls_datum_t * key) { const char *specific_location = getenv("PCMK_authkey_location"); if (set_key(key, specific_location) == 0) { crm_debug("Using custom authkey location %s", specific_location); return pcmk_ok; } else if (specific_location) { crm_err("No valid lrmd remote key found at %s, trying default location", specific_location); } if ((set_key(key, DEFAULT_REMOTE_KEY_LOCATION) != 0) && (set_key(key, ALT_REMOTE_KEY_LOCATION) != 0)) { crm_err("No valid lrmd remote key found at %s", DEFAULT_REMOTE_KEY_LOCATION); return -ENOKEY; } return pcmk_ok; } static void lrmd_gnutls_global_init(void) { static int gnutls_init = 0; if (!gnutls_init) { crm_gnutls_global_init(); } gnutls_init = 1; } #endif static void report_async_connection_result(lrmd_t * lrmd, int rc) { lrmd_private_t *native = lrmd->lrmd_private; if (native->callback) { lrmd_event_data_t event = { 0, }; event.type = lrmd_event_connect; event.remote_nodename = native->remote_nodename; event.connection_rc = rc; native->callback(&event); } } #ifdef HAVE_GNUTLS_GNUTLS_H static void lrmd_tcp_connect_cb(void *userdata, int sock) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; char *name; static struct mainloop_fd_callbacks lrmd_tls_callbacks = { .dispatch = lrmd_tls_dispatch, .destroy = lrmd_tls_connection_destroy, }; int rc = sock; gnutls_datum_t psk_key = { NULL, 0 }; native->async_timer = 0; if (rc < 0) { lrmd_tls_connection_destroy(lrmd); crm_info("Could not connect to remote LRMD at %s:%d", native->server, native->port); report_async_connection_result(lrmd, rc); return; } /* The TCP connection was successful, so establish the TLS connection. 
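 * The sequence below is: load the pre-shared key via lrmd_tls_set_key(),
 * bind it to client credentials, create a PSK TLS session on the connected
 * socket, then block (briefly) in crm_initiate_client_tls_handshake().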
* @TODO make this async to avoid blocking code in client */ native->sock = sock; rc = lrmd_tls_set_key(&psk_key); if (rc != 0) { crm_warn("Could not set key for remote LRMD at %s:%d " CRM_XS " rc=%d", native->server, native->port, rc); lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, rc); return; } gnutls_psk_allocate_client_credentials(&native->psk_cred_c); gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW); gnutls_free(psk_key.data); native->remote->tls_session = create_psk_tls_session(sock, GNUTLS_CLIENT, native->psk_cred_c); if (crm_initiate_client_tls_handshake(native->remote, LRMD_CLIENT_HANDSHAKE_TIMEOUT) != 0) { crm_warn("Disconnecting after TLS handshake with remote LRMD %s:%d failed", native->server, native->port); gnutls_deinit(*native->remote->tls_session); gnutls_free(native->remote->tls_session); native->remote->tls_session = NULL; lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, -EKEYREJECTED); return; } crm_info("TLS connection to remote LRMD %s:%d succeeded", native->server, native->port); name = crm_strdup_printf("remote-lrmd-%s:%d", native->server, native->port); native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_tls_dispatch, lrmd); native->source = mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd, &lrmd_tls_callbacks); rc = lrmd_handshake(lrmd, name); free(name); report_async_connection_result(lrmd, rc); return; } static int lrmd_tls_connect_async(lrmd_t * lrmd, int timeout /*ms */ ) { int sock = 0; int timer_id = 0; lrmd_private_t *native = lrmd->lrmd_private; lrmd_gnutls_global_init(); sock = crm_remote_tcp_connect_async(native->server, native->port, timeout, &timer_id, lrmd, lrmd_tcp_connect_cb); if (sock < 0) { return sock; } native->sock = sock; native->async_timer = timer_id; return pcmk_ok; } static int lrmd_tls_connect(lrmd_t * lrmd, int *fd) { static struct mainloop_fd_callbacks lrmd_tls_callbacks = { .dispatch = lrmd_tls_dispatch, .destroy = lrmd_tls_connection_destroy, }; int rc; lrmd_private_t *native = lrmd->lrmd_private; int sock; gnutls_datum_t psk_key = { NULL, 0 }; lrmd_gnutls_global_init(); sock = crm_remote_tcp_connect(native->server, native->port); if (sock < 0) { crm_warn("Could not establish remote lrmd connection to %s", native->server); lrmd_tls_connection_destroy(lrmd); return -ENOTCONN; } native->sock = sock; rc = lrmd_tls_set_key(&psk_key); if (rc < 0) { lrmd_tls_connection_destroy(lrmd); return rc; } gnutls_psk_allocate_client_credentials(&native->psk_cred_c); gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW); gnutls_free(psk_key.data); native->remote->tls_session = create_psk_tls_session(sock, GNUTLS_CLIENT, native->psk_cred_c); if (crm_initiate_client_tls_handshake(native->remote, LRMD_CLIENT_HANDSHAKE_TIMEOUT) != 0) { crm_err("Session creation for %s:%d failed", native->server, native->port); gnutls_deinit(*native->remote->tls_session); gnutls_free(native->remote->tls_session); native->remote->tls_session = NULL; lrmd_tls_connection_destroy(lrmd); return -EKEYREJECTED; } crm_info("Remote lrmd client TLS connection established with server %s:%d", native->server, native->port); if (fd) { *fd = sock; } else { char *name = crm_strdup_printf("remote-lrmd-%s:%d", native->server, native->port); native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_tls_dispatch, lrmd); native->source = mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd, 
&lrmd_tls_callbacks); free(name); } return pcmk_ok; } #endif static int lrmd_api_connect(lrmd_t * lrmd, const char *name, int *fd) { int rc = -ENOTCONN; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case CRM_CLIENT_IPC: rc = lrmd_ipc_connect(lrmd, fd); break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: rc = lrmd_tls_connect(lrmd, fd); break; #endif default: crm_err("Unsupported connection type: %d", native->type); } if (rc == pcmk_ok) { rc = lrmd_handshake(lrmd, name); } return rc; } static int lrmd_api_connect_async(lrmd_t * lrmd, const char *name, int timeout) { int rc = 0; lrmd_private_t *native = lrmd->lrmd_private; if (!native->callback) { crm_err("Async connect not possible, no lrmd client callback set."); return -1; } switch (native->type) { case CRM_CLIENT_IPC: /* fake async connection with ipc. it should be fast * enough that we gain very little from async */ rc = lrmd_api_connect(lrmd, name, NULL); if (!rc) { report_async_connection_result(lrmd, rc); } break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: rc = lrmd_tls_connect_async(lrmd, timeout); if (rc) { /* connection failed, report rc now */ report_async_connection_result(lrmd, rc); } break; #endif default: crm_err("Unsupported connection type: %d", native->type); } return rc; } static void lrmd_ipc_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } } #ifdef HAVE_GNUTLS_GNUTLS_H static void lrmd_tls_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote->tls_session) { gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(*native->remote->tls_session); gnutls_free(native->remote->tls_session); native->remote->tls_session = 0; } if (native->async_timer) { g_source_remove(native->async_timer); native->async_timer = 0; } if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; } else if (native->sock) { close(native->sock); native->sock = 0; } if (native->pending_notify) { g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } } #endif static int lrmd_api_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; - crm_info("Disconnecting from %d lrmd service", native->type); + crm_info("Disconnecting %s LRM connection to %s", + crm_client_type_text(native->type), + (native->remote_nodename? 
native->remote_nodename : "local")); switch (native->type) { case CRM_CLIENT_IPC: lrmd_ipc_disconnect(lrmd); break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: lrmd_tls_disconnect(lrmd); break; #endif default: crm_err("Unsupported connection type: %d", native->type); } free(native->token); native->token = NULL; free(native->peer_version); native->peer_version = NULL; return 0; } static int lrmd_api_register_rsc(lrmd_t * lrmd, const char *rsc_id, const char *class, const char *provider, const char *type, enum lrmd_call_options options) { int rc = pcmk_ok; xmlNode *data = NULL; if (!class || !type || !rsc_id) { return -EINVAL; } if (crm_provider_required(class) && !provider) { return -EINVAL; } data = create_xml_node(NULL, F_LRMD_RSC); crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data, F_LRMD_RSC_ID, rsc_id); crm_xml_add(data, F_LRMD_CLASS, class); crm_xml_add(data, F_LRMD_PROVIDER, provider); crm_xml_add(data, F_LRMD_TYPE, type); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_REG, data, NULL, 0, options, TRUE); free_xml(data); return rc; } static int lrmd_api_unregister_rsc(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options) { int rc = pcmk_ok; xmlNode *data = create_xml_node(NULL, F_LRMD_RSC); crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data, F_LRMD_RSC_ID, rsc_id); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_UNREG, data, NULL, 0, options, TRUE); free_xml(data); return rc; } lrmd_rsc_info_t * -lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info) +lrmd_new_rsc_info(const char *rsc_id, const char *standard, + const char *provider, const char *type) { - lrmd_rsc_info_t *copy = NULL; - - copy = calloc(1, sizeof(lrmd_rsc_info_t)); + lrmd_rsc_info_t *rsc_info = calloc(1, sizeof(lrmd_rsc_info_t)); - copy->id = strdup(rsc_info->id); - copy->type = strdup(rsc_info->type); - copy->standard = strdup(rsc_info->standard); - if (rsc_info->provider) { - copy->provider = strdup(rsc_info->provider); + CRM_ASSERT(rsc_info); + if (rsc_id) { + rsc_info->id = strdup(rsc_id); + CRM_ASSERT(rsc_info->id); + } + if (standard) { + rsc_info->standard = strdup(standard); + CRM_ASSERT(rsc_info->standard); } + if (provider) { + rsc_info->provider = strdup(provider); + CRM_ASSERT(rsc_info->provider); + } + if (type) { + rsc_info->type = strdup(type); + CRM_ASSERT(rsc_info->type); + } + return rsc_info; +} - return copy; +lrmd_rsc_info_t * +lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info) +{ + return lrmd_new_rsc_info(rsc_info->id, rsc_info->standard, + rsc_info->provider, rsc_info->type); } void lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info) { if (!rsc_info) { return; } free(rsc_info->id); free(rsc_info->type); free(rsc_info->standard); free(rsc_info->provider); free(rsc_info); } static lrmd_rsc_info_t * lrmd_api_get_rsc_info(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc_info = NULL; xmlNode *data = create_xml_node(NULL, F_LRMD_RSC); xmlNode *output = NULL; const char *class = NULL; const char *provider = NULL; const char *type = NULL; crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data, F_LRMD_RSC_ID, rsc_id); lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, &output, 0, options, TRUE); free_xml(data); if (!output) { return NULL; } class = crm_element_value(output, F_LRMD_CLASS); provider = crm_element_value(output, F_LRMD_PROVIDER); type = crm_element_value(output, F_LRMD_TYPE); if (!class || !type) { free_xml(output); return NULL; } else if (crm_provider_required(class) && !provider) { free_xml(output); return NULL; } - rsc_info 
= calloc(1, sizeof(lrmd_rsc_info_t)); - rsc_info->id = strdup(rsc_id); - rsc_info->standard = strdup(class); - if (provider) { - rsc_info->provider = strdup(provider); - } - rsc_info->type = strdup(type); - + rsc_info = lrmd_new_rsc_info(rsc_id, class, provider, type); free_xml(output); return rsc_info; } static void lrmd_api_set_callback(lrmd_t * lrmd, lrmd_event_callback callback) { lrmd_private_t *native = lrmd->lrmd_private; native->callback = callback; } void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)) { lrmd_private_t *native = lrmd->lrmd_private; native->proxy_callback = callback; native->proxy_callback_userdata = userdata; } void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg) { lrmd_private_t *native = lrmd->lrmd_private; if (native->proxy_callback) { crm_log_xml_trace(msg, "PROXY_INBOUND"); native->proxy_callback(lrmd, native->proxy_callback_userdata, msg); } } int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg) { if (lrmd == NULL) { return -ENOTCONN; } crm_xml_add(msg, F_LRMD_OPERATION, CRM_OP_IPC_FWD); crm_log_xml_trace(msg, "PROXY_OUTBOUND"); return lrmd_send_xml_no_reply(lrmd, msg); } static int stonith_get_metadata(const char *provider, const char *type, char **output) { int rc = pcmk_ok; stonith_t *stonith_api = stonith_api_new(); if(stonith_api) { stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type, provider, output, 0); stonith_api->cmds->free(stonith_api); } if (*output == NULL) { rc = -EIO; } return rc; } static int lrmd_api_get_metadata(lrmd_t * lrmd, const char *class, const char *provider, const char *type, char **output, enum lrmd_call_options options) { svc_action_t *action; if (!class || !type) { return -EINVAL; } if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) { return stonith_get_metadata(provider, type, output); } action = resources_action_create(type, class, provider, type, "meta-data", 0, CRMD_METADATA_CALL_TIMEOUT, NULL, 0); if (action == NULL) { crm_err("Unable to retrieve meta-data for %s:%s:%s", class, provider, type); services_action_free(action); return -EINVAL; } if (!(services_action_sync(action))) { crm_err("Failed to retrieve meta-data for %s:%s:%s", class, provider, type); services_action_free(action); return -EIO; } if (!action->stdout_data) { crm_err("Failed to receive meta-data for %s:%s:%s", class, provider, type); services_action_free(action); return -EIO; } *output = strdup(action->stdout_data); services_action_free(action); return pcmk_ok; } static int lrmd_api_exec(lrmd_t * lrmd, const char *rsc_id, const char *action, const char *userdata, int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ enum lrmd_call_options options, lrmd_key_value_t * params) { int rc = pcmk_ok; xmlNode *data = create_xml_node(NULL, F_LRMD_RSC); xmlNode *args = create_xml_node(data, XML_TAG_ATTRS); lrmd_key_value_t *tmp = NULL; crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data, F_LRMD_RSC_ID, rsc_id); crm_xml_add(data, F_LRMD_RSC_ACTION, action); crm_xml_add(data, F_LRMD_RSC_USERDATA_STR, userdata); crm_xml_add_int(data, F_LRMD_RSC_INTERVAL, interval); crm_xml_add_int(data, F_LRMD_TIMEOUT, timeout); crm_xml_add_int(data, F_LRMD_RSC_START_DELAY, start_delay); for (tmp = params; tmp; tmp = tmp->next) { hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args); } rc = lrmd_send_command(lrmd, LRMD_OP_RSC_EXEC, data, NULL, timeout, options, TRUE); free_xml(data); lrmd_key_value_freeall(params); return rc; } /* timeout 
is in ms */ static int lrmd_api_exec_alert(lrmd_t *lrmd, const char *alert_id, const char *alert_path, int timeout, lrmd_key_value_t *params) { int rc = pcmk_ok; xmlNode *data = create_xml_node(NULL, F_LRMD_ALERT); xmlNode *args = create_xml_node(data, XML_TAG_ATTRS); lrmd_key_value_t *tmp = NULL; crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data, F_LRMD_ALERT_ID, alert_id); crm_xml_add(data, F_LRMD_ALERT_PATH, alert_path); crm_xml_add_int(data, F_LRMD_TIMEOUT, timeout); for (tmp = params; tmp; tmp = tmp->next) { hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args); } rc = lrmd_send_command(lrmd, LRMD_OP_ALERT_EXEC, data, NULL, timeout, lrmd_opt_notify_orig_only, TRUE); free_xml(data); lrmd_key_value_freeall(params); return rc; } static int lrmd_api_cancel(lrmd_t * lrmd, const char *rsc_id, const char *action, int interval) { int rc = pcmk_ok; xmlNode *data = create_xml_node(NULL, F_LRMD_RSC); crm_xml_add(data, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(data, F_LRMD_RSC_ACTION, action); crm_xml_add(data, F_LRMD_RSC_ID, rsc_id); crm_xml_add_int(data, F_LRMD_RSC_INTERVAL, interval); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_CANCEL, data, NULL, 0, 0, TRUE); free_xml(data); return rc; } static int list_stonith_agents(lrmd_list_t ** resources) { int rc = 0; stonith_t *stonith_api = stonith_api_new(); stonith_key_value_t *stonith_resources = NULL; stonith_key_value_t *dIter = NULL; if(stonith_api) { stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL, &stonith_resources, 0); stonith_api->cmds->free(stonith_api); } for (dIter = stonith_resources; dIter; dIter = dIter->next) { rc++; if (resources) { *resources = lrmd_list_add(*resources, dIter->value); } } stonith_key_value_freeall(stonith_resources, 1, 0); return rc; } static int lrmd_api_list_agents(lrmd_t * lrmd, lrmd_list_t ** resources, const char *class, const char *provider) { int rc = 0; if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) { rc += list_stonith_agents(resources); } else { GListPtr gIter = NULL; GList *agents = resources_list_agents(class, provider); for (gIter = agents; gIter != NULL; gIter = gIter->next) { *resources = lrmd_list_add(*resources, (const char *)gIter->data); rc++; } g_list_free_full(agents, free); if (!class) { rc += list_stonith_agents(resources); } } if (rc == 0) { crm_notice("No agents found for class %s", class); rc = -EPROTONOSUPPORT; } return rc; } static int does_provider_have_agent(const char *agent, const char *provider, const char *class) { int found = 0; GList *agents = NULL; GListPtr gIter2 = NULL; agents = resources_list_agents(class, provider); for (gIter2 = agents; gIter2 != NULL; gIter2 = gIter2->next) { if (safe_str_eq(agent, gIter2->data)) { found = 1; } } g_list_free_full(agents, free); return found; } static int lrmd_api_list_ocf_providers(lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers) { int rc = pcmk_ok; char *provider = NULL; GList *ocf_providers = NULL; GListPtr gIter = NULL; ocf_providers = resources_list_providers(PCMK_RESOURCE_CLASS_OCF); for (gIter = ocf_providers; gIter != NULL; gIter = gIter->next) { provider = gIter->data; if (!agent || does_provider_have_agent(agent, provider, PCMK_RESOURCE_CLASS_OCF)) { *providers = lrmd_list_add(*providers, (const char *)gIter->data); rc++; } } g_list_free_full(ocf_providers, free); return rc; } static int lrmd_api_list_standards(lrmd_t * lrmd, lrmd_list_t ** supported) { int rc = 0; GList *standards = NULL; GListPtr gIter = NULL; standards = resources_list_standards(); for (gIter = 
standards; gIter != NULL; gIter = gIter->next) { *supported = lrmd_list_add(*supported, (const char *)gIter->data); rc++; } if (list_stonith_agents(NULL) > 0) { *supported = lrmd_list_add(*supported, PCMK_RESOURCE_CLASS_STONITH); rc++; } g_list_free_full(standards, free); return rc; } lrmd_t * lrmd_api_new(void) { lrmd_t *new_lrmd = NULL; lrmd_private_t *pvt = NULL; new_lrmd = calloc(1, sizeof(lrmd_t)); pvt = calloc(1, sizeof(lrmd_private_t)); pvt->remote = calloc(1, sizeof(crm_remote_t)); new_lrmd->cmds = calloc(1, sizeof(lrmd_api_operations_t)); pvt->type = CRM_CLIENT_IPC; new_lrmd->lrmd_private = pvt; new_lrmd->cmds->connect = lrmd_api_connect; new_lrmd->cmds->connect_async = lrmd_api_connect_async; new_lrmd->cmds->is_connected = lrmd_api_is_connected; new_lrmd->cmds->poke_connection = lrmd_api_poke_connection; new_lrmd->cmds->disconnect = lrmd_api_disconnect; new_lrmd->cmds->register_rsc = lrmd_api_register_rsc; new_lrmd->cmds->unregister_rsc = lrmd_api_unregister_rsc; new_lrmd->cmds->get_rsc_info = lrmd_api_get_rsc_info; new_lrmd->cmds->set_callback = lrmd_api_set_callback; new_lrmd->cmds->get_metadata = lrmd_api_get_metadata; new_lrmd->cmds->exec = lrmd_api_exec; new_lrmd->cmds->cancel = lrmd_api_cancel; new_lrmd->cmds->list_agents = lrmd_api_list_agents; new_lrmd->cmds->list_ocf_providers = lrmd_api_list_ocf_providers; new_lrmd->cmds->list_standards = lrmd_api_list_standards; new_lrmd->cmds->exec_alert = lrmd_api_exec_alert; return new_lrmd; } lrmd_t * lrmd_remote_api_new(const char *nodename, const char *server, int port) { #ifdef HAVE_GNUTLS_GNUTLS_H lrmd_t *new_lrmd = lrmd_api_new(); lrmd_private_t *native = new_lrmd->lrmd_private; if (!nodename && !server) { lrmd_api_delete(new_lrmd); return NULL; } native->type = CRM_CLIENT_TLS; native->remote_nodename = nodename ? strdup(nodename) : strdup(server); native->server = server ? strdup(server) : strdup(nodename); native->port = port; if (native->port == 0) { native->port = crm_default_remote_port(); } return new_lrmd; #else crm_err("GNUTLS is not enabled for this build, remote LRMD client can not be created"); return NULL; #endif } void lrmd_api_delete(lrmd_t * lrmd) { if (!lrmd) { return; } lrmd->cmds->disconnect(lrmd); /* no-op if already disconnected */ free(lrmd->cmds); if (lrmd->lrmd_private) { lrmd_private_t *native = lrmd->lrmd_private; #ifdef HAVE_GNUTLS_GNUTLS_H free(native->server); #endif free(native->remote_nodename); free(native->remote); free(native->token); free(native->peer_version); } free(lrmd->lrmd_private); free(lrmd); } diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c index cb56334aa3..11ee803329 100644 --- a/lrmd/lrmd.c +++ b/lrmd/lrmd.c @@ -1,1611 +1,1612 @@ /* * Copyright (c) 2012 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_TIMEB_H # include #endif #define EXIT_REASON_MAX_LEN 128 GHashTable *rsc_list = NULL; typedef struct lrmd_cmd_s { int timeout; int interval; int start_delay; int timeout_orig; int call_id; int exec_rc; int lrmd_op_status; int call_opts; /* Timer ids, must be removed on cmd destruction. */ int delay_id; int stonith_recurring_id; int rsc_deleted; int service_flags; char *client_id; char *origin; char *rsc_id; char *action; char *real_action; char *exit_reason; char *output; char *userdata_str; #ifdef HAVE_SYS_TIMEB_H /* recurring and systemd operations may involve more than one lrmd command * per operation, so they need info about original and most recent */ struct timeb t_first_run; /* Timestamp of when op first ran */ struct timeb t_run; /* Timestamp of when op most recently ran */ struct timeb t_first_queue; /* Timestamp of when op first was queued */ struct timeb t_queue; /* Timestamp of when op most recently was queued */ struct timeb t_rcchange; /* Timestamp of last rc change */ #endif int first_notify_sent; int last_notify_rc; int last_notify_op_status; int last_pid; GHashTable *params; } lrmd_cmd_t; static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc); static gboolean lrmd_rsc_dispatch(gpointer user_data); static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id); static void log_finished(lrmd_cmd_t * cmd, int exec_time, int queue_time) { char pid_str[32] = { 0, }; int log_level = LOG_INFO; if (cmd->last_pid) { snprintf(pid_str, 32, "%d", cmd->last_pid); } if (safe_str_eq(cmd->action, "monitor")) { log_level = LOG_DEBUG; } #ifdef HAVE_SYS_TIMEB_H do_crm_log(log_level, "finished - rsc:%s action:%s call_id:%d %s%s exit-code:%d exec-time:%dms queue-time:%dms", cmd->rsc_id, cmd->action, cmd->call_id, cmd->last_pid ? "pid:" : "", pid_str, cmd->exec_rc, exec_time, queue_time); #else do_crm_log(log_level, "finished - rsc:%s action:%s call_id:%d %s%s exit-code:%d", cmd->rsc_id, cmd->action, cmd->call_id, cmd->last_pid ? 
"pid:" : "", pid_str, cmd->exec_rc); #endif } static void log_execute(lrmd_cmd_t * cmd) { int log_level = LOG_INFO; if (safe_str_eq(cmd->action, "monitor")) { log_level = LOG_DEBUG; } do_crm_log(log_level, "executing - rsc:%s action:%s call_id:%d", cmd->rsc_id, cmd->action, cmd->call_id); } static const char * normalize_action_name(lrmd_rsc_t * rsc, const char *action) { if (safe_str_eq(action, "monitor") && (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_LSB) || safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_SERVICE) || safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_SYSTEMD))) { return "status"; } return action; } static lrmd_rsc_t * build_rsc_from_xml(xmlNode * msg) { xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); lrmd_rsc_t *rsc = NULL; rsc = calloc(1, sizeof(lrmd_rsc_t)); crm_element_value_int(msg, F_LRMD_CALLOPTS, &rsc->call_opts); rsc->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); rsc->class = crm_element_value_copy(rsc_xml, F_LRMD_CLASS); rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER); rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE); rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, lrmd_rsc_dispatch, rsc); return rsc; } static lrmd_cmd_t * create_lrmd_cmd(xmlNode * msg, crm_client_t * client) { int call_options = 0; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR); lrmd_cmd_t *cmd = NULL; cmd = calloc(1, sizeof(lrmd_cmd_t)); crm_element_value_int(msg, F_LRMD_CALLOPTS, &call_options); cmd->call_opts = call_options; cmd->client_id = strdup(client->id); crm_element_value_int(msg, F_LRMD_CALLID, &cmd->call_id); crm_element_value_int(rsc_xml, F_LRMD_RSC_INTERVAL, &cmd->interval); crm_element_value_int(rsc_xml, F_LRMD_TIMEOUT, &cmd->timeout); crm_element_value_int(rsc_xml, F_LRMD_RSC_START_DELAY, &cmd->start_delay); cmd->timeout_orig = cmd->timeout; cmd->origin = crm_element_value_copy(rsc_xml, F_LRMD_ORIGIN); cmd->action = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ACTION); cmd->userdata_str = crm_element_value_copy(rsc_xml, F_LRMD_RSC_USERDATA_STR); cmd->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID); cmd->params = xml2list(rsc_xml); if (safe_str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), "block")) { crm_debug("Setting flag to leave pid group on timeout and only kill action pid for %s_%s_%d", cmd->rsc_id, cmd->action, cmd->interval); cmd->service_flags |= SVC_ACTION_LEAVE_GROUP; } return cmd; } static void free_lrmd_cmd(lrmd_cmd_t * cmd) { if (cmd->stonith_recurring_id) { g_source_remove(cmd->stonith_recurring_id); } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->params) { g_hash_table_destroy(cmd->params); } free(cmd->origin); free(cmd->action); free(cmd->real_action); free(cmd->userdata_str); free(cmd->rsc_id); free(cmd->output); free(cmd->exit_reason); free(cmd->client_id); free(cmd); } static gboolean stonith_recurring_op_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc; cmd->stonith_recurring_id = 0; if (!cmd->rsc_id) { return FALSE; } rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); CRM_ASSERT(rsc != NULL); /* take it out of recurring_ops list, and put it in the pending ops * to be executed */ rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef HAVE_SYS_TIMEB_H ftime(&cmd->t_queue); if (cmd->t_first_queue.time == 0) { cmd->t_first_queue = cmd->t_queue; } #endif mainloop_set_trigger(rsc->work); return FALSE; } static gboolean start_delay_helper(gpointer data) { lrmd_cmd_t *cmd = data; 
lrmd_rsc_t *rsc = NULL; cmd->delay_id = 0; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc) { mainloop_set_trigger(rsc->work); } return FALSE; } static gboolean merge_recurring_duplicate(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { GListPtr gIter = NULL; lrmd_cmd_t * dup = NULL; gboolean dup_pending = FALSE; if (cmd->interval == 0) { return 0; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { dup = gIter->data; if (safe_str_eq(cmd->action, dup->action) && cmd->interval == dup->interval) { dup_pending = TRUE; goto merge_dup; } } /* if dup is in recurring_ops list, that means it has already executed * and is in the interval loop. we can't just remove it in this case. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { dup = gIter->data; if (safe_str_eq(cmd->action, dup->action) && cmd->interval == dup->interval) { goto merge_dup; } } return FALSE; merge_dup: /* This should not occur, if it does we need to investigate in the crmd * how something like this is possible */ crm_warn("Duplicate recurring op entry detected (%s_%s_%d), merging with previous op entry", rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval); /* merge */ dup->first_notify_sent = 0; free(dup->userdata_str); dup->userdata_str = cmd->userdata_str; cmd->userdata_str = NULL; dup->call_id = cmd->call_id; if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { /* if we are waiting for the next interval, kick it off now */ if (dup_pending == TRUE) { g_source_remove(cmd->stonith_recurring_id); cmd->stonith_recurring_id = 0; stonith_recurring_op_helper(cmd); } } else if (dup_pending == FALSE) { /* if we've already handed this to the service lib, kick off an early execution */ services_action_kick(rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval); } free_lrmd_cmd(cmd); return TRUE; } static void schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { gboolean dup_processed = FALSE; CRM_CHECK(cmd != NULL, return); CRM_CHECK(rsc != NULL, return); crm_trace("Scheduling %s on %s", cmd->action, rsc->rsc_id); dup_processed = merge_recurring_duplicate(rsc, cmd); if (dup_processed) { /* duplicate recurring cmd found, cmds merged */ return; } /* crmd expects lrmd to automatically cancel recurring ops before rsc stops. 
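 * (hence the cancel_all_recurring() call below whenever a stop is scheduled)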
*/ if (rsc && safe_str_eq(cmd->action, "stop")) { cancel_all_recurring(rsc, NULL); } rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef HAVE_SYS_TIMEB_H ftime(&cmd->t_queue); if (cmd->t_first_queue.time == 0) { cmd->t_first_queue = cmd->t_queue; } #endif mainloop_set_trigger(rsc->work); if (cmd->start_delay) { cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd); } } static void send_reply(crm_client_t * client, int rc, uint32_t id, int call_id) { int send_rc = 0; xmlNode *reply = NULL; reply = create_xml_node(NULL, T_LRMD_REPLY); crm_xml_add(reply, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add_int(reply, F_LRMD_RC, rc); crm_xml_add_int(reply, F_LRMD_CALLID, call_id); send_rc = lrmd_server_send_reply(client, id, reply); free_xml(reply); if (send_rc < 0) { - crm_warn("LRMD reply to %s failed: %d", client->name, send_rc); + crm_warn("Reply to client %s failed: %s " CRM_XS " %d", + client->name, pcmk_strerror(send_rc), send_rc); } } static void send_client_notify(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; crm_client_t *client = value; int rc; if (client == NULL) { crm_err("Asked to send event to NULL client"); return; } else if (client->name == NULL) { crm_trace("Asked to send event to client with no name"); return; } rc = lrmd_server_send_notify(client, update_msg); if ((rc <= 0) && (rc != -ENOTCONN)) { crm_warn("Could not notify client %s/%s: %s " CRM_XS " rc=%d", client->name, client->id, (rc? pcmk_strerror(rc) : "no data sent"), rc); } } #ifdef HAVE_SYS_TIMEB_H /*! * \internal * \brief Return difference between two times in milliseconds * * \param[in] now More recent time (or NULL to use current time) * \param[in] old Earlier time * * \return milliseconds difference (or 0 if old is NULL or has time zero) */ static int time_diff_ms(struct timeb *now, struct timeb *old) { struct timeb local_now = { 0, }; if (now == NULL) { ftime(&local_now); now = &local_now; } if ((old == NULL) || (old->time == 0)) { return 0; } return difftime(now->time, old->time) * 1000 + now->millitm - old->millitm; } /*! * \internal * \brief Reset a command's operation times to their original values. * * Reset a command's run and queued timestamps to the timestamps of the original * command, so we report the entire time since then and not just the time since * the most recent command (for recurring and systemd operations). * * \param[in] cmd LRMD command object to reset * * \note It's not obvious what the queued time should be for a systemd * start/stop operation, which might go like this: * initial command queued 5ms, runs 3s * monitor command queued 10ms, runs 10s * monitor command queued 10ms, runs 10s * Is the queued time for that operation 5ms, 10ms or 25ms? The current * implementation will report 5ms. If it's 25ms, then we need to * subtract 20ms from the total exec time so as not to count it twice. * We can implement that later if it matters to anyone ... */ static void cmd_original_times(lrmd_cmd_t * cmd) { cmd->t_run = cmd->t_first_run; cmd->t_queue = cmd->t_first_queue; } #endif static void send_cmd_complete_notify(lrmd_cmd_t * cmd) { int exec_time = 0; int queue_time = 0; xmlNode *notify = NULL; #ifdef HAVE_SYS_TIMEB_H exec_time = time_diff_ms(NULL, &cmd->t_run); queue_time = time_diff_ms(&cmd->t_run, &cmd->t_queue); #endif log_finished(cmd, exec_time, queue_time); /* if the first notify result for a cmd has already been sent earlier, and the * option to only send notifies on result changes is set.
Check to see * if the last result is the same as the new one. If so, suppress this update */ if (cmd->first_notify_sent && (cmd->call_opts & lrmd_opt_notify_changes_only)) { if (cmd->last_notify_rc == cmd->exec_rc && cmd->last_notify_op_status == cmd->lrmd_op_status) { /* only send changes */ return; } } cmd->first_notify_sent = 1; cmd->last_notify_rc = cmd->exec_rc; cmd->last_notify_op_status = cmd->lrmd_op_status; notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add_int(notify, F_LRMD_TIMEOUT, cmd->timeout); crm_xml_add_int(notify, F_LRMD_RSC_INTERVAL, cmd->interval); crm_xml_add_int(notify, F_LRMD_RSC_START_DELAY, cmd->start_delay); crm_xml_add_int(notify, F_LRMD_EXEC_RC, cmd->exec_rc); crm_xml_add_int(notify, F_LRMD_OP_STATUS, cmd->lrmd_op_status); crm_xml_add_int(notify, F_LRMD_CALLID, cmd->call_id); crm_xml_add_int(notify, F_LRMD_RSC_DELETED, cmd->rsc_deleted); #ifdef HAVE_SYS_TIMEB_H crm_xml_add_int(notify, F_LRMD_RSC_RUN_TIME, cmd->t_run.time); crm_xml_add_int(notify, F_LRMD_RSC_RCCHANGE_TIME, cmd->t_rcchange.time); crm_xml_add_int(notify, F_LRMD_RSC_EXEC_TIME, exec_time); crm_xml_add_int(notify, F_LRMD_RSC_QUEUE_TIME, queue_time); #endif crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_RSC_EXEC); crm_xml_add(notify, F_LRMD_RSC_ID, cmd->rsc_id); if(cmd->real_action) { crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->real_action); } else { crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->action); } crm_xml_add(notify, F_LRMD_RSC_USERDATA_STR, cmd->userdata_str); crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->output); crm_xml_add(notify, F_LRMD_RSC_EXIT_REASON, cmd->exit_reason); if (cmd->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; xmlNode *args = create_xml_node(notify, XML_TAG_ATTRS); g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { hash2smartfield((gpointer) key, (gpointer) value, args); } } if (cmd->client_id && (cmd->call_opts & lrmd_opt_notify_orig_only)) { crm_client_t *client = crm_client_get_by_id(cmd->client_id); if (client) { send_client_notify(client->id, client, notify); } } else if (client_connections != NULL) { g_hash_table_foreach(client_connections, send_client_notify, notify); } free_xml(notify); } static void send_generic_notify(int rc, xmlNode * request) { if (client_connections != NULL) { int call_id = 0; xmlNode *notify = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); const char *op = crm_element_value(request, F_LRMD_OPERATION); crm_element_value_int(request, F_LRMD_CALLID, &call_id); notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add_int(notify, F_LRMD_RC, rc); crm_xml_add_int(notify, F_LRMD_CALLID, call_id); crm_xml_add(notify, F_LRMD_OPERATION, op); crm_xml_add(notify, F_LRMD_RSC_ID, rsc_id); g_hash_table_foreach(client_connections, send_client_notify, notify); free_xml(notify); } } static void cmd_reset(lrmd_cmd_t * cmd) { cmd->lrmd_op_status = 0; cmd->last_pid = 0; memset(&cmd->t_run, 0, sizeof(cmd->t_run)); memset(&cmd->t_queue, 0, sizeof(cmd->t_queue)); free(cmd->exit_reason); cmd->exit_reason = NULL; free(cmd->output); cmd->output = NULL; } static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc) { crm_trace("Resource operation rsc:%s action:%s completed (%p %p)", cmd->rsc_id, cmd->action, rsc ? 
rsc->active : NULL, cmd); if (rsc && (rsc->active == cmd)) { rsc->active = NULL; mainloop_set_trigger(rsc->work); } if (!rsc) { cmd->rsc_deleted = 1; } /* reset original timeout so client notification has correct information */ cmd->timeout = cmd->timeout_orig; send_cmd_complete_notify(cmd); if (cmd->interval && (cmd->lrmd_op_status == PCMK_LRM_OP_CANCELLED)) { if (rsc) { rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else if (cmd->interval == 0) { if (rsc) { rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else { /* Clear all the values pertaining just to the last iteration of a recurring op. */ cmd_reset(cmd); } } static int ocf2uniform_rc(int rc) { if (rc < 0 || rc > PCMK_OCF_FAILED_MASTER) { return PCMK_OCF_UNKNOWN_ERROR; } return rc; } static int stonith2uniform_rc(const char *action, int rc) { if (rc == -ENODEV) { if (safe_str_eq(action, "stop")) { rc = PCMK_OCF_OK; } else if (safe_str_eq(action, "start")) { rc = PCMK_OCF_NOT_INSTALLED; } else { rc = PCMK_OCF_NOT_RUNNING; } } else if (rc != 0) { rc = PCMK_OCF_UNKNOWN_ERROR; } return rc; } #if SUPPORT_NAGIOS static int nagios2uniform_rc(const char *action, int rc) { if (rc < 0) { return PCMK_OCF_UNKNOWN_ERROR; } switch (rc) { case NAGIOS_STATE_OK: return PCMK_OCF_OK; case NAGIOS_INSUFFICIENT_PRIV: return PCMK_OCF_INSUFFICIENT_PRIV; case NAGIOS_NOT_INSTALLED: return PCMK_OCF_NOT_INSTALLED; case NAGIOS_STATE_WARNING: case NAGIOS_STATE_CRITICAL: case NAGIOS_STATE_UNKNOWN: case NAGIOS_STATE_DEPENDENT: default: return PCMK_OCF_UNKNOWN_ERROR; } return PCMK_OCF_UNKNOWN_ERROR; } #endif static int get_uniform_rc(const char *standard, const char *action, int rc) { if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_OCF)) { return ocf2uniform_rc(rc); } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_STONITH)) { return stonith2uniform_rc(action, rc); } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD)) { return rc; } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_UPSTART)) { return rc; #if SUPPORT_NAGIOS } else if (safe_str_eq(standard, PCMK_RESOURCE_CLASS_NAGIOS)) { return nagios2uniform_rc(action, rc); #endif } else { return services_get_ocf_exitcode(action, rc); } } static int action_get_uniform_rc(svc_action_t * action) { lrmd_cmd_t *cmd = action->cb_data; return get_uniform_rc(action->standard, cmd->action, action->rc); } void notify_of_new_client(crm_client_t *new_client) { crm_client_t *client = NULL; GHashTableIter iter; xmlNode *notify = NULL; char *key = NULL; notify = create_xml_node(NULL, T_LRMD_NOTIFY); crm_xml_add(notify, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_NEW_CLIENT); g_hash_table_iter_init(&iter, client_connections); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & client)) { if (safe_str_eq(client->id, new_client->id)) { continue; } send_client_notify((gpointer) key, (gpointer) client, (gpointer) notify); } free_xml(notify); } static char * parse_exit_reason(const char *output) { const char *cur = NULL; const char *last = NULL; static int cookie_len = 0; char *eol = NULL; size_t reason_len = EXIT_REASON_MAX_LEN; if (output == NULL) { return NULL; } if (!cookie_len) { cookie_len = strlen(PCMK_OCF_REASON_PREFIX); } cur = strstr(output, PCMK_OCF_REASON_PREFIX); for (; cur != NULL; cur = strstr(cur, PCMK_OCF_REASON_PREFIX)) { /* skip over the cookie delimiter string */ cur += cookie_len; last = cur; } if (last == NULL) { return NULL; } 
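    /* At this point, 'last' points just past the final occurrence of
     * PCMK_OCF_REASON_PREFIX, so only the last reason an agent printed is
     * reported. A hedged usage sketch (assuming the prefix expands to
     * "ocf-exit-reason:", as OCF agents emit):
     *
     *     char *reason = parse_exit_reason(
     *         "ocf-exit-reason:stale\n"
     *         "ocf-exit-reason:config file missing\nother output\n");
     *     // reason == "config file missing" (cut at the newline below)
     *     free(reason);
     */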
// Truncate everything after a new line, and limit reason string size eol = strchr(last, '\n'); if (eol) { reason_len = QB_MIN(reason_len, eol - last); } return strndup(last, reason_len); } void client_disconnect_cleanup(const char *client_id) { GHashTableIter iter; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (rsc->call_opts & lrmd_opt_drop_recurring) { /* This client is disconnecting, drop any recurring operations * it may have initiated on the resource */ cancel_all_recurring(rsc, client_id); } } } static void action_complete(svc_action_t * action) { lrmd_rsc_t *rsc; lrmd_cmd_t *cmd = action->cb_data; const char *rclass = NULL; bool goagain = false; if (!cmd) { crm_err("LRMD action (%s) completed does not match any known operations.", action->id); return; } #ifdef HAVE_SYS_TIMEB_H if (cmd->exec_rc != action->rc) { ftime(&cmd->t_rcchange); } #endif cmd->last_pid = action->pid; cmd->exec_rc = action_get_uniform_rc(action); cmd->lrmd_op_status = action->status; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc && safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_SERVICE)) { rclass = resources_find_service_class(rsc->class); } else if(rsc) { rclass = rsc->class; } if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD)) { if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->action, "start")) { /* systemd I curse thee! * * systemd returns from start actions after the start _begins_ * not after it completes. * * So we have to jump through a few hoops so that we don't * report 'complete' to the rest of pacemaker until, you know, * it's actually done. */ goagain = true; cmd->real_action = cmd->action; cmd->action = strdup("monitor"); } else if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->action, "stop")) { goagain = true; cmd->real_action = cmd->action; cmd->action = strdup("monitor"); } else if(cmd->real_action) { /* Ok, so this is the follow up monitor action to check if start actually completed */ if(cmd->lrmd_op_status == PCMK_LRM_OP_DONE && cmd->exec_rc == PCMK_OCF_PENDING) { goagain = true; } else if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->real_action, "stop")) { goagain = true; } else { #ifdef HAVE_SYS_TIMEB_H int time_sum = time_diff_ms(NULL, &cmd->t_first_run); int timeout_left = cmd->timeout_orig - time_sum; crm_debug("%s %s is now complete (elapsed=%dms, remaining=%dms): %s (%d)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc); cmd_original_times(cmd); #endif if(cmd->lrmd_op_status == PCMK_LRM_OP_DONE && cmd->exec_rc == PCMK_OCF_NOT_RUNNING && safe_str_eq(cmd->real_action, "stop")) { cmd->exec_rc = PCMK_OCF_OK; } } } } #if SUPPORT_NAGIOS if (rsc && safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS)) { if (safe_str_eq(cmd->action, "monitor") && cmd->interval == 0 && cmd->exec_rc == PCMK_OCF_OK) { /* Successfully executed --version for the nagios plugin */ cmd->exec_rc = PCMK_OCF_NOT_RUNNING; } else if (safe_str_eq(cmd->action, "start") && cmd->exec_rc != PCMK_OCF_OK) { goagain = true; } } #endif /* Wrapping this section in ifdef implies that systemd resources are not * fully supported on platforms without sys/timeb.h. Since timeb is * obsolete, we should eventually prefer a clock_gettime() implementation * (wrapped in its own ifdef) with timeb as a fallback. 
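 * A rough sketch of what that replacement could look like (an assumption,
 * not code in this tree; CLOCK_MONOTONIC and millisecond rounding chosen
 * arbitrarily):
 *
 *     #include <time.h>
 *
 *     static int
 *     time_diff_ms_monotonic(const struct timespec *now,
 *                            const struct timespec *old)
 *     {
 *         struct timespec local_now;
 *
 *         if (now == NULL) {
 *             clock_gettime(CLOCK_MONOTONIC, &local_now);
 *             now = &local_now;
 *         }
 *         if ((old == NULL) || (old->tv_sec == 0)) {
 *             return 0;
 *         }
 *         return (int) ((now->tv_sec - old->tv_sec) * 1000
 *                       + (now->tv_nsec - old->tv_nsec) / 1000000);
 *     }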
*/ #ifdef HAVE_SYS_TIMEB_H if(goagain) { int time_sum = time_diff_ms(NULL, &cmd->t_first_run); int timeout_left = cmd->timeout_orig - time_sum; int delay = cmd->timeout_orig / 10; if(delay >= timeout_left && timeout_left > 20) { delay = timeout_left/2; } delay = QB_MIN(2000, delay); if (delay < timeout_left) { cmd->start_delay = delay; cmd->timeout = timeout_left; if(cmd->exec_rc == PCMK_OCF_OK) { crm_debug("%s %s may still be in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, delay); } else if(cmd->exec_rc == PCMK_OCF_PENDING) { crm_info("%s %s is still in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, time_sum, timeout_left, delay); } else { crm_notice("%s %s failed '%s' (%d): re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, services_ocf_exitcode_str(cmd->exec_rc), cmd->exec_rc, time_sum, timeout_left, delay); } cmd_reset(cmd); if(rsc) { rsc->active = NULL; } schedule_lrmd_cmd(rsc, cmd); /* Don't finalize cmd, we're not done with it yet */ return; } else { crm_notice("Giving up on %s %s (rc=%d): timeout (elapsed=%dms, remaining=%dms)", cmd->rsc_id, cmd->real_action?cmd->real_action:cmd->action, cmd->exec_rc, time_sum, timeout_left); cmd->lrmd_op_status = PCMK_LRM_OP_TIMEOUT; cmd->exec_rc = PCMK_OCF_TIMEOUT; cmd_original_times(cmd); } } #endif if (action->stderr_data) { cmd->output = strdup(action->stderr_data); cmd->exit_reason = parse_exit_reason(action->stderr_data); } else if (action->stdout_data) { cmd->output = strdup(action->stdout_data); } cmd_finalize(cmd, rsc); } static void stonith_action_complete(lrmd_cmd_t * cmd, int rc) { int recurring = cmd->interval; lrmd_rsc_t *rsc = NULL; cmd->exec_rc = get_uniform_rc(PCMK_RESOURCE_CLASS_STONITH, cmd->action, rc); rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); if (cmd->lrmd_op_status == PCMK_LRM_OP_CANCELLED) { recurring = 0; /* do nothing */ } else if (rc == -ENODEV && safe_str_eq(cmd->action, "monitor")) { /* Not registered == inactive */ cmd->lrmd_op_status = PCMK_LRM_OP_DONE; cmd->exec_rc = PCMK_OCF_NOT_RUNNING; } else if (rc) { /* Attempt to map return codes to op status if possible */ switch (rc) { case -EPROTONOSUPPORT: cmd->lrmd_op_status = PCMK_LRM_OP_NOTSUPPORTED; break; case -ETIME: cmd->lrmd_op_status = PCMK_LRM_OP_TIMEOUT; break; default: /* TODO: This looks wrong. 
Status should be _DONE and exec_rc set to an error */ cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; } } else { /* command successful */ cmd->lrmd_op_status = PCMK_LRM_OP_DONE; if (safe_str_eq(cmd->action, "start") && rsc) { rsc->stonith_started = 1; } } if (recurring && rsc) { if (cmd->stonith_recurring_id) { g_source_remove(cmd->stonith_recurring_id); } cmd->stonith_recurring_id = g_timeout_add(cmd->interval, stonith_recurring_op_helper, cmd); } cmd_finalize(cmd, rsc); } static void lrmd_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { stonith_action_complete(data->userdata, data->rc); } void stonith_connection_failed(void) { GHashTableIter iter; GList *cmd_list = NULL; GList *cmd_iter = NULL; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { if (rsc->active) { cmd_list = g_list_append(cmd_list, rsc->active); } if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, rsc->recurring_ops); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, rsc->pending_ops); } rsc->pending_ops = rsc->recurring_ops = NULL; } } if (!cmd_list) { return; } crm_err("STONITH connection failed, finalizing %d pending operations.", g_list_length(cmd_list)); for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { stonith_action_complete(cmd_iter->data, -ENOTCONN); } g_list_free(cmd_list); } static int lrmd_rsc_execute_stonith(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { int rc = 0; int do_monitor = 0; stonith_t *stonith_api = get_stonith_connection(); if (!stonith_api) { cmd->exec_rc = get_uniform_rc(PCMK_RESOURCE_CLASS_STONITH, cmd->action, -ENOTCONN); cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; cmd_finalize(cmd, rsc); return -EUNATCH; } if (safe_str_eq(cmd->action, "start")) { char *key = NULL; char *value = NULL; stonith_key_value_t *device_params = NULL; if (cmd->params) { GHashTableIter iter; g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { device_params = stonith_key_value_add(device_params, key, value); } } /* Stonith automatically registers devices from the IPC when changes occur, * but to avoid a possible race condition between stonith receiving the IPC update * and the lrmd requesting that resource, the lrmd still registers the device as well. * Stonith knows how to handle duplicate device registrations correctly. */ rc = stonith_api->cmds->register_device(stonith_api, st_opt_sync_call, cmd->rsc_id, rsc->provider, rsc->type, device_params); stonith_key_value_freeall(device_params, 1, 1); if (rc == 0) { do_monitor = 1; } } else if (safe_str_eq(cmd->action, "stop")) { rc = stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call, cmd->rsc_id); rsc->stonith_started = 0; } else if (safe_str_eq(cmd->action, "monitor")) { if (cmd->interval) { do_monitor = 1; } else { rc = rsc->stonith_started ? 
0 : -ENODEV; } } if (!do_monitor) { goto cleanup_stonith_exec; } rc = stonith_api->cmds->monitor(stonith_api, 0, cmd->rsc_id, cmd->timeout / 1000); rc = stonith_api->cmds->register_callback(stonith_api, rc, 0, 0, cmd, "lrmd_stonith_callback", lrmd_stonith_callback); /* don't cleanup yet, we will find out the result of the monitor later */ if (rc > 0) { rsc->active = cmd; return rc; } else if (rc == 0) { rc = -1; } cleanup_stonith_exec: stonith_action_complete(cmd, rc); return rc; } static int lrmd_rsc_execute_service_lib(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { svc_action_t *action = NULL; GHashTable *params_copy = NULL; CRM_ASSERT(rsc); CRM_ASSERT(cmd); crm_trace("Creating action, resource:%s action:%s class:%s provider:%s agent:%s", rsc->rsc_id, cmd->action, rsc->class, rsc->provider, rsc->type); #if SUPPORT_NAGIOS /* Recurring operations are cancelled anyway for a stop operation */ if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS) && safe_str_eq(cmd->action, "stop")) { cmd->exec_rc = PCMK_OCF_OK; goto exec_done; } #endif params_copy = crm_str_table_dup(cmd->params); action = resources_action_create(rsc->rsc_id, rsc->class, rsc->provider, rsc->type, normalize_action_name(rsc, cmd->action), cmd->interval, cmd->timeout, params_copy, cmd->service_flags); if (!action) { crm_err("Failed to create action, action:%s on resource %s", cmd->action, rsc->rsc_id); cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; goto exec_done; } action->cb_data = cmd; /* 'cmd' may not be valid after this point if * services_action_async() returned TRUE * * Upstart and systemd both synchronously determine monitor/status * results and call action_complete (which may free 'cmd') if necessary. */ if (services_action_async(action, action_complete)) { return TRUE; } cmd->exec_rc = action->rc; if(action->status != PCMK_LRM_OP_DONE) { cmd->lrmd_op_status = action->status; } else { cmd->lrmd_op_status = PCMK_LRM_OP_ERROR; } services_action_free(action); action = NULL; exec_done: cmd_finalize(cmd, rsc); return TRUE; } static gboolean lrmd_rsc_execute(lrmd_rsc_t * rsc) { lrmd_cmd_t *cmd = NULL; CRM_CHECK(rsc != NULL, return FALSE); if (rsc->active) { crm_trace("%s is still active", rsc->rsc_id); return TRUE; } if (rsc->pending_ops) { GList *first = rsc->pending_ops; cmd = first->data; if (cmd->delay_id) { crm_trace ("Command %s %s was asked to run too early, waiting for start_delay timeout of %dms", cmd->rsc_id, cmd->action, cmd->start_delay); return TRUE; } rsc->pending_ops = g_list_remove_link(rsc->pending_ops, first); g_list_free_1(first); #ifdef HAVE_SYS_TIMEB_H if (cmd->t_first_run.time == 0) { ftime(&cmd->t_first_run); } ftime(&cmd->t_run); #endif } if (!cmd) { crm_trace("Nothing further to do for %s", rsc->rsc_id); return TRUE; } rsc->active = cmd; /* only one op at a time for a rsc */ if (cmd->interval) { rsc->recurring_ops = g_list_append(rsc->recurring_ops, cmd); } log_execute(cmd); if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { lrmd_rsc_execute_stonith(rsc, cmd); } else { lrmd_rsc_execute_service_lib(rsc, cmd); } return TRUE; } static gboolean lrmd_rsc_dispatch(gpointer user_data) { return lrmd_rsc_execute(user_data); } void free_rsc(gpointer data) { GListPtr gIter = NULL; lrmd_rsc_t *rsc = data; int is_stonith = safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH); gIter = rsc->pending_ops; while (gIter != NULL) { GListPtr next = gIter->next; lrmd_cmd_t *cmd = gIter->data; /* command was never executed */ cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; cmd_finalize(cmd, NULL); gIter = next; } /* frees list, 
but not list elements. */ g_list_free(rsc->pending_ops); gIter = rsc->recurring_ops; while (gIter != NULL) { GListPtr next = gIter->next; lrmd_cmd_t *cmd = gIter->data; if (is_stonith) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; /* If a stonith command is in-flight, just mark it as cancelled; * it is not safe to finalize/free the cmd until the stonith api * says it has either completed or timed out. */ if (rsc->active != cmd) { cmd_finalize(cmd, NULL); } } else { /* This command is already handed off to service library, * let service library cancel it and tell us via the callback * when it is cancelled. The rsc can be safely destroyed * even if we are waiting for the cancel result */ services_action_cancel(rsc->rsc_id, normalize_action_name(rsc, cmd->action), cmd->interval); } gIter = next; } /* frees list, but not list elements. */ g_list_free(rsc->recurring_ops); free(rsc->rsc_id); free(rsc->class); free(rsc->provider); free(rsc->type); mainloop_destroy_trigger(rsc->work); free(rsc); } static int process_lrmd_signon(crm_client_t * client, uint32_t id, xmlNode * request) { xmlNode *reply = create_xml_node(NULL, "reply"); const char *is_ipc_provider = crm_element_value(request, F_LRMD_IS_IPC_PROVIDER); const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION); if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) { crm_err("Cluster API version must be greater than or equal to %s, not %s", LRMD_MIN_PROTOCOL_VERSION, protocol_version); crm_xml_add_int(reply, F_LRMD_RC, -EPROTO); } crm_xml_add(reply, F_LRMD_OPERATION, CRM_OP_REGISTER); crm_xml_add(reply, F_LRMD_CLIENTID, client->id); crm_xml_add(reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); lrmd_server_send_reply(client, id, reply); if (crm_is_true(is_ipc_provider)) { /* this is a remote connection from a cluster node's crmd */ #ifdef SUPPORT_REMOTE ipc_proxy_add_provider(client); #endif } free_xml(reply); return pcmk_ok; } static int process_lrmd_rsc_register(crm_client_t * client, uint32_t id, xmlNode * request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = build_rsc_from_xml(request); lrmd_rsc_t *dup = g_hash_table_lookup(rsc_list, rsc->rsc_id); if (dup && safe_str_eq(rsc->class, dup->class) && safe_str_eq(rsc->provider, dup->provider) && safe_str_eq(rsc->type, dup->type)) { crm_warn("Can't add: RSC '%s' already present in the rsc list (%d active resources)", rsc->rsc_id, g_hash_table_size(rsc_list)); free_rsc(rsc); return rc; } g_hash_table_replace(rsc_list, rsc->rsc_id, rsc); crm_info("Added '%s' to the rsc list (%d active resources)", rsc->rsc_id, g_hash_table_size(rsc_list)); return rc; } static void process_lrmd_get_rsc_info(crm_client_t * client, uint32_t id, xmlNode * request) { int rc = pcmk_ok; int send_rc = 0; int call_id = 0; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); xmlNode *reply = NULL; lrmd_rsc_t *rsc = NULL; crm_element_value_int(request, F_LRMD_CALLID, &call_id); if (!rsc_id) { rc = -ENODEV; goto get_rsc_done; } if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); rc = -ENODEV; goto get_rsc_done; } get_rsc_done: reply = create_xml_node(NULL, T_LRMD_REPLY); crm_xml_add(reply, F_LRMD_ORIGIN, __FUNCTION__); crm_xml_add_int(reply, F_LRMD_RC, rc); crm_xml_add_int(reply, F_LRMD_CALLID, call_id); if (rsc) { crm_xml_add(reply, F_LRMD_RSC_ID, rsc->rsc_id); crm_xml_add(reply, F_LRMD_CLASS,
rsc->class); crm_xml_add(reply, F_LRMD_PROVIDER, rsc->provider); crm_xml_add(reply, F_LRMD_TYPE, rsc->type); } send_rc = lrmd_server_send_reply(client, id, reply); if (send_rc < 0) { crm_warn("LRMD reply to %s failed: %d", client->name, send_rc); } free_xml(reply); } static int process_lrmd_rsc_unregister(crm_client_t * client, uint32_t id, xmlNode * request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); if (!rsc_id) { return -ENODEV; } if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); return pcmk_ok; } if (rsc->active) { /* let the caller know there are still active ops on this rsc to watch for */ crm_trace("Operation still in progress: %p", rsc->active); rc = -EINPROGRESS; } g_hash_table_remove(rsc_list, rsc_id); return rc; } static int process_lrmd_rsc_exec(crm_client_t * client, uint32_t id, xmlNode * request) { lrmd_rsc_t *rsc = NULL; lrmd_cmd_t *cmd = NULL; xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); int call_id; if (!rsc_id) { return -EINVAL; } if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); return -ENODEV; } cmd = create_lrmd_cmd(request, client); call_id = cmd->call_id; /* Don't reference cmd after handing it off to be scheduled. * The cmd could get merged and freed. */ schedule_lrmd_cmd(rsc, cmd); return call_id; } static int cancel_op(const char *rsc_id, const char *action, int interval) { GListPtr gIter = NULL; lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, rsc_id); /* How to cancel an action. * 1. Check pending ops list, if it hasn't been handed off * to the service library or stonith recurring list remove * it there and that will stop it. * 2. If it isn't in the pending ops list, then it's either a * recurring op in the stonith recurring list, or the service * library's recurring list. Stop it there * 3. If not found in any lists, then this operation has either * been executed already and is not a recurring operation, or * never existed. */ if (!rsc) { return -ENODEV; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (safe_str_eq(cmd->action, action) && cmd->interval == interval) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; cmd_finalize(cmd, rsc); return pcmk_ok; } } if (safe_str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH)) { /* The service library does not handle stonith operations. * We have to handle recurring stonith operations ourselves. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (safe_str_eq(cmd->action, action) && cmd->interval == interval) { cmd->lrmd_op_status = PCMK_LRM_OP_CANCELLED; if (rsc->active != cmd) { cmd_finalize(cmd, rsc); } return pcmk_ok; } } } else if (services_action_cancel(rsc_id, normalize_action_name(rsc, action), interval) == TRUE) { /* The service library will tell the action_complete callback function * this action was cancelled, which will destroy the cmd and remove * it from the recurring_op list. Do not do that in this function * if the service library says it cancelled it. 
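 * For example (a hedged walk-through, not normative): cancel_op("dummy",
 * "monitor", 10000) for an OCF resource usually misses pending_ops (the
 * monitor has already run at least once), skips the stonith branch, and
 * reaches services_action_cancel(); action_complete() then delivers
 * PCMK_LRM_OP_CANCELLED and cmd_finalize() frees the cmd.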
*/ return pcmk_ok; } return -EOPNOTSUPP; } static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id) { GList *cmd_list = NULL; GList *cmd_iter = NULL; /* Notice a copy of each list is created when concat is called. * This prevents odd behavior from occurring when the cmd_list * is iterated through later on. It is possible the cancel_op * function may end up modifying the recurring_ops and pending_ops * lists. If we did not copy those lists, our cmd_list iteration * could get messed up.*/ if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->recurring_ops)); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->pending_ops)); } if (!cmd_list) { return; } for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { lrmd_cmd_t *cmd = cmd_iter->data; if (cmd->interval == 0) { continue; } if (client_id && safe_str_neq(cmd->client_id, client_id)) { continue; } cancel_op(rsc->rsc_id, cmd->action, cmd->interval); } /* frees only the copied list data, not the cmds */ g_list_free(cmd_list); } static int process_lrmd_rsc_cancel(crm_client_t * client, uint32_t id, xmlNode * request) { xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR); const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID); const char *action = crm_element_value(rsc_xml, F_LRMD_RSC_ACTION); int interval = 0; crm_element_value_int(rsc_xml, F_LRMD_RSC_INTERVAL, &interval); if (!rsc_id || !action) { return -EINVAL; } return cancel_op(rsc_id, action, interval); } void process_lrmd_message(crm_client_t * client, uint32_t id, xmlNode * request) { int rc = pcmk_ok; int call_id = 0; const char *op = crm_element_value(request, F_LRMD_OPERATION); int do_reply = 0; int do_notify = 0; crm_trace("Processing %s operation from %s", op, client->id); crm_element_value_int(request, F_LRMD_CALLID, &call_id); if (crm_str_eq(op, CRM_OP_IPC_FWD, TRUE)) { #ifdef SUPPORT_REMOTE ipc_proxy_forward_client(client, request); #endif do_reply = 1; } else if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { rc = process_lrmd_signon(client, id, request); } else if (crm_str_eq(op, LRMD_OP_RSC_REG, TRUE)) { rc = process_lrmd_rsc_register(client, id, request); do_notify = 1; do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_INFO, TRUE)) { process_lrmd_get_rsc_info(client, id, request); } else if (crm_str_eq(op, LRMD_OP_RSC_UNREG, TRUE)) { rc = process_lrmd_rsc_unregister(client, id, request); /* don't notify anyone about failed un-registers */ if (rc == pcmk_ok || rc == -EINPROGRESS) { do_notify = 1; } do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_EXEC, TRUE)) { rc = process_lrmd_rsc_exec(client, id, request); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_RSC_CANCEL, TRUE)) { rc = process_lrmd_rsc_cancel(client, id, request); do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_POKE, TRUE)) { do_notify = 1; do_reply = 1; } else if (crm_str_eq(op, LRMD_OP_CHECK, TRUE)) { xmlNode *data = get_message_xml(request, F_LRMD_CALLDATA); const char *timeout = crm_element_value(data, F_LRMD_WATCHDOG); CRM_LOG_ASSERT(data != NULL); check_sbd_timeout(timeout); } else if (crm_str_eq(op, LRMD_OP_ALERT_EXEC, TRUE)) { rc = process_lrmd_alert_exec(client, id, request); do_reply = 1; } else { rc = -EOPNOTSUPP; do_reply = 1; crm_err("Unknown %s from %s", op, client->name); crm_log_xml_warn(request, "UnknownOp"); } crm_debug("Processed %s operation from %s: rc=%d, reply=%d, notify=%d", op, client->id, rc, do_reply, do_notify); if (do_reply) { send_reply(client, rc, id, call_id); } if (do_notify) { 
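        /* The reply above goes only to the requesting client; this notify
         * fans out to every connected client. From the client side, the
         * exchange looks roughly like this sketch using the lrmd client API
         * (hedged; exact signatures are in lrmd.h):
         *
         *     lrmd_t *conn = lrmd_api_new();
         *
         *     if (conn->cmds->connect(conn, "example-client", NULL) == pcmk_ok) {
         *         // LRMD_OP_RSC_REG: one reply to us, one notify to everyone
         *         conn->cmds->register_rsc(conn, "dummy", "ocf", "pacemaker",
         *                                  "Dummy", 0);
         *     }
         *     lrmd_api_delete(conn);
         */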
send_generic_notify(rc, request); } } diff --git a/lrmd/tls_backend.c b/lrmd/tls_backend.c index bc57ee701c..edfb02da7d 100644 --- a/lrmd/tls_backend.c +++ b/lrmd/tls_backend.c @@ -1,394 +1,395 @@ /* * Copyright (c) 2012 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # define LRMD_REMOTE_AUTH_TIMEOUT 10000 gnutls_psk_server_credentials_t psk_cred_s; gnutls_dh_params_t dh_params; static int ssock = -1; extern int lrmd_call_id; static void debug_log(int level, const char *str) { fputs(str, stderr); } static int lrmd_remote_client_msg(gpointer data) { int id = 0; int rc = 0; int disconnected = 0; xmlNode *request = NULL; crm_client_t *client = data; if (client->remote->tls_handshake_complete == FALSE) { int rc = 0; /* Multiple calls to handshake will be required; this callback * will be invoked once the client sends more handshake data. */ do { rc = gnutls_handshake(*client->remote->tls_session); if (rc < 0 && rc != GNUTLS_E_AGAIN) { crm_err("Remote lrmd tls handshake failed"); return -1; } } while (rc == GNUTLS_E_INTERRUPTED); if (rc == 0) { crm_debug("Remote lrmd tls handshake completed"); client->remote->tls_handshake_complete = TRUE; if (client->remote->auth_timeout) { g_source_remove(client->remote->auth_timeout); } client->remote->auth_timeout = 0; /* Alert other clients of the new connection */ notify_of_new_client(client); } return 0; } rc = crm_remote_ready(client->remote, 0); if (rc == 0) { /* no msg to read */ return 0; } else if (rc < 0) { - crm_info("Client disconnected during remote client read"); + crm_info("Client disconnected while polling it"); return -1; } crm_remote_recv(client->remote, -1, &disconnected); request = crm_remote_parse_buffer(client->remote); while (request) { crm_element_value_int(request, F_LRMD_REMOTE_MSG_ID, &id); crm_trace("processing request from remote client with remote msg id %d", id); if (!client->name) { const char *value = crm_element_value(request, F_LRMD_CLIENTNAME); if (value) { client->name = strdup(value); } } lrmd_call_id++; if (lrmd_call_id < 1) { lrmd_call_id = 1; } crm_xml_add(request, F_LRMD_CLIENTID, client->id); crm_xml_add(request, F_LRMD_CLIENTNAME, client->name); crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); free_xml(request); /* process all the messages in the current buffer */ request = crm_remote_parse_buffer(client->remote); } if (disconnected) { - crm_info("Client disconnect detected in tls msg dispatcher."); + crm_info("Client disconnected while reading from it"); return -1; } return 0; } static void lrmd_remote_client_destroy(gpointer user_data) { crm_client_t *client = user_data; if (client == NULL) { return; } + crm_notice("Cleaning up after
remote client %s disconnected " + CRM_XS " id=%s", + (client->name? client->name : ""), client->id); + ipc_proxy_remove_provider(client); /* if this is the last remote connection, stop recurring * operations */ if (crm_hash_table_size(client_connections) == 1) { client_disconnect_cleanup(NULL); } - crm_notice("LRMD client disconnecting remote client - name: %s id: %s", - client->name ? client->name : "", client->id); - if (client->remote->tls_session) { void *sock_ptr; int csock; sock_ptr = gnutls_transport_get_ptr(*client->remote->tls_session); csock = GPOINTER_TO_INT(sock_ptr); gnutls_bye(*client->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(*client->remote->tls_session); gnutls_free(client->remote->tls_session); close(csock); } lrmd_client_destroy(client); return; } static gboolean lrmd_auth_timeout_cb(gpointer data) { crm_client_t *client = data; client->remote->auth_timeout = 0; if (client->remote->tls_handshake_complete == TRUE) { return FALSE; } mainloop_del_fd(client->remote->source); client->remote->source = NULL; crm_err("Remote client authentication timed out"); return FALSE; } static int lrmd_remote_listen(gpointer data) { int csock = 0; gnutls_session_t *session = NULL; crm_client_t *new_client = NULL; static struct mainloop_fd_callbacks lrmd_remote_fd_cb = { .dispatch = lrmd_remote_client_msg, .destroy = lrmd_remote_client_destroy, }; csock = crm_remote_accept(ssock); if (csock < 0) { return TRUE; } session = create_psk_tls_session(csock, GNUTLS_SERVER, psk_cred_s); if (session == NULL) { crm_err("TLS session creation failed"); close(csock); return TRUE; } new_client = crm_client_alloc(NULL); new_client->remote = calloc(1, sizeof(crm_remote_t)); new_client->kind = CRM_CLIENT_TLS; new_client->remote->tls_session = session; new_client->remote->auth_timeout = g_timeout_add(LRMD_REMOTE_AUTH_TIMEOUT, lrmd_auth_timeout_cb, new_client); crm_notice("LRMD client connection established. 
%p id: %s", new_client, new_client->id); new_client->remote->source = mainloop_add_fd("lrmd-remote-client", G_PRIORITY_DEFAULT, csock, new_client, &lrmd_remote_fd_cb); return TRUE; } static void lrmd_remote_connection_destroy(gpointer user_data) { crm_notice("Remote tls server disconnected"); return; } static int lrmd_tls_server_key_cb(gnutls_session_t session, const char *username, gnutls_datum_t * key) { return lrmd_tls_set_key(key); } static int bind_and_listen(struct addrinfo *addr) { int optval; int fd; int rc; char buffer[INET6_ADDRSTRLEN] = { 0, }; crm_sockaddr2str(addr->ai_addr, buffer); crm_trace("Attempting to bind on address %s", buffer); fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol); if (fd < 0) { return -1; } /* reuse address */ optval = 1; rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't allow the reuse of local addresses by our remote listener, bind address %s", buffer); close(fd); return -1; } if (addr->ai_family == AF_INET6) { optval = 0; rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't disable IPV6 only on address %s", buffer); close(fd); return -1; } } if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) { close(fd); return -1; } if (listen(fd, 10) == -1) { crm_err("Can not start listen on address %s", buffer); close(fd); return -1; } crm_notice("Listening on address %s", buffer); return fd; } int lrmd_init_remote_tls_server() { int rc; int filter; int port = crm_default_remote_port(); struct addrinfo hints, *res = NULL, *iter; char port_str[6]; // at most "65535" gnutls_datum_t psk_key = { NULL, 0 }; static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = lrmd_remote_listen, .destroy = lrmd_remote_connection_destroy, }; crm_notice("Starting TLS listener on port %d", port); crm_gnutls_global_init(); gnutls_global_set_log_function(debug_log); gnutls_dh_params_init(&dh_params); gnutls_dh_params_generate2(dh_params, 1024); gnutls_psk_allocate_server_credentials(&psk_cred_s); gnutls_psk_set_server_credentials_function(psk_cred_s, lrmd_tls_server_key_cb); gnutls_psk_set_server_dh_params(psk_cred_s, dh_params); /* The key callback won't get called until the first client connection * attempt. Do it once here, so we can warn the user at start-up if we can't * read the key. We don't error out, though, because it's fine if the key is * going to be added later. */ rc = lrmd_tls_set_key(&psk_key); if (rc != 0) { crm_warn("A cluster connection will not be possible until the key is available"); } memset(&hints, 0, sizeof(struct addrinfo)); /* Bind to the wildcard address (INADDR_ANY or IN6ADDR_ANY_INIT). 
* @TODO allow user to specify a specific address */ hints.ai_flags = AI_PASSIVE; hints.ai_family = AF_UNSPEC; /* Return IPv6 or IPv4 */ hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = IPPROTO_TCP; snprintf(port_str, sizeof(port_str), "%d", port); rc = getaddrinfo(NULL, port_str, &hints, &res); if (rc) { crm_err("Unable to get IP address info for local node: %s", gai_strerror(rc)); return -1; } iter = res; filter = AF_INET6; /* Try IPv6 addresses first, then IPv4 */ while (iter) { if (iter->ai_family == filter) { ssock = bind_and_listen(iter); } if (ssock != -1) { break; } iter = iter->ai_next; if (iter == NULL && filter == AF_INET6) { iter = res; filter = AF_INET; } } if (ssock < 0) { crm_err("unable to bind to address"); goto init_remote_cleanup; } mainloop_add_fd("lrmd-remote", G_PRIORITY_DEFAULT, ssock, NULL, &remote_listen_fd_callbacks); rc = ssock; init_remote_cleanup: if (rc < 0) { close(ssock); ssock = 0; } freeaddrinfo(res); return rc; } void lrmd_tls_server_destroy(void) { if (psk_cred_s) { gnutls_psk_free_server_credentials(psk_cred_s); psk_cred_s = 0; } if (ssock > 0) { close(ssock); ssock = 0; } } #endif
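The IPv6-then-IPv4 selection in lrmd_init_remote_tls_server() above is easy to misread: the while loop walks the getaddrinfo() results once with an AF_INET6 filter, then resets and walks them again with AF_INET. A minimal self-contained sketch of the same two-pass pattern (hypothetical helper name, error handling trimmed; not part of this patch):

#include <netdb.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Two-pass dual-stack listener: try every AF_INET6 candidate first,
 * then fall back to AF_INET, as lrmd_init_remote_tls_server() does. */
static int
dual_stack_listen(const char *port)
{
    struct addrinfo hints, *res = NULL, *iter;
    int filter = AF_INET6;
    int fd = -1;

    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_PASSIVE;      /* wildcard address */
    hints.ai_family = AF_UNSPEC;      /* return IPv6 and IPv4 */
    hints.ai_socktype = SOCK_STREAM;

    if (getaddrinfo(NULL, port, &hints, &res) != 0) {
        return -1;
    }
    iter = res;
    while (iter != NULL) {
        if (iter->ai_family == filter) {
            fd = socket(iter->ai_family, iter->ai_socktype, iter->ai_protocol);
            if ((fd >= 0) && (bind(fd, iter->ai_addr, iter->ai_addrlen) == 0)
                && (listen(fd, 10) == 0)) {
                break;                /* bound and listening */
            }
            if (fd >= 0) {
                close(fd);
                fd = -1;
            }
        }
        iter = iter->ai_next;
        if ((iter == NULL) && (filter == AF_INET6)) {
            iter = res;               /* second pass: IPv4 candidates */
            filter = AF_INET;
        }
    }
    freeaddrinfo(res);
    return fd;                        /* listening socket, or -1 */
}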