diff --git a/cib/callbacks.c b/cib/callbacks.c index b12a33a691..d13295bdc4 100644 --- a/cib/callbacks.c +++ b/cib/callbacks.c @@ -1,1299 +1,1298 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" extern GMainLoop* mainloop; extern gboolean cib_shutdown_flag; extern gboolean stand_alone; extern const char* cib_root; #if SUPPORT_HEARTBEAT extern ll_cluster_t *hb_conn; #endif extern void cib_ha_connection_destroy(gpointer user_data); extern enum cib_errors cib_update_counter( xmlNode *xml_obj, const char *field, gboolean reset); extern void GHFunc_count_peers( gpointer key, gpointer value, gpointer user_data); void initiate_exit(void); void terminate_cib(const char *caller); gint cib_GCompareFunc(gconstpointer a, gconstpointer b); gboolean can_write(int flags); void send_cib_replace(const xmlNode *sync_request, const char *host); void cib_process_request( xmlNode *request, gboolean privileged, gboolean force_synchronous, gboolean from_peer, cib_client_t *cib_client); void cib_common_callback_worker(xmlNode *op_request, cib_client_t *cib_client, gboolean force_synchronous, gboolean privileged); extern GHashTable *client_list; int next_client_id = 0; extern const char *cib_our_uname; extern unsigned long cib_num_ops, cib_num_local, cib_num_updates, cib_num_fail; extern unsigned long cib_bad_connects, cib_num_timeouts; extern longclock_t cib_call_time; extern enum cib_errors cib_status; int send_via_callback_channel(xmlNode *msg, const char *token); enum cib_errors cib_process_command( xmlNode *request, xmlNode **reply, xmlNode **cib_diff, gboolean privileged); gboolean cib_common_callback(IPC_Channel *channel, cib_client_t *cib_client, gboolean force_synchronous, gboolean privileged); gboolean cib_process_disconnect(IPC_Channel *channel, cib_client_t *cib_client); int num_clients = 0; static void cib_ipc_connection_destroy(gpointer user_data) { cib_client_t *cib_client = user_data; /* cib_process_disconnect */ if(cib_client == NULL) { crm_debug_4("Destroying %p", user_data); return; } if(cib_client->source != NULL) { crm_debug_4("Deleting %s (%p) from mainloop", cib_client->name, cib_client->source); G_main_del_IPC_Channel(cib_client->source); cib_client->source = NULL; } crm_debug_3("Destroying %s (%p)", cib_client->name, user_data); num_clients--; crm_debug_2("Num unfree'd clients: %d", num_clients); crm_free(cib_client->name); crm_free(cib_client->callback_id); crm_free(cib_client->id); crm_free(cib_client); crm_debug_4("Freed the cib client"); return; } gboolean cib_client_connect(IPC_Channel *channel, gpointer user_data) { cl_uuid_t client_id; xmlNode *reg_msg = NULL; cib_client_t *new_client = NULL; char 
uuid_str[UU_UNPARSE_SIZEOF]; const char *channel_name = user_data; gboolean (*callback)(IPC_Channel *channel, gpointer user_data); crm_debug_3("Connecting channel"); if (channel == NULL) { crm_err("Channel was NULL"); cib_bad_connects++; return FALSE; } else if (channel->ch_status != IPC_CONNECT) { crm_err("Channel was disconnected"); cib_bad_connects++; return FALSE; } else if(channel_name == NULL) { crm_err("user_data must contain channel name"); cib_bad_connects++; return FALSE; } else if(cib_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", channel->farside_pid); return FALSE; } callback = cib_ro_callback; if(safe_str_eq(channel_name, cib_channel_rw)) { callback = cib_rw_callback; } crm_malloc0(new_client, sizeof(cib_client_t)); num_clients++; new_client->channel = channel; new_client->channel_name = channel_name; crm_debug_3("Created channel %p for channel %s", new_client, new_client->channel_name); channel->ops->set_recv_qlen(channel, 1024); channel->ops->set_send_qlen(channel, 1024); new_client->source = G_main_add_IPC_Channel( G_PRIORITY_DEFAULT, channel, FALSE, callback, new_client, cib_ipc_connection_destroy); crm_debug_3("Channel %s connected for client %s", new_client->channel_name, new_client->id); cl_uuid_generate(&client_id); cl_uuid_unparse(&client_id, uuid_str); CRM_CHECK(new_client->id == NULL, crm_free(new_client->id)); new_client->id = crm_strdup(uuid_str); /* make sure we can find ourselves later for sync calls * redirected to the master instance */ g_hash_table_insert(client_list, new_client->id, new_client); reg_msg = create_xml_node(NULL, "callback"); crm_xml_add(reg_msg, F_CIB_OPERATION, CRM_OP_REGISTER); crm_xml_add(reg_msg, F_CIB_CLIENTID, new_client->id); send_ipc_message(channel, reg_msg); free_xml(reg_msg); return TRUE; } gboolean cib_rw_callback(IPC_Channel *channel, gpointer user_data) { gboolean result = FALSE; result = cib_common_callback(channel, user_data, FALSE, TRUE); return result; } gboolean cib_ro_callback(IPC_Channel *channel, gpointer user_data) { gboolean result = FALSE; result = cib_common_callback(channel, user_data, FALSE, FALSE); return result; } void cib_common_callback_worker(xmlNode *op_request, cib_client_t *cib_client, gboolean force_synchronous, gboolean privileged) { longclock_t call_stop = 0; longclock_t call_start = 0; const char *op = crm_element_value(op_request, F_CIB_OPERATION); if(crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { return; } else if(crm_str_eq(op, T_CIB_NOTIFY, TRUE)) { /* Update the notify filters for this client */ int on_off = 0; const char *type = crm_element_value(op_request, F_CIB_NOTIFY_TYPE);; crm_element_value_int(op_request, F_CIB_NOTIFY_ACTIVATE, &on_off); crm_debug("Setting %s callbacks for %s (%s): %s", type, cib_client->name, cib_client->id, on_off?"on":"off"); if(safe_str_eq(type, T_CIB_POST_NOTIFY)) { cib_client->post_notify = on_off; } else if(safe_str_eq(type, T_CIB_PRE_NOTIFY)) { cib_client->pre_notify = on_off; } else if(safe_str_eq(type, T_CIB_UPDATE_CONFIRM)) { cib_client->confirmations = on_off; } else if(safe_str_eq(type, T_CIB_DIFF_NOTIFY)) { cib_client->diffs = on_off; } else if(safe_str_eq(type, T_CIB_REPLACE_NOTIFY)) { cib_client->replace = on_off; } return; } cib_client->num_calls++; call_start = time_longclock(); cib_process_request( op_request, force_synchronous, privileged, FALSE, cib_client); call_stop = time_longclock(); cib_call_time += (call_stop - call_start); } gboolean cib_common_callback(IPC_Channel *channel, cib_client_t *cib_client, gboolean force_synchronous, 
gboolean privileged) { int lpc = 0; const char *value = NULL; xmlNode *op_request = NULL; gboolean keep_channel = TRUE; CRM_CHECK(cib_client != NULL, crm_err("Invalid client"); return FALSE); CRM_CHECK(cib_client->id != NULL, crm_err("Invalid client: %p", cib_client); return FALSE); /* * Do enough work to make entering worthwhile * But don't allow a single client to monopolize the CIB */ while(lpc < 5 && IPC_ISRCONN(channel) && channel->ops->is_message_pending(channel)) { lpc++; op_request = xmlfromIPC(channel, MAX_IPC_DELAY); if (op_request == NULL) { break; } if(cib_client->name == NULL) { value = crm_element_value(op_request, F_CIB_CLIENTNAME); if(value == NULL) { cib_client->name = crm_itoa(channel->farside_pid); } else { cib_client->name = crm_strdup(value); } } crm_xml_add(op_request, F_CIB_CLIENTID, cib_client->id); crm_xml_add(op_request, F_CIB_CLIENTNAME, cib_client->name); /* crm_log_xml(LOG_MSG, "Client[inbound]", op_request); */ if(cib_client->callback_id == NULL) { value = crm_element_value(op_request, F_CIB_CALLBACK_TOKEN); if(value != NULL) { cib_client->callback_id = crm_strdup(value); } else { cib_client->callback_id = crm_strdup(cib_client->id); } } cib_common_callback_worker( op_request, cib_client, force_synchronous, privileged); free_xml(op_request); } if(channel->ch_status != IPC_CONNECT) { crm_debug_2("Client disconnected"); keep_channel = cib_process_disconnect(channel, cib_client); } return keep_channel; } static void do_local_notify(xmlNode *notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { /* send callback to originating child */ cib_client_t *client_obj = NULL; enum cib_errors local_rc = cib_ok; crm_debug_2("Performing notification"); if(client_id != NULL) { client_obj = g_hash_table_lookup(client_list, client_id); } else { crm_debug_2("No client to sent the response to." 
" F_CIB_CLIENTID not set."); } crm_debug_3("Sending callback to request originator"); if(client_obj == NULL) { local_rc = cib_reply_failed; } else { const char *client_id = client_obj->callback_id; crm_debug_2("Sending %ssync response to %s %s", sync_reply?"":"an a-", client_obj->name, from_peer?"(originator of delegated request)":""); if(sync_reply) { client_id = client_obj->id; } local_rc = send_via_callback_channel(notify_src, client_id); } if(local_rc != cib_ok && client_obj != NULL) { crm_warn("%sSync reply to %s failed: %s", sync_reply?"":"A-", client_obj?client_obj->name:"", cib_error2string(local_rc)); } } static void parse_local_options( cib_client_t *cib_client, int call_type, int call_options, const char *host, const char *op, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { if(cib_op_modifies(call_type) && !(call_options & cib_inhibit_bcast)) { /* we need to send an update anyway */ *needs_reply = TRUE; } else { *needs_reply = FALSE; } if(host == NULL && (call_options & cib_scope_local)) { crm_debug_2("Processing locally scoped %s op from %s", op, cib_client->name); *local_notify = TRUE; } else if(host == NULL && cib_is_master) { crm_debug_2("Processing master %s op locally from %s", op, cib_client->name); *local_notify = TRUE; } else if(safe_str_eq(host, cib_our_uname)) { crm_debug_2("Processing locally addressed %s op from %s", op, cib_client->name); *local_notify = TRUE; } else if(stand_alone) { *needs_forward = FALSE; *local_notify = TRUE; *process = TRUE; } else { crm_debug_2("%s op from %s needs to be forwarded to %s", op, cib_client->name, host?host:"the master instance"); *needs_forward = TRUE; *process = FALSE; } } static gboolean parse_peer_options( int call_type, xmlNode *request, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { const char *op = crm_element_value(request, F_CIB_OPERATION); const char *originator = crm_element_value(request, F_ORIG); const char *host = crm_element_value(request, F_CIB_HOST); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); const char *update = crm_element_value(request, F_CIB_GLOBAL_UPDATE); const char *delegated = crm_element_value(request, F_CIB_DELEGATED); if(safe_str_eq(op, "cib_shutdown_req")) { if(reply_to != NULL) { crm_debug("Processing %s from %s", op, host); *needs_reply = FALSE; } else { crm_debug("Processing %s reply from %s", op, host); } return TRUE; } else if(crm_is_true(update) && safe_str_eq(reply_to, cib_our_uname)) { crm_debug_2("Processing global/peer update from %s" " that originated from us", originator); *needs_reply = FALSE; if(crm_element_value(request, F_CIB_CLIENTID) != NULL) { *local_notify = TRUE; } return TRUE; } else if(crm_is_true(update)) { crm_debug_2("Processing global/peer update from %s", originator); *needs_reply = FALSE; return TRUE; } else if(host != NULL && safe_str_eq(host, cib_our_uname)) { crm_debug_2("Processing request sent to us from %s", originator); return TRUE; } else if(delegated != NULL && cib_is_master == TRUE) { crm_debug_2("Processing request sent to master instance from %s", originator); return TRUE; } else if(reply_to != NULL && safe_str_eq(reply_to, cib_our_uname)) { crm_debug_2("Forward reply sent from %s to local clients", originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } else if(delegated != NULL) { crm_debug_2("Ignoring msg for master instance"); } else if(host != NULL) { /* this is for a specific instance and we're not it */ 
crm_debug_2("Ignoring msg for instance on %s", crm_str(host)); } else if(reply_to == NULL && cib_is_master == FALSE) { /* this is for the master instance and we're not it */ crm_debug_2("Ignoring reply to %s", crm_str(reply_to)); } else { crm_err("Nothing for us to do?"); crm_log_xml(LOG_ERR, "Peer[inbound]", request); } return FALSE; } static void forward_request(xmlNode *request, cib_client_t *cib_client, int call_options) { xmlNode *forward_msg = NULL; const char *op = crm_element_value(request, F_CIB_OPERATION); const char *host = crm_element_value(request, F_CIB_HOST); forward_msg = cib_msg_copy(request, TRUE); crm_xml_add(forward_msg, F_CIB_DELEGATED, cib_our_uname); if(host != NULL) { crm_debug_2("Forwarding %s op to %s", op, host); send_cluster_message(host, crm_msg_cib, forward_msg, FALSE); } else { crm_debug_2("Forwarding %s op to master instance", op); send_cluster_message(NULL, crm_msg_cib, forward_msg, FALSE); } if(call_options & cib_discard_reply) { crm_debug_2("Client not interested in reply"); } free_xml(forward_msg); } static void send_peer_reply( xmlNode *msg, xmlNode *result_diff, const char *originator, gboolean broadcast) { xmlNode *reply_copy = NULL; CRM_ASSERT(msg != NULL); reply_copy = cib_msg_copy(msg, TRUE); if(broadcast) { /* this (successful) call modified the CIB _and_ the * change needs to be broadcast... * send via HA to other nodes */ int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; char *digest = NULL; cib_diff_version_details( result_diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_debug_2("Sending update diff %d.%d.%d -> %d.%d.%d", diff_del_admin_epoch,diff_del_epoch,diff_del_updates, diff_add_admin_epoch,diff_add_epoch,diff_add_updates); crm_xml_add(reply_copy, F_CIB_ISREPLY, originator); crm_xml_add(reply_copy, F_CIB_GLOBAL_UPDATE, XML_BOOLEAN_TRUE); crm_xml_add(reply_copy, F_CIB_OPERATION, CIB_OP_APPLY_DIFF); digest = calculate_xml_digest(the_cib, FALSE, TRUE); crm_xml_add(result_diff, XML_ATTR_DIGEST, digest); /* crm_log_xml_debug(the_cib, digest); */ crm_free(digest); add_message_xml(reply_copy, F_CIB_UPDATE_DIFF, result_diff); crm_log_xml(LOG_DEBUG_3, "copy", reply_copy); send_cluster_message(NULL, crm_msg_cib, reply_copy, TRUE); } else if(originator != NULL) { /* send reply via HA to originating node */ crm_debug_2("Sending request result to originator only"); crm_xml_add(reply_copy, F_CIB_ISREPLY, originator); send_cluster_message(originator, crm_msg_cib, reply_copy, FALSE); } free_xml(reply_copy); } void cib_process_request( xmlNode *request, gboolean force_synchronous, gboolean privileged, gboolean from_peer, cib_client_t *cib_client) { int call_type = 0; int call_options = 0; gboolean process = TRUE; gboolean is_update = TRUE; gboolean needs_reply = TRUE; gboolean local_notify = FALSE; gboolean needs_forward = FALSE; gboolean global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE)); xmlNode *op_reply = NULL; xmlNode *result_diff = NULL; enum cib_errors rc = cib_ok; const char *op = crm_element_value(request, F_CIB_OPERATION); const char *originator = crm_element_value(request, F_ORIG); const char *host = crm_element_value(request, F_CIB_HOST); crm_debug_4("%s Processing msg %s", cib_our_uname, crm_element_value(request, F_SEQ)); cib_num_ops++; if(cib_num_ops == 0) { cib_num_fail = 0; cib_num_local = 0; cib_num_updates = 0; 
crm_info("Stats wrapped around"); } if(host != NULL && strlen(host) == 0) { host = NULL; } crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); if(force_synchronous) { call_options |= cib_sync_call; } crm_debug_2("Processing %s message (%s) for %s...", from_peer?"peer":"local", from_peer?originator:cib_our_uname, host?host:"master"); rc = cib_get_operation_id(op, &call_type); if(rc != cib_ok) { /* TODO: construct error reply? */ crm_err("Pre-processing of command failed: %s", cib_error2string(rc)); return; } is_update = cib_op_modifies(call_type); if(is_update) { cib_num_updates++; } if(from_peer == FALSE) { parse_local_options(cib_client, call_type, call_options, host, op, &local_notify, &needs_reply, &process, &needs_forward); } else if(parse_peer_options(call_type, request, &local_notify, &needs_reply, &process, &needs_forward) == FALSE) { return; } crm_debug_3("Finished determining processing actions"); if(call_options & cib_discard_reply) { needs_reply = is_update; local_notify = FALSE; } if(needs_forward) { forward_request(request, cib_client, call_options); return; } if(cib_status != cib_ok) { rc = cib_status; crm_err("Operation ignored, cluster configuration is invalid." " Please repair and restart: %s", cib_error2string(cib_status)); op_reply = cib_construct_reply(request, the_cib, cib_status); } else if(process) { int level = LOG_INFO; const char *section = crm_element_value(request, F_CIB_SECTION); cib_num_local++; rc = cib_process_command( request, &op_reply, &result_diff, privileged); if(global_update) { switch(rc) { case cib_ok: case cib_old_data: case cib_diff_resync: case cib_diff_failed: level = LOG_DEBUG_2; break; default: level = LOG_ERR; } } else if(safe_str_eq(op, CIB_OP_QUERY)) { level = LOG_DEBUG_2; } else if(rc != cib_ok) { cib_num_fail++; level = LOG_WARNING; } else if(safe_str_eq(op, CIB_OP_SLAVE)) { level = LOG_DEBUG_2; } else if(safe_str_eq(section, XML_CIB_TAG_STATUS)) { level = LOG_DEBUG_2; } if(crm_log_level >= level) { /* Avoid all the xml lookups if we're not going to print the results */ do_crm_log(level, "Operation complete: op %s for section %s (origin=%s/%s/%s, version=%s.%s.%s): %s (rc=%d)", op, section?section:"'all'", originator?originator:"local", crm_element_value(request, F_CIB_CLIENTNAME), crm_element_value(request, F_CIB_CALLID), the_cib?crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN):"0", the_cib?crm_element_value(the_cib, XML_ATTR_GENERATION):"0", the_cib?crm_element_value(the_cib, XML_ATTR_NUMUPDATES):"0", cib_error2string(rc), rc); } if(op_reply == NULL && (needs_reply || local_notify)) { crm_err("Unexpected NULL reply to message"); crm_log_xml(LOG_ERR, "null reply", request); needs_reply = FALSE; local_notify = FALSE; } } crm_debug_3("processing response cases"); if(local_notify) { const char *client_id = crm_element_value(request, F_CIB_CLIENTID); if(process == FALSE) { do_local_notify(request, client_id, call_options & cib_sync_call, from_peer); } else { do_local_notify(op_reply, client_id, call_options & cib_sync_call, from_peer); } } /* from now on we are the server */ if(needs_reply == FALSE || stand_alone) { /* nothing more to do... 
* this was a non-originating slave update */ crm_debug_2("Completed slave update"); } else if(rc == cib_ok && result_diff != NULL && !(call_options & cib_inhibit_bcast)) { send_peer_reply(request, result_diff, originator, TRUE); } else if(call_options & cib_discard_reply) { crm_debug_4("Caller isn't interested in reply"); } else if (from_peer) { if(is_update == FALSE || result_diff == NULL) { crm_debug_3("Request not broadcast: R/O call"); } else if(call_options & cib_inhibit_bcast) { crm_debug_3("Request not broadcast: inhibited"); } else if(rc != cib_ok) { crm_debug_3("Request not broadcast: call failed: %s", cib_error2string(rc)); } else { crm_debug_2("Directing reply to %s", originator); } send_peer_reply(op_reply, result_diff, originator, FALSE); } free_xml(op_reply); free_xml(result_diff); return; } xmlNode * cib_construct_reply(xmlNode *request, xmlNode *output, int rc) { int lpc = 0; xmlNode *reply = NULL; const char *name = NULL; const char *value = NULL; const char *names[] = { F_CIB_OPERATION, F_CIB_CALLID, F_CIB_CLIENTID, F_CIB_CALLOPTS }; crm_debug_4("Creating a basic reply"); reply = create_xml_node(NULL, "cib-reply"); crm_xml_add(reply, F_TYPE, T_CIB); for(lpc = 0; lpc < DIMOF(names); lpc++) { name = names[lpc]; value = crm_element_value(request, name); crm_xml_add(reply, name, value); } crm_xml_add_int(reply, F_CIB_RC, rc); if(output != NULL) { crm_debug_4("Attaching reply output"); add_message_xml(reply, F_CIB_CALLDATA, output); } return reply; } enum cib_errors cib_process_command(xmlNode *request, xmlNode **reply, xmlNode **cib_diff, gboolean privileged) { xmlNode *input = NULL; xmlNode *output = NULL; xmlNode *result_cib = NULL; xmlNode *current_cib = NULL; int call_type = 0; int call_options = 0; int log_level = LOG_DEBUG_4; const char *op = NULL; const char *section = NULL; enum cib_errors rc = cib_ok; enum cib_errors rc2 = cib_ok; gboolean send_r_notify = FALSE; gboolean global_update = FALSE; gboolean config_changed = FALSE; gboolean manage_counters = TRUE; CRM_ASSERT(cib_status == cib_ok); *reply = NULL; *cib_diff = NULL; current_cib = the_cib; /* Start processing the request... 
 */
    op = crm_element_value(request, F_CIB_OPERATION);
    crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
    rc = cib_get_operation_id(op, &call_type);

    if(rc == cib_ok) {
        rc = cib_op_can_run(call_type, call_options, privileged, global_update);
    }

    rc2 = cib_op_prepare(call_type, request, &input, &section);
    if(rc == cib_ok) {
        rc = rc2;
    }

    if(rc != cib_ok) {
        crm_debug_2("Call setup failed: %s", cib_error2string(rc));
        goto done;

    } else if(cib_op_modifies(call_type) == FALSE) {
        rc = cib_perform_op(op, call_options, cib_op_func(call_type), TRUE,
                            section, request, input, FALSE, &config_changed,
                            current_cib, &result_cib, NULL, &output);

        CRM_CHECK(result_cib == NULL, free_xml(result_cib));
        goto done;
    }

    /* Handle a valid write action */
    global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE));
    if(global_update) {
        manage_counters = FALSE;
        call_options |= cib_force_diff;
        CRM_CHECK(call_type == 3 || call_type == 4,
                  crm_err("Call type: %d", call_type);
                  crm_log_xml(LOG_ERR, "bad op", request));
    }

#ifdef SUPPORT_PRENOTIFY
    if((call_options & cib_inhibit_notify) == 0) {
        cib_pre_notify(call_options, op, the_cib, input);
    }
#endif

    if(rc == cib_ok) {
        if(call_options & cib_inhibit_bcast) {
            /* skip */
            crm_debug_2("Skipping update: inhibit broadcast");
            manage_counters = FALSE;
        }

        rc = cib_perform_op(op, call_options, cib_op_func(call_type), FALSE,
                            section, request, input, manage_counters, &config_changed,
                            current_cib, &result_cib, cib_diff, &output);

        if(manage_counters == FALSE) {
-           *cib_diff = diff_cib_object(current_cib, result_cib, FALSE);
-           config_changed = cib_config_changed(*cib_diff);
+           config_changed = cib_config_changed(current_cib, result_cib, cib_diff);
        }
    }

    if(rc == cib_ok) {
        rc = activateCibXml(result_cib, config_changed, op);
        if(crm_str_eq(CIB_OP_REPLACE, op, TRUE)) {
            if(section == NULL) {
                send_r_notify = TRUE;
            } else if(safe_str_eq(section, XML_TAG_CIB)) {
                send_r_notify = TRUE;
            } else if(safe_str_eq(section, XML_CIB_TAG_NODES)) {
                send_r_notify = TRUE;
            } else if(safe_str_eq(section, XML_CIB_TAG_STATUS)) {
                send_r_notify = TRUE;
            }

        } else if(crm_str_eq(CIB_OP_ERASE, op, TRUE)) {
            send_r_notify = TRUE;
        }

    } else if(rc == cib_dtd_validation) {
        if(output != NULL) {
            crm_log_xml_info(output, "cib:output");
            free_xml(output);
        }
        output = result_cib;

    } else {
        free_xml(result_cib);
    }

    if((call_options & cib_inhibit_notify) == 0) {
        const char *call_id = crm_element_value(request, F_CIB_CALLID);
        const char *client = crm_element_value(request, F_CIB_CLIENTNAME);
#ifdef SUPPORT_POSTNOTIFY
        cib_post_notify(call_options, op, input, rc, the_cib);
#endif
        cib_diff_notify(call_options, client, call_id, op, input, rc, *cib_diff);
    }

    if(send_r_notify) {
        const char *origin = crm_element_value(request, F_ORIG);
        cib_replace_notify(origin, the_cib, rc, *cib_diff);
    }

    if(rc != cib_ok) {
        log_level = LOG_DEBUG_4;
        if(rc == cib_dtd_validation && global_update) {
            log_level = LOG_WARNING;
            crm_log_xml_info(input, "cib:global_update");
        }

    } else if(config_changed) {
        log_level = LOG_DEBUG_3;
        if(cib_is_master) {
            log_level = LOG_INFO;
        }

    } else if(cib_is_master) {
        log_level = LOG_DEBUG_2;
    }

    log_xml_diff(log_level, *cib_diff, "cib:diff");

  done:
    if((call_options & cib_discard_reply) == 0) {
        *reply = cib_construct_reply(request, output, rc);
        /* crm_log_xml_info(*reply, "cib:reply"); */
    }

    if(call_type >= 0) {
        cib_op_cleanup(call_type, call_options, &input, &output);
    }

    return rc;
}

int
send_via_callback_channel(xmlNode *msg, const char *token)
{
    cib_client_t *hash_client = NULL;
    enum cib_errors rc = cib_ok;

    crm_debug_3("Delivering msg
%p to client %s", msg, token); if(token == NULL) { crm_err("No client id token, cant send message"); if(rc == cib_ok) { rc = cib_missing; } } else if(msg == NULL) { crm_err("No message to send"); rc = cib_reply_failed; } else { /* A client that left before we could reply is not really * _our_ error. Warn instead. */ hash_client = g_hash_table_lookup(client_list, token); if(hash_client == NULL) { crm_warn("Cannot find client for token %s", token); rc = cib_client_gone; } else if (crm_str_eq(hash_client->channel_name, "remote", FALSE)) { /* just hope it's alive */ } else if(hash_client->channel == NULL) { crm_err("Cannot find channel for client %s", token); rc = cib_client_corrupt; } } if(rc == cib_ok) { crm_debug_3("Delivering reply to client %s (%s)", token, hash_client->channel_name); if (crm_str_eq(hash_client->channel_name, "remote", FALSE)) { cib_send_remote_msg(hash_client->channel, msg, hash_client->encrypted); } else if(send_ipc_message(hash_client->channel, msg) == FALSE) { crm_warn("Delivery of reply to client %s/%s failed", hash_client->name, token); rc = cib_reply_failed; } } return rc; } gint cib_GCompareFunc(gconstpointer a, gconstpointer b) { const xmlNode *a_msg = a; const xmlNode *b_msg = b; int msg_a_id = 0; int msg_b_id = 0; const char *value = NULL; value = crm_element_value_const(a_msg, F_CIB_CALLID); msg_a_id = crm_parse_int(value, NULL); value = crm_element_value_const(b_msg, F_CIB_CALLID); msg_b_id = crm_parse_int(value, NULL); if(msg_a_id == msg_b_id) { return 0; } else if(msg_a_id < msg_b_id) { return -1; } return 1; } gboolean cib_process_disconnect(IPC_Channel *channel, cib_client_t *cib_client) { if (channel == NULL) { CRM_DEV_ASSERT(cib_client == NULL); } else if (cib_client == NULL) { crm_err("No client"); } else { CRM_DEV_ASSERT(channel->ch_status != IPC_CONNECT); crm_debug_2("Cleaning up after client disconnect: %s/%s/%s", crm_str(cib_client->name), cib_client->channel_name, cib_client->id); if(cib_client->id != NULL) { if(!g_hash_table_remove(client_list, cib_client->id)) { crm_err("Client %s not found in the hashtable", cib_client->name); } } } if(cib_shutdown_flag && g_hash_table_size(client_list) == 0) { crm_info("All clients disconnected..."); initiate_exit(); } return FALSE; } void cib_ha_peer_callback(HA_Message * msg, void* private_data) { xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__); cib_peer_callback(xml, private_data); free_xml(xml); } void cib_peer_callback(xmlNode * msg, void* private_data) { crm_node_t *node = NULL; const char *reason = NULL; const char *originator = crm_element_value(msg, F_ORIG); if(originator == NULL || crm_str_eq(originator, cib_our_uname, TRUE)) { /* message is from ourselves */ return; } else if(crm_peer_cache == NULL) { reason = "membership not established"; goto bail; } node = crm_get_peer(0, originator); if(node == NULL || crm_is_member_active(node) == FALSE) { reason = "not in our membership"; goto bail; } if(crm_element_value(msg, F_CIB_CLIENTNAME) == NULL) { crm_xml_add(msg, F_CIB_CLIENTNAME, originator); } /* crm_log_xml(LOG_MSG, "Peer[inbound]", msg); */ cib_process_request(msg, FALSE, TRUE, TRUE, NULL); return; bail: if(reason) { const char *seq = crm_element_value(msg, F_SEQ); const char *op = crm_element_value(msg, F_CIB_OPERATION); crm_warn("Discarding %s message (%s) from %s: %s", op, seq, originator, reason); } } void cib_client_status_callback(const char * node, const char * client, const char * status, void * private) { crm_node_t *member = NULL; if(safe_str_eq(client, CRM_SYSTEM_CIB)) { 
crm_info("Status update: Client %s/%s now has status [%s]", node, client, status); if(safe_str_eq(status, JOINSTATUS)){ status = ONLINESTATUS; } else if(safe_str_eq(status, LEAVESTATUS)){ status = OFFLINESTATUS; } member = crm_get_peer(0, node); if(member == NULL) { /* Make sure it gets created */ const char *uuid = get_uuid(node); member = crm_update_peer(0, 0, 0, -1, 0, uuid, node, NULL, NULL); } crm_update_peer_proc(node, crm_proc_cib, status); } return; } #if SUPPORT_HEARTBEAT extern oc_ev_t *cib_ev_token; gboolean cib_ccm_dispatch(int fd, gpointer user_data) { int rc = 0; oc_ev_t *ccm_token = (oc_ev_t*)user_data; crm_debug_2("received callback"); rc = oc_ev_handle_event(ccm_token); if(0 == rc) { return TRUE; } crm_err("CCM connection appears to have failed: rc=%d.", rc); /* eventually it might be nice to recover and reconnect... but until then... */ crm_err("Exiting to recover from CCM connection failure"); exit(2); return FALSE; } int current_instance = 0; void cib_ccm_msg_callback( oc_ed_t event, void *cookie, size_t size, const void *data) { gboolean update_id = FALSE; const oc_ev_membership_t *membership = data; CRM_ASSERT(membership != NULL); crm_info("Processing CCM event=%s (id=%d)", ccm_event_name(event), membership->m_instance); if(current_instance > membership->m_instance) { crm_err("Membership instance ID went backwards! %d->%d", current_instance, membership->m_instance); CRM_ASSERT(current_instance <= membership->m_instance); } switch(event) { case OC_EV_MS_NEW_MEMBERSHIP: case OC_EV_MS_INVALID: update_id = TRUE; break; case OC_EV_MS_PRIMARY_RESTORED: update_id = TRUE; break; case OC_EV_MS_NOT_PRIMARY: crm_debug_2("Ignoring transitional CCM event: %s", ccm_event_name(event)); break; case OC_EV_MS_EVICTED: crm_err("Evicted from CCM: %s", ccm_event_name(event)); break; default: crm_err("Unknown CCM event: %d", event); } if(update_id) { unsigned int lpc = 0; CRM_CHECK(membership != NULL, return); current_instance = membership->m_instance; for(lpc=0; lpc < membership->m_n_out; lpc++) { crm_update_ccm_node( membership, lpc+membership->m_out_idx, CRM_NODE_LOST, current_instance); } for(lpc=0; lpc < membership->m_n_member; lpc++) { crm_update_ccm_node( membership, lpc+membership->m_memb_idx,CRM_NODE_ACTIVE, current_instance); } } oc_ev_callback_done(cookie); return; } #endif gboolean can_write(int flags) { return TRUE; } static gboolean cib_force_exit(gpointer data) { crm_notice("Forcing exit!"); terminate_cib(__FUNCTION__); return FALSE; } void initiate_exit(void) { int active = 0; xmlNode *leaving = NULL; active = crm_active_peers(crm_proc_cib); if(active < 2) { terminate_cib(__FUNCTION__); return; } crm_info("Sending disconnect notification to %d peers...", active); leaving = create_xml_node(NULL, "exit-notification"); crm_xml_add(leaving, F_TYPE, "cib"); crm_xml_add(leaving, F_CIB_OPERATION, "cib_shutdown_req"); send_cluster_message(NULL, crm_msg_cib, leaving, TRUE); free_xml(leaving); g_timeout_add(crm_get_msec("5s"), cib_force_exit, NULL); } extern int remote_fd; extern int remote_tls_fd; void terminate_cib(const char *caller) { if(remote_fd > 0) { close(remote_fd); } if(remote_tls_fd > 0) { close(remote_tls_fd); } #if SUPPORT_AIS if(is_openais_cluster()) { cib_ha_connection_destroy(NULL); return; } #endif #if SUPPORT_HEARTBEAT if(hb_conn != NULL) { crm_info("%s: Disconnecting heartbeat", caller); hb_conn->llc_ops->signoff(hb_conn, FALSE); } else { crm_err("%s: No heartbeat connection", caller); } #endif uninitializeCib(); crm_info("Exiting..."); if (mainloop != NULL && 
g_main_is_running(mainloop)) { g_main_quit(mainloop); } else { exit(LSB_EXIT_OK); } } diff --git a/crmd/callbacks.c b/crmd/callbacks.c index 687a5dd213..28f1ad4d6d 100755 --- a/crmd/callbacks.c +++ b/crmd/callbacks.c @@ -1,632 +1,632 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include void crmd_ha_connection_destroy(gpointer user_data); void crmd_ha_msg_filter(xmlNode *msg); /* From join_dc... */ extern gboolean check_join_state( enum crmd_fsa_state cur_state, const char *source); #define trigger_fsa(source) crm_debug_3("Triggering FSA: %s", __FUNCTION__); \ mainloop_set_trigger(source); #if SUPPORT_HEARTBEAT gboolean crmd_ha_msg_dispatch(ll_cluster_t *cluster_conn, gpointer user_data) { IPC_Channel *channel = NULL; gboolean stay_connected = TRUE; crm_debug_3("Invoked"); if(cluster_conn != NULL) { channel = cluster_conn->llc_ops->ipcchan(cluster_conn); } CRM_CHECK(cluster_conn != NULL, ;); CRM_CHECK(channel != NULL, ;); if(channel != NULL && IPC_ISRCONN(channel)) { if(cluster_conn->llc_ops->msgready(cluster_conn) == 0) { crm_debug_2("no message ready yet"); } /* invoke the callbacks but dont block */ cluster_conn->llc_ops->rcvmsg(cluster_conn, 0); } if (channel == NULL || channel->ch_status != IPC_CONNECT) { if(is_set(fsa_input_register, R_HA_DISCONNECTED) == FALSE) { crm_crit("Lost connection to heartbeat service."); } else { crm_info("Lost connection to heartbeat service."); } trigger_fsa(fsa_source); stay_connected = FALSE; } return stay_connected; } #endif void crmd_ha_connection_destroy(gpointer user_data) { crm_debug_3("Invoked"); if(is_set(fsa_input_register, R_HA_DISCONNECTED)) { /* we signed out, so this is expected */ crm_info("Heartbeat disconnection complete"); return; } crm_crit("Lost connection to heartbeat service!"); register_fsa_input(C_HA_DISCONNECT, I_ERROR, NULL); trigger_fsa(fsa_source); } void crmd_ha_msg_filter(xmlNode *msg) { if(AM_I_DC) { const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); if(safe_str_eq(sys_from, CRM_SYSTEM_DC)) { const char *from = crm_element_value(msg, F_ORIG); if(safe_str_neq(from, fsa_our_uname)) { int level = LOG_INFO; const char *op = crm_element_value(msg, F_CRM_TASK); /* make sure the election happens NOW */ if(fsa_state != S_ELECTION) { ha_msg_input_t new_input; level = LOG_ERR; new_input.msg = msg; register_fsa_error_adv( C_FSA_INTERNAL, I_ELECTION, NULL, &new_input, __FUNCTION__); } do_crm_log(level, "Another DC detected: %s (op=%s)", from, op); goto done; } } } else { const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); if(safe_str_eq(sys_to, CRM_SYSTEM_DC)) { return; } } /* crm_log_xml(LOG_MSG, "HA[inbound]", msg); */ route_message(C_HA_MESSAGE, msg); done: trigger_fsa(fsa_source); } #if SUPPORT_HEARTBEAT void 
crmd_ha_msg_callback(HA_Message *hamsg, void* private_data) { int level = LOG_DEBUG; crm_node_t *from_node = NULL; xmlNode *msg = convert_ha_message(NULL, hamsg, __FUNCTION__); const char *from = crm_element_value(msg, F_ORIG); const char *op = crm_element_value(msg, F_CRM_TASK); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); CRM_CHECK(from != NULL, crm_log_xml_err(msg, "anon"); goto bail); crm_debug_2("HA[inbound]: %s from %s", op, from); if(crm_peer_cache == NULL || crm_active_members() == 0) { crm_debug("Ignoring HA messages until we are" " connected to the CCM (%s op from %s)", op, from); crm_log_xml(LOG_MSG, "HA[inbound]: Ignore (No CCM)", msg); goto bail; } from_node = crm_get_peer(0, from); if(crm_is_member_active(from_node) == FALSE) { if(safe_str_eq(op, CRM_OP_VOTE)) { level = LOG_WARNING; } else if(AM_I_DC && safe_str_eq(op, CRM_OP_JOIN_ANNOUNCE)) { level = LOG_WARNING; } else if(safe_str_eq(sys_from, CRM_SYSTEM_DC)) { level = LOG_WARNING; } do_crm_log(level, "Ignoring HA message (op=%s) from %s: not in our" " membership list (size=%d)", op, from, crm_active_members()); crm_log_xml(LOG_MSG, "HA[inbound]: CCM Discard", msg); } else { crmd_ha_msg_filter(msg); } bail: free_xml(msg); return; } #endif /* * Apparently returning TRUE means "stay connected, keep doing stuff". * Returning FALSE means "we're all done, close the connection" */ gboolean crmd_ipc_msg_callback(IPC_Channel *client, gpointer user_data) { int lpc = 0; xmlNode *msg = NULL; crmd_client_t *curr_client = (crmd_client_t*)user_data; gboolean stay_connected = TRUE; crm_debug_2("Invoked: %s", curr_client->table_key); while(IPC_ISRCONN(client)) { if(client->ops->is_message_pending(client) == 0) { break; } msg = xmlfromIPC(client, MAX_IPC_DELAY); if (msg == NULL) { break; } lpc++; crm_debug_2("Processing msg from %s", curr_client->table_key); crm_log_xml(LOG_DEBUG_2, "CRMd[inbound]", msg); if(crmd_authorize_message(msg, curr_client)) { route_message(C_IPC_MESSAGE, msg); } free_xml(msg); msg = NULL; if(client->ch_status != IPC_CONNECT) { break; } } crm_debug_2("Processed %d messages", lpc); if (client->ch_status != IPC_CONNECT) { stay_connected = FALSE; process_client_disconnect(curr_client); } trigger_fsa(fsa_source); return stay_connected; } extern GCHSource *lrm_source; gboolean lrm_dispatch(IPC_Channel *src_not_used, gpointer user_data) { /* ?? src == lrm_channel ?? */ ll_lrm_t *lrm = (ll_lrm_t*)user_data; IPC_Channel *lrm_channel = lrm->lrm_ops->ipcchan(lrm); lrm->lrm_ops->rcvmsg(lrm, FALSE); if(lrm_channel->ch_status != IPC_CONNECT) { lrm_connection_destroy(NULL); return FALSE; } return TRUE; } extern gboolean process_lrm_event(lrm_op_t *op); void lrm_op_callback(lrm_op_t* op) { CRM_CHECK(op != NULL, return); process_lrm_event(op); } void ais_status_callback(enum crm_status_type type, crm_node_t *node, const void *data) { gboolean reset_status_entry = FALSE; if(AM_I_DC == FALSE || node->uname == NULL) { return; } switch(type) { case crm_status_uname: crm_info("status: %s is now %s", node->uname, node->state); /* reset_status_entry = TRUE; */ /* If we've never seen the node, then it also wont be in the status section */ break; case crm_status_nstate: crm_info("status: %s is now %s (was %s)", node->uname, node->state, (const char *)data); reset_status_entry = TRUE; break; case crm_status_processes: break; } /* Can this be removed now that do_cl_join_finalize_respond() does the same thing? 
*/ if(reset_status_entry && safe_str_eq(CRMD_STATE_ACTIVE, node->state)) { - erase_status_tag(node->uname, XML_CIB_TAG_LRM); - erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS); + erase_status_tag(node->uname, XML_CIB_TAG_LRM, cib_scope_local); + erase_status_tag(node->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local); /* TODO: potentially we also want to set XML_CIB_ATTR_JOINSTATE and XML_CIB_ATTR_EXPSTATE here */ } } void crmd_ha_status_callback(const char *node, const char *status, void *private) { xmlNode *update = NULL; crm_node_t *member = NULL; crm_notice("Status update: Node %s now has status [%s] (DC=%s)", node, status, AM_I_DC?"true":"false"); member = crm_get_peer(0, node); if(member == NULL || crm_is_member_active(member) == FALSE) { /* Make sure it is created so crm_update_peer_proc() succeeds */ const char *uuid = get_uuid(node); member = crm_update_peer(0, 0, 0, -1, 0, uuid, node, NULL, NULL); } if(safe_str_eq(status, PINGSTATUS)) { return; } if(safe_str_eq(status, DEADSTATUS)) { /* this node is toast */ crm_update_peer_proc(node, crm_proc_ais, OFFLINESTATUS); if(AM_I_DC) { update = create_node_state( node, DEADSTATUS, XML_BOOLEAN_NO, OFFLINESTATUS, CRMD_JOINSTATE_DOWN, NULL, TRUE, __FUNCTION__); } } else { crm_update_peer_proc(node, crm_proc_ais, ONLINESTATUS); if(AM_I_DC) { update = create_node_state( node, ACTIVESTATUS, NULL, NULL, CRMD_JOINSTATE_PENDING, NULL, FALSE, __FUNCTION__); } } trigger_fsa(fsa_source); if(update != NULL) { fsa_cib_anon_update( XML_CIB_TAG_STATUS, update, cib_scope_local|cib_quorum_override|cib_can_create); free_xml(update); } } void crmd_client_status_callback(const char * node, const char * client, const char * status, void * private) { const char *join = NULL; crm_node_t *member = NULL; xmlNode *update = NULL; gboolean clear_shutdown = FALSE; crm_debug_3("Invoked"); if(safe_str_neq(client, CRM_SYSTEM_CRMD)) { return; } if(safe_str_eq(status, JOINSTATUS)){ clear_shutdown = TRUE; status = ONLINESTATUS; join = CRMD_JOINSTATE_PENDING; } else if(safe_str_eq(status, LEAVESTATUS)){ status = OFFLINESTATUS; join = CRMD_JOINSTATE_DOWN; /* clear_shutdown = TRUE; */ } set_bit_inplace(fsa_input_register, R_PEER_DATA); crm_notice("Status update: Client %s/%s now has status [%s] (DC=%s)", node, client, status, AM_I_DC?"true":"false"); if(safe_str_eq(status, ONLINESTATUS)) { /* remove the cached value in case it changed */ crm_debug_2("Uncaching UUID for %s", node); unget_uuid(node); } member = crm_get_peer(0, node); if(member == NULL || crm_is_member_active(member) == FALSE) { /* Make sure it is created so crm_update_peer_proc() succeeds */ const char *uuid = get_uuid(node); member = crm_update_peer(0, 0, 0, -1, 0, uuid, node, NULL, NULL); } crm_update_peer_proc(node, crm_proc_crmd, status); if(is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) { return; } else if(fsa_state == S_STOPPING) { return; } if(safe_str_eq(node, fsa_our_dc) && crm_is_member_active(member) == FALSE) { /* did our DC leave us */ crm_info("Got client status callback - our DC is dead"); register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ELECTION, NULL); } else if(AM_I_DC == FALSE) { crm_info("Not the DC"); } else { crm_debug_3("Got client status callback"); update = create_node_state( node, NULL, NULL, status, join, NULL, clear_shutdown, __FUNCTION__); fsa_cib_anon_update( XML_CIB_TAG_STATUS, update, cib_scope_local|cib_quorum_override|cib_can_create); free_xml(update); if(safe_str_eq(status, OFFLINESTATUS)) { erase_node_from_join(node); check_join_state(fsa_state, __FUNCTION__); 
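/*
 * Hedged sketch -- not part of this patch.  erase_status_tag() now takes a
 * third "options" argument; the callers changed in this patch pass extra
 * cib_call_options through it (cib_scope_local above, 0 in join_client.c).
 * Its real definition is not part of this diff; the version below only
 * illustrates how such an options word would plausibly be folded into a
 * status-section delete.  The xpath format, the cmds->delete call and the
 * cib_xpath flag are assumptions, not confirmed by this patch.
 */
static void
erase_status_tag_sketch(const char *uname, const char *tag, int options)
{
    if (fsa_cib_conn != NULL && uname != NULL) {
        int rc = cib_ok;
        char *xpath = g_strdup_printf(
            "//node_state[@uname='%s']/%s", uname, tag);

        crm_info("Erasing %s from %s", tag, uname);
        rc = fsa_cib_conn->cmds->delete(
            fsa_cib_conn, xpath, NULL,
            cib_quorum_override | cib_xpath | options);

        crm_debug_2("Erase of %s for %s submitted: rc=%d", tag, uname, rc);
        g_free(xpath);
    }
}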
} } trigger_fsa(fsa_source); } void crmd_ipc_connection_destroy(gpointer user_data) { GCHSource *source = NULL; crmd_client_t *client = user_data; /* Calling this function on an _active_ connection results in: * crmd_ipc_connection_destroy (callbacks.c:431) * -> G_main_del_IPC_Channel (GSource.c:478) * -> g_source_unref * -> G_CH_destroy_int (GSource.c:647) * -> crmd_ipc_connection_destroy (callbacks.c:437)\ * * A better alternative is to call G_main_del_IPC_Channel() directly */ if(client == NULL) { crm_debug_4("No client to delete"); return; } crm_debug_2("Disconnecting client %s (%p)", client->table_key, client); source = client->client_source; client->client_source = NULL; if(source != NULL) { crm_debug_3("Deleting %s (%p) from mainloop", client->table_key, source); G_main_del_IPC_Channel(source); } crm_free(client->table_key); crm_free(client->sub_sys); crm_free(client->uuid); crm_free(client); return; } gboolean crmd_client_connect(IPC_Channel *client_channel, gpointer user_data) { crm_debug_3("Invoked"); if (client_channel == NULL) { crm_err("Channel was NULL"); } else if (client_channel->ch_status == IPC_DISCONNECT) { crm_err("Channel was disconnected"); } else { crmd_client_t *blank_client = NULL; crm_debug_3("Channel connected"); crm_malloc0(blank_client, sizeof(crmd_client_t)); CRM_ASSERT(blank_client != NULL); crm_debug_2("Created client: %p", blank_client); client_channel->ops->set_recv_qlen(client_channel, 1024); client_channel->ops->set_send_qlen(client_channel, 1024); blank_client->client_channel = client_channel; blank_client->sub_sys = NULL; blank_client->uuid = NULL; blank_client->table_key = NULL; blank_client->client_source = G_main_add_IPC_Channel( G_PRIORITY_LOW, client_channel, FALSE, crmd_ipc_msg_callback, blank_client, crmd_ipc_connection_destroy); } return TRUE; } #if SUPPORT_HEARTBEAT static gboolean fsa_have_quorum = FALSE; gboolean ccm_dispatch(int fd, gpointer user_data) { int rc = 0; oc_ev_t *ccm_token = (oc_ev_t*)user_data; gboolean was_error = FALSE; crm_debug_3("Invoked"); rc = oc_ev_handle_event(ccm_token); if(rc != 0) { if(is_set(fsa_input_register, R_CCM_DISCONNECTED) == FALSE) { /* we signed out, so this is expected */ register_fsa_input(C_CCM_CALLBACK, I_ERROR, NULL); crm_err("CCM connection appears to have failed: rc=%d.", rc); } was_error = TRUE; } trigger_fsa(fsa_source); return !was_error; } void crmd_ccm_msg_callback( oc_ed_t event, void *cookie, size_t size, const void *data) { gboolean update_cache = FALSE; const oc_ev_membership_t *membership = data; gboolean update_quorum = FALSE; crm_debug_3("Invoked"); CRM_ASSERT(data != NULL); crm_info("Quorum %s after event=%s (id=%d)", ccm_have_quorum(event)?"(re)attained":"lost", ccm_event_name(event), membership->m_instance); if(crm_peer_seq > membership->m_instance) { crm_err("Membership instance ID went backwards! %llu->%d", crm_peer_seq, membership->m_instance); CRM_ASSERT(crm_peer_seq <= membership->m_instance); return; } /* * OC_EV_MS_NEW_MEMBERSHIP: membership with quorum * OC_EV_MS_MS_INVALID: membership without quorum * OC_EV_MS_NOT_PRIMARY: previous membership no longer valid * OC_EV_MS_PRIMARY_RESTORED: previous membership restored * OC_EV_MS_EVICTED: the client is evicted from ccm. 
*/ switch(event) { case OC_EV_MS_NEW_MEMBERSHIP: case OC_EV_MS_INVALID: update_cache = TRUE; update_quorum = TRUE; break; case OC_EV_MS_NOT_PRIMARY: break; case OC_EV_MS_PRIMARY_RESTORED: update_cache = TRUE; crm_peer_seq = membership->m_instance; break; case OC_EV_MS_EVICTED: update_quorum = TRUE; register_fsa_input(C_FSA_INTERNAL, I_STOP, NULL); crm_err("Shutting down after CCM event: %s", ccm_event_name(event)); break; default: crm_err("Unknown CCM event: %d", event); } if(update_quorum) { crm_have_quorum = ccm_have_quorum(event); crm_update_quorum(crm_have_quorum, FALSE); if(crm_have_quorum == FALSE) { /* did we just loose quorum? */ if(fsa_have_quorum) { crm_info("Quorum lost: %s", ccm_event_name(event)); } } } if(update_cache) { crm_debug_2("Updating cache after event %s", ccm_event_name(event)); do_ccm_update_cache(C_CCM_CALLBACK, fsa_state, event, data, NULL); } else if(event != OC_EV_MS_NOT_PRIMARY) { crm_peer_seq = membership->m_instance; register_fsa_action(A_TE_CANCEL); } oc_ev_callback_done(cookie); return; } #endif void crmd_cib_connection_destroy(gpointer user_data) { CRM_CHECK(user_data == fsa_cib_conn, ;); crm_debug_3("Invoked"); trigger_fsa(fsa_source); fsa_cib_conn->state = cib_disconnected; if(is_set(fsa_input_register, R_CIB_CONNECTED) == FALSE) { crm_info("Connection to the CIB terminated..."); return; } /* eventually this will trigger a reconnect, not a shutdown */ crm_err("Connection to the CIB terminated..."); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit_inplace(fsa_input_register, R_CIB_CONNECTED); return; } gboolean crm_fsa_trigger(gpointer user_data) { crm_debug_2("Invoked (queue len: %d)", g_list_length(fsa_message_queue)); s_crmd_fsa(C_FSA_INTERNAL); crm_debug_2("Exited (queue len: %d)", g_list_length(fsa_message_queue)); return TRUE; } diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h index 920a6ca351..152779d4ae 100644 --- a/crmd/crmd_utils.h +++ b/crmd/crmd_utils.h @@ -1,99 +1,99 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef CRMD_UTILS__H #define CRMD_UTILS__H #include #include #define CLIENT_EXIT_WAIT 30 #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" extern void process_client_disconnect(crmd_client_t *curr_client); #define fsa_cib_update(section, data, options, call_id) \ if(fsa_cib_conn != NULL) { \ call_id = fsa_cib_conn->cmds->modify( \ fsa_cib_conn, section, data, options); \ \ } else { \ crm_err("No CIB connection available"); \ } #define fsa_cib_anon_update(section, data, options) \ if(fsa_cib_conn != NULL) { \ fsa_cib_conn->cmds->modify( \ fsa_cib_conn, section, data, options); \ \ } else { \ crm_err("No CIB connection available"); \ } extern gboolean fsa_has_quorum; extern int last_peer_update; extern gboolean crm_timer_stop (fsa_timer_t *timer); extern gboolean crm_timer_start(fsa_timer_t *timer); extern gboolean crm_timer_popped(gpointer data); extern xmlNode *create_node_state( const char *uname, const char *ha_state, const char *ccm_state, const char *crmd_state, const char *join_state, const char *exp_state, gboolean clear_shutdown, const char *src); extern void create_node_entry( const char *uuid, const char *uname, const char *type); extern gboolean stop_subsystem ( struct crm_subsystem_s *centry, gboolean force_quit); extern gboolean start_subsystem(struct crm_subsystem_s *centry); extern lrm_op_t *copy_lrm_op(const lrm_op_t *op); extern lrm_rsc_t *copy_lrm_rsc(const lrm_rsc_t *rsc); extern void fsa_dump_actions(long long action, const char *text); extern void fsa_dump_inputs( int log_level, const char *text, long long input_register); extern gboolean update_dc(xmlNode *msg); extern void erase_node_from_join(const char *node); extern void populate_cib_nodes(gboolean with_client_status); extern void crm_update_quorum(gboolean quorum, gboolean force_update); -extern void erase_status_tag(const char *uname, const char *tag); +extern void erase_status_tag(const char *uname, const char *tag, int options); extern void update_attrd(const char *host, const char *name, const char *value); extern const char *get_timer_desc(fsa_timer_t *timer); #define start_transition(state) do { \ switch(state) { \ case S_TRANSITION_ENGINE: \ register_fsa_action(A_TE_CANCEL); \ break; \ case S_POLICY_ENGINE: \ case S_IDLE: \ register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); \ break; \ default: \ crm_debug("NOT starting a new transition in state %s", \ fsa_state2string(fsa_state)); \ break; \ } \ } while(0) #endif diff --git a/crmd/join_client.c b/crmd/join_client.c index 6e6a535b10..ce3dd3cb40 100644 --- a/crmd/join_client.c +++ b/crmd/join_client.c @@ -1,297 +1,297 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include int reannounce_count = 0; void join_query_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); extern ha_msg_input_t *copy_ha_msg_input(ha_msg_input_t *orig); /* A_CL_JOIN_QUERY */ /* is there a DC out there? */ void do_cl_join_query(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { xmlNode *req = create_request(CRM_OP_JOIN_ANNOUNCE, NULL, NULL, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); sleep(1); /* give the CCM time to propogate to the DC */ update_dc(NULL); /* Unset any existing value so that the result is not discarded */ crm_debug("Querying for a DC"); send_cluster_message(NULL, crm_msg_crmd, req, FALSE); free_xml(req); } /* A_CL_JOIN_ANNOUNCE */ /* this is kind of a workaround for the fact that we may not be around * or are otherwise unable to reply when the DC sends out A_WELCOME_ALL */ void do_cl_join_announce(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { /* Once we hear from the DC, we can stop the timer * * This timer was started either on startup or when a node * left the CCM list */ /* dont announce if we're in one of these states */ if(cur_state != S_PENDING) { crm_warn("Do not announce ourselves in state %s", fsa_state2string(cur_state)); return; } if(AM_I_OPERATIONAL) { /* send as a broadcast */ xmlNode *req = create_request( CRM_OP_JOIN_ANNOUNCE, NULL, NULL, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); crm_debug("Announcing availability"); update_dc(NULL); send_cluster_message(NULL, crm_msg_crmd, req, FALSE); free_xml(req); } else { /* Delay announce until we have finished local startup */ crm_warn("Delaying announce until local startup is complete"); return; } } static int query_call_id = 0; /* A_CL_JOIN_REQUEST */ /* aka. accept the welcome offer */ void do_cl_join_offer_respond(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM); const char *join_id = crm_element_value(input->msg, F_CRM_JOIN_ID); #if 0 if(we are sick) { log error ; /* save the request for later? 
*/ return; } #endif crm_debug_2("Accepting join offer: join-%s", crm_element_value(input->msg, F_CRM_JOIN_ID)); /* we only ever want the last one */ if(query_call_id > 0) { crm_debug_3("Cancelling previous join query: %d", query_call_id); remove_cib_op_callback(query_call_id, FALSE); query_call_id = 0; } if(update_dc(input->msg) == FALSE) { crm_warn("Discarding offer from %s (expected %s)", welcome_from, fsa_our_dc); return; } CRM_DEV_ASSERT(input != NULL); query_call_id = fsa_cib_conn->cmds->query( fsa_cib_conn, NULL, NULL, cib_scope_local); add_cib_op_callback( fsa_cib_conn, query_call_id, FALSE, crm_strdup(join_id), join_query_callback); crm_debug_2("Registered join query callback: %d", query_call_id); register_fsa_action(A_DC_TIMER_STOP); } void join_query_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { xmlNode *local_cib = NULL; char *join_id = user_data; xmlNode *generation = create_xml_node( NULL, XML_CIB_TAG_GENERATION_TUPPLE); CRM_DEV_ASSERT(join_id != NULL); query_call_id = 0; if(rc == cib_ok) { local_cib = output; CRM_DEV_ASSERT(safe_str_eq(crm_element_name(local_cib), XML_TAG_CIB)); } if(local_cib != NULL) { xmlNode *reply = NULL; crm_debug("Respond to join offer join-%s", join_id); crm_debug("Acknowledging %s as our DC", fsa_our_dc); copy_in_properties(generation, local_cib); reply = create_request( CRM_OP_JOIN_REQUEST, generation, fsa_our_dc, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); crm_xml_add(reply, F_CRM_JOIN_ID, join_id); send_cluster_message(fsa_our_dc, crm_msg_crmd, reply, TRUE); free_xml(reply); } else { crm_err("Could not retrieve Generation to attach to our" " join acknowledgement: %s", cib_error2string(rc)); register_fsa_error_adv( C_FSA_INTERNAL, I_ERROR, NULL, NULL, __FUNCTION__); } crm_free(join_id); free_xml(generation); } /* A_CL_JOIN_RESULT */ /* aka. this is notification that we have (or have not) been accepted */ void do_cl_join_finalize_respond(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { xmlNode *tmp1 = NULL; gboolean was_nack = TRUE; static gboolean first_join = TRUE; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); int join_id = -1; const char *op = crm_element_value(input->msg,F_CRM_TASK); const char *ack_nack = crm_element_value(input->msg,CRM_OP_JOIN_ACKNAK); const char *welcome_from = crm_element_value(input->msg,F_CRM_HOST_FROM); if(safe_str_neq(op, CRM_OP_JOIN_ACKNAK)) { crm_debug_2("Ignoring op=%s message", op); return; } /* calculate if it was an ack or a nack */ if(crm_is_true(ack_nack)) { was_nack = FALSE; } crm_element_value_int(input->msg, F_CRM_JOIN_ID, &join_id); if(was_nack) { crm_err("Join (join-%d) with leader %s failed (NACK'd): Shutting down", join_id, welcome_from); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if(AM_I_DC == FALSE && safe_str_eq(welcome_from, fsa_our_uname)) { crm_warn("Discarding our own welcome - we're no longer the DC"); return; } if(update_dc(input->msg) == FALSE) { crm_warn("Discarding %s from %s (expected %s)", op, welcome_from, fsa_our_dc); return; } /* send our status section to the DC */ crm_debug("Confirming join join-%d: %s", join_id, crm_element_value(input->msg, F_CRM_TASK)); tmp1 = do_lrm_query(TRUE); if(tmp1 != NULL) { xmlNode *reply = create_request( CRM_OP_JOIN_CONFIRM, tmp1, fsa_our_dc, CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL); crm_xml_add_int(reply, F_CRM_JOIN_ID, join_id); crm_debug("join-%d: Join complete." 
" Sending local LRM status to %s", join_id, fsa_our_dc); if(first_join) { first_join = FALSE; /* * Clear any previous transient node attribute and lrm operations * * OpenAIS has a nasty habit of not being able to tell if a * node is returning or didn't leave in the first place. * This confuses Pacemaker because it never gets a "node up" * event which is normally used to clean up the status section. * * Do not remove the resources though, they'll be cleaned up in * do_dc_join_ack(). Removing them here creates a race * condition if the crmd is being recovered. * Instead of a list of active resources from the lrmd * we may end up with a blank status section. * If we are _NOT_ lucky, we will probe for the "wrong" instance * of anonymous clones and end up with multiple active * instances on the machine. */ - erase_status_tag(fsa_our_uname, XML_TAG_TRANSIENT_NODEATTRS); + erase_status_tag(fsa_our_uname, XML_TAG_TRANSIENT_NODEATTRS, 0); /* Just in case attrd was still around too */ if(is_not_set(fsa_input_register, R_SHUTDOWN)) { update_attrd(fsa_our_uname, "terminate", NULL); update_attrd(fsa_our_uname, XML_CIB_ATTR_SHUTDOWN, NULL); } } send_cluster_message(fsa_our_dc, crm_msg_crmd, reply, TRUE); free_xml(reply); if(AM_I_DC == FALSE) { register_fsa_input_adv( cause, I_NOT_DC, NULL, A_NOTHING, TRUE, __FUNCTION__); update_attrd(NULL, NULL, NULL); } free_xml(tmp1); } else { crm_err("Could not send our LRM state to the DC"); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } } diff --git a/crmd/join_dc.c b/crmd/join_dc.c index 733acbbf56..a4fac20c00 100644 --- a/crmd/join_dc.c +++ b/crmd/join_dc.c @@ -1,706 +1,706 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include GHashTable *welcomed_nodes = NULL; GHashTable *integrated_nodes = NULL; GHashTable *finalized_nodes = NULL; GHashTable *confirmed_nodes = NULL; char *max_epoch = NULL; char *max_generation_from = NULL; xmlNode *max_generation_xml = NULL; void initialize_join(gboolean before); gboolean finalize_join_for(gpointer key, gpointer value, gpointer user_data); void finalize_sync_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); static int current_join_id = 0; unsigned long long saved_ccm_membership_id = 0; void initialize_join(gboolean before) { /* clear out/reset a bunch of stuff */ crm_debug("join-%d: Initializing join data (flag=%s)", current_join_id, before?"true":"false"); g_hash_table_destroy(welcomed_nodes); g_hash_table_destroy(integrated_nodes); g_hash_table_destroy(finalized_nodes); g_hash_table_destroy(confirmed_nodes); if(before) { if(max_generation_from != NULL) { crm_free(max_generation_from); max_generation_from = NULL; } if(max_generation_xml != NULL) { free_xml(max_generation_xml); max_generation_xml = NULL; } clear_bit_inplace(fsa_input_register, R_HAVE_CIB); clear_bit_inplace(fsa_input_register, R_CIB_ASKED); } welcomed_nodes = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); integrated_nodes = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); finalized_nodes = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); confirmed_nodes = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); } void erase_node_from_join(const char *uname) { gboolean w = FALSE, i = FALSE, f = FALSE, c = FALSE; if(uname == NULL) { return; } if(welcomed_nodes != NULL) { w = g_hash_table_remove(welcomed_nodes, uname); } if(integrated_nodes != NULL) { i = g_hash_table_remove(integrated_nodes, uname); } if(finalized_nodes != NULL) { f = g_hash_table_remove(finalized_nodes, uname); } if(confirmed_nodes != NULL) { c = g_hash_table_remove(confirmed_nodes, uname); } if(w || i || f || c) { crm_info("Removed node %s from join calculations:" " welcomed=%d itegrated=%d finalized=%d confirmed=%d", uname, w, i, f, c); } } static void join_make_offer(gpointer key, gpointer value, gpointer user_data) { const char *join_to = NULL; const crm_node_t *member = value; CRM_ASSERT(member != NULL); if(crm_is_member_active(member) == FALSE) { return; } join_to = member->uname; if(join_to == NULL) { crm_err("No recipient for welcome message"); return; } erase_node_from_join(join_to); if(saved_ccm_membership_id != crm_peer_seq) { saved_ccm_membership_id = crm_peer_seq; crm_info("Making join offers based on membership %llu", crm_peer_seq); } if(member->processes & crm_proc_crmd) { xmlNode *offer = create_request( CRM_OP_JOIN_OFFER, NULL, join_to, CRM_SYSTEM_CRMD, CRM_SYSTEM_DC, NULL); char *join_offered = crm_itoa(current_join_id); crm_xml_add_int(offer, F_CRM_JOIN_ID, current_join_id); /* send the welcome */ crm_debug("join-%d: Sending offer to %s", current_join_id, join_to); send_cluster_message(join_to, crm_msg_crmd, offer, TRUE); free_xml(offer); g_hash_table_insert( welcomed_nodes, 
crm_strdup(join_to), join_offered); } else { crm_info("Peer process on %s is not active (yet?): %.8lx %d", join_to, (long)member->processes, g_hash_table_size(crm_peer_cache)); } } /* A_DC_JOIN_OFFER_ALL */ void do_dc_join_offer_all(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { /* reset everyones status back to down or in_ccm in the CIB * * any nodes that are active in the CIB but not in the CCM list * will be seen as offline by the PE anyway */ current_join_id++; initialize_join(TRUE); /* do_update_cib_nodes(TRUE, __FUNCTION__); */ update_dc(NULL); if(cause == C_HA_MESSAGE && current_input == I_NODE_JOIN) { crm_info("A new node joined the cluster"); } g_hash_table_foreach(crm_peer_cache, join_make_offer, NULL); /* dont waste time by invoking the PE yet; */ crm_info("join-%d: Waiting on %d outstanding join acks", current_join_id, g_hash_table_size(welcomed_nodes)); } /* A_DC_JOIN_OFFER_ONE */ void do_dc_join_offer_one(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { crm_node_t *member; ha_msg_input_t *welcome = NULL; const char *op = NULL; const char *join_to = NULL; if(msg_data->data) { welcome = fsa_typed_data(fsa_dt_ha_msg); } else { crm_info("A new node joined - wait until it contacts us"); return; } if(welcome == NULL) { crm_err("Attempt to send welcome message " "without a message to reply to!"); return; } join_to = crm_element_value(welcome->msg, F_CRM_HOST_FROM); if(join_to == NULL) { crm_err("Attempt to send welcome message " "without a host to reply to!"); return; } member = crm_get_peer(0, join_to); if(member == NULL || crm_is_member_active(member) == FALSE) { crm_err("Attempt to send welcome message " "to a node not part of our partition!"); return; } op = crm_element_value(welcome->msg, F_CRM_TASK); if(join_to != NULL && (cur_state == S_INTEGRATION || cur_state == S_FINALIZE_JOIN)) { /* note: it _is_ possible that a node will have been * sick or starting up when the original offer was made. * however, it will either re-announce itself in due course * _or_ we can re-store the original offer on the client. 
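 * Either way, the join sequence number keeps both sides in step: a
 * reply that arrives carrying a stale join_id is simply discarded by
 * do_dc_join_filter_offer() below.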
*/ crm_debug("(Re-)offering membership to %s...", join_to); } crm_info("join-%d: Processing %s request from %s in state %s", current_join_id, op, join_to, fsa_state2string(cur_state)); join_make_offer(NULL, member, NULL); /* always offer to the DC (ourselves) * this ensures the correct value for max_generation_from */ member = crm_get_peer(0, fsa_our_uname); join_make_offer(NULL, member, NULL); /* this was a genuine join request, cancel any existing * transition and invoke the PE */ start_transition(fsa_state); /* dont waste time by invoking the pe yet; */ crm_debug("Waiting on %d outstanding join acks for join-%d", g_hash_table_size(welcomed_nodes), current_join_id); } static int compare_int_fields(xmlNode *left, xmlNode *right, const char *field) { const char *elem_l = crm_element_value(left, field); const char *elem_r = crm_element_value(right, field); int int_elem_l = crm_int_helper(elem_l, NULL); int int_elem_r = crm_int_helper(elem_r, NULL); if(int_elem_l < int_elem_r) { return -1; } else if(int_elem_l > int_elem_r) { return 1; } return 0; } /* A_DC_JOIN_PROCESS_REQ */ void do_dc_join_filter_offer(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { xmlNode *generation = NULL; int cmp = 0; int join_id = -1; gboolean ack_nack_bool = TRUE; const char *ack_nack = CRMD_JOINSTATE_MEMBER; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); const char *ref = crm_element_value(join_ack->msg, XML_ATTR_REFERENCE); crm_node_t *join_node = crm_get_peer(0, join_from); crm_debug("Processing req from %s", join_from); generation = join_ack->xml; crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id); if(max_generation_xml != NULL && generation != NULL) { int lpc = 0; const char *attributes[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; for(lpc = 0; cmp == 0 && lpc < DIMOF(attributes); lpc++) { cmp = compare_int_fields(max_generation_xml, generation, attributes[lpc]); } } if(join_id != current_join_id) { crm_debug("Invalid response from %s: join-%d vs. 
join-%d", join_from, join_id, current_join_id); check_join_state(cur_state, __FUNCTION__); return; } else if(join_node == NULL || crm_is_member_active(join_node) == FALSE) { crm_err("Node %s is not a member", join_from); ack_nack_bool = FALSE; } else if(generation == NULL) { crm_err("Generation was NULL"); ack_nack_bool = FALSE; } else if(max_generation_xml == NULL) { max_generation_xml = copy_xml(generation); max_generation_from = crm_strdup(join_from); } else if(cmp < 0 || (cmp == 0 && safe_str_eq(join_from, fsa_our_uname))) { crm_debug("%s has a better generation number than" " the current max %s", join_from, max_generation_from); if(max_generation_xml) { crm_log_xml_debug(max_generation_xml, "Max generation"); } crm_log_xml_debug(generation, "Their generation"); crm_free(max_generation_from); free_xml(max_generation_xml); max_generation_from = crm_strdup(join_from); max_generation_xml = copy_xml(join_ack->xml); } if(ack_nack_bool == FALSE) { /* NACK this client */ ack_nack = CRMD_JOINSTATE_NACK; crm_err("join-%d: NACK'ing node %s (ref %s)", join_id, join_from, ref); } else { crm_debug("join-%d: Welcoming node %s (ref %s)", join_id, join_from, ref); } /* add them to our list of CRMD_STATE_ACTIVE nodes */ g_hash_table_insert( integrated_nodes, crm_strdup(join_from), crm_strdup(ack_nack)); crm_debug("%u nodes have been integrated into join-%d", g_hash_table_size(integrated_nodes), join_id); g_hash_table_remove(welcomed_nodes, join_from); if(check_join_state(cur_state, __FUNCTION__) == FALSE) { /* dont waste time by invoking the PE yet; */ crm_debug("join-%d: Still waiting on %d outstanding offers", join_id, g_hash_table_size(welcomed_nodes)); } } /* A_DC_JOIN_FINALIZE */ void do_dc_join_finalize(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { char *sync_from = NULL; enum cib_errors rc = cib_ok; /* This we can do straight away and avoid clients timing us out * while we compute the latest CIB */ crm_debug("Finializing join-%d for %d clients", current_join_id, g_hash_table_size(integrated_nodes)); if(g_hash_table_size(integrated_nodes) == 0) { /* If we don't even have ourself, start again */ register_fsa_error_adv( C_FSA_INTERNAL, I_ELECTION_DC, NULL, NULL, __FUNCTION__); return; } clear_bit_inplace(fsa_input_register, R_HAVE_CIB); if(max_generation_from == NULL || safe_str_eq(max_generation_from, fsa_our_uname)){ set_bit_inplace(fsa_input_register, R_HAVE_CIB); } if(is_set(fsa_input_register, R_IN_TRANSITION)) { crm_warn("join-%d: We are still in a transition." 
" Delaying until the TE completes.", current_join_id); crmd_fsa_stall(NULL); return; } if(is_set(fsa_input_register, R_HAVE_CIB) == FALSE) { /* ask for the agreed best CIB */ sync_from = crm_strdup(max_generation_from); crm_log_xml_debug(max_generation_xml, "Requesting version"); set_bit_inplace(fsa_input_register, R_CIB_ASKED); } else { /* Send _our_ CIB out to everyone */ sync_from = crm_strdup(fsa_our_uname); } crm_info("join-%d: Syncing the CIB from %s to the rest of the cluster", current_join_id, sync_from); rc = fsa_cib_conn->cmds->sync_from( fsa_cib_conn, sync_from, NULL,cib_quorum_override); fsa_cib_conn->cmds->register_callback( fsa_cib_conn, rc, 60, FALSE, sync_from, "finalize_sync_callback", finalize_sync_callback); } void finalize_sync_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { CRM_DEV_ASSERT(cib_not_master != rc); clear_bit_inplace(fsa_input_register, R_CIB_ASKED); if(rc != cib_ok) { do_crm_log((rc==cib_old_data?LOG_WARNING:LOG_ERR), "Sync from %s resulted in an error: %s", (char*)user_data, cib_error2string(rc)); /* restart the whole join process */ register_fsa_error_adv( C_FSA_INTERNAL, I_ELECTION_DC,NULL,NULL,__FUNCTION__); } else if(AM_I_DC && fsa_state == S_FINALIZE_JOIN) { set_bit_inplace(fsa_input_register, R_HAVE_CIB); clear_bit_inplace(fsa_input_register, R_CIB_ASKED); /* make sure dc_uuid is re-set to us */ if(check_join_state(fsa_state, __FUNCTION__) == FALSE) { crm_debug("Notifying %d clients of join-%d results", g_hash_table_size(integrated_nodes), current_join_id); g_hash_table_foreach_remove( integrated_nodes, finalize_join_for, NULL); } } else { crm_debug("No longer the DC in S_FINALIZE_JOIN: %s/%s", AM_I_DC?"DC":"CRMd", fsa_state2string(fsa_state)); } crm_free(user_data); } static void join_update_complete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { fsa_data_t *msg_data = NULL; if(rc == cib_ok) { crm_debug("Join update %d complete", call_id); check_join_state(fsa_state, __FUNCTION__); } else { crm_err("Join update %d failed", call_id); crm_log_xml(LOG_DEBUG, "failed", msg); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } /* A_DC_JOIN_PROCESS_ACK */ void do_dc_join_ack(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { int join_id = -1; int call_id = 0; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); const char *join_id_s = NULL; const char *join_state = NULL; const char *op = crm_element_value(join_ack->msg, F_CRM_TASK); const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM); if(safe_str_neq(op, CRM_OP_JOIN_CONFIRM)) { crm_debug("Ignoring op=%s message from %s", op, join_from); return; } crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id); join_id_s = crm_element_value(join_ack->msg, F_CRM_JOIN_ID); /* now update them to "member" */ crm_debug_2("Processing ack from %s", join_from); join_state = (const char *) g_hash_table_lookup(finalized_nodes, join_from); if(join_state == NULL) { crm_err("Join not in progress: ignoring join-%d from %s", join_id, join_from); return; } else if(safe_str_neq(join_state, CRMD_JOINSTATE_MEMBER)) { crm_err("Node %s wasnt invited to join the cluster",join_from); g_hash_table_remove(finalized_nodes, join_from); return; } else if(join_id != current_join_id) { crm_err("Invalid response from %s: join-%d vs. 
join-%d", join_from, join_id, current_join_id); g_hash_table_remove(finalized_nodes, join_from); return; } g_hash_table_remove(finalized_nodes, join_from); if(g_hash_table_lookup(confirmed_nodes, join_from) != NULL) { crm_err("join-%d: hash already contains confirmation from %s", join_id, join_from); } g_hash_table_insert( confirmed_nodes, crm_strdup(join_from), crm_strdup(join_id_s)); crm_info("join-%d: Updating node state to %s for %s", join_id, CRMD_JOINSTATE_MEMBER, join_from); /* update CIB with the current LRM status from the node * We dont need to notify the TE of these updates, a transition will * be started in due time */ - erase_status_tag(join_from, XML_CIB_TAG_LRM); + erase_status_tag(join_from, XML_CIB_TAG_LRM, cib_scope_local); fsa_cib_update(XML_CIB_TAG_STATUS, join_ack->xml, cib_scope_local|cib_quorum_override|cib_can_create, call_id); add_cib_op_callback( fsa_cib_conn, call_id, FALSE, NULL, join_update_complete_callback); crm_debug("join-%d: Registered callback for LRM update %d", join_id, call_id); } gboolean finalize_join_for(gpointer key, gpointer value, gpointer user_data) { const char *join_to = NULL; const char *join_state = NULL; xmlNode *acknak = NULL; crm_node_t *join_node = NULL; if(key == NULL || value == NULL) { return TRUE; } join_to = (const char *)key; join_state = (const char *)value; /* make sure the node exists in the config section */ create_node_entry(join_to, join_to, NORMALNODE); join_node = crm_get_peer(0, join_to); if(crm_is_member_active(join_node) == FALSE) { /* * NACK'ing nodes that the membership layer doesn't know about yet * simply creates more churn * * Better to leave them waiting and let the join restart when * the new membership event comes in * * All other NACKs (due to versions etc) should still be processed */ return TRUE; } /* send the ack/nack to the node */ acknak = create_request( CRM_OP_JOIN_ACKNAK, NULL, join_to, CRM_SYSTEM_CRMD, CRM_SYSTEM_DC, NULL); crm_xml_add_int(acknak, F_CRM_JOIN_ID, current_join_id); /* set the ack/nack */ if(safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) { crm_debug("join-%d: ACK'ing join request from %s, state %s", current_join_id, join_to, join_state); crm_xml_add(acknak, CRM_OP_JOIN_ACKNAK, XML_BOOLEAN_TRUE); g_hash_table_insert( finalized_nodes, crm_strdup(join_to), crm_strdup(CRMD_JOINSTATE_MEMBER)); } else { crm_warn("join-%d: NACK'ing join request from %s, state %s", current_join_id, join_to, join_state); crm_xml_add(acknak, CRM_OP_JOIN_ACKNAK, XML_BOOLEAN_FALSE); } send_cluster_message(join_to, crm_msg_crmd, acknak, TRUE); free_xml(acknak); return TRUE; } void ghash_print_node(gpointer key, gpointer value, gpointer user_data); gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source) { crm_debug("Invoked by %s in state: %s", source, fsa_state2string(cur_state)); if(saved_ccm_membership_id != crm_peer_seq) { crm_info("%s: Membership changed since join started: %llu -> %llu", source, saved_ccm_membership_id, crm_peer_seq); register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL); } else if(cur_state == S_INTEGRATION) { if(g_hash_table_size(welcomed_nodes) == 0) { crm_debug("join-%d: Integration of %d peers complete: %s", current_join_id, g_hash_table_size(integrated_nodes), source); register_fsa_input_before( C_FSA_INTERNAL, I_INTEGRATED, NULL); return TRUE; } } else if(cur_state == S_FINALIZE_JOIN) { if(is_set(fsa_input_register, R_HAVE_CIB) == FALSE) { crm_debug("join-%d: Delaying I_FINALIZED until we have the CIB", current_join_id); return TRUE; } else 
if(g_hash_table_size(integrated_nodes) == 0 && g_hash_table_size(finalized_nodes) == 0) { crm_debug("join-%d complete: %s", current_join_id, source); register_fsa_input_later(C_FSA_INTERNAL, I_FINALIZED, NULL); } else if(g_hash_table_size(integrated_nodes) != 0 && g_hash_table_size(finalized_nodes) != 0) { char *msg = NULL; crm_err("join-%d: Waiting on %d integrated nodes" " AND %d finalized nodes", current_join_id, g_hash_table_size(integrated_nodes), g_hash_table_size(finalized_nodes)); msg = crm_strdup("Integrated node"); g_hash_table_foreach(integrated_nodes, ghash_print_node, msg); crm_free(msg); msg = crm_strdup("Finalized node"); g_hash_table_foreach(finalized_nodes, ghash_print_node, msg); crm_free(msg); } else if(g_hash_table_size(integrated_nodes) != 0) { crm_debug("join-%d: Still waiting on %d integrated nodes", current_join_id, g_hash_table_size(integrated_nodes)); } else if(g_hash_table_size(finalized_nodes) != 0) { crm_debug("join-%d: Still waiting on %d finalized nodes", current_join_id, g_hash_table_size(finalized_nodes)); } } return FALSE; } void do_dc_join_final(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { crm_info("Ensuring DC, quorum and node attributes are up-to-date"); update_attrd(NULL, NULL, NULL); crm_update_quorum(crm_have_quorum, TRUE); } diff --git a/crmd/lrm.c b/crmd/lrm.c index fa459bb06e..4be970ef0f 100644 --- a/crmd/lrm.c +++ b/crmd/lrm.c @@ -1,1775 +1,1775 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct recurring_op_s { char *rsc_id; char *op_key; int call_id; int interval; gboolean remove; gboolean cancelled; }; char *make_stop_id(const char *rsc, int call_id); void cib_rsc_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); gboolean build_operation_update( xmlNode *rsc_list, lrm_rsc_t *rsc, lrm_op_t *op, const char *src, int lpc, int level); gboolean build_active_RAs(xmlNode *rsc_list); gboolean is_rsc_active(const char *rsc_id); int do_update_resource(lrm_op_t *op); gboolean process_lrm_event(lrm_op_t *op); void do_lrm_rsc_op(lrm_rsc_t *rsc, const char *operation, xmlNode *msg, xmlNode *request); lrm_op_t *construct_op( xmlNode *rsc_op, const char *rsc_id, const char *operation); void send_direct_ack(const char *to_host, const char *to_sys, lrm_rsc_t *rsc, lrm_op_t* op, const char *rsc_id); void free_recurring_op(gpointer value); GHashTable *resources = NULL; GHashTable *pending_ops = NULL; GCHSource *lrm_source = NULL; int num_lrm_register_fails = 0; int max_lrm_register_fails = 30; void lrm_connection_destroy(gpointer user_data) { if(is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("LRM Connection failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit_inplace(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("LRM Connection disconnected"); } lrm_source = NULL; } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { if(fsa_lrm_conn == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if(action & A_LRM_DISCONNECT) { if(verify_stopped(cur_state, LOG_INFO) == FALSE) { crmd_fsa_stall(NULL); return; } if(is_set(fsa_input_register, R_LRM_CONNECTED)) { clear_bit_inplace(fsa_input_register, R_LRM_CONNECTED); fsa_lrm_conn->lrm_ops->signoff(fsa_lrm_conn); crm_info("Disconnected from the LRM"); } /* TODO: Clean up the hashtable */ } if(action & A_LRM_CONNECT) { int ret = HA_OK; pending_ops = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, free_recurring_op); resources = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); if(ret == HA_OK) { crm_debug("Connecting to the LRM"); ret = fsa_lrm_conn->lrm_ops->signon( fsa_lrm_conn, CRM_SYSTEM_CRMD); } if(ret != HA_OK) { if(++num_lrm_register_fails < max_lrm_register_fails) { crm_warn("Failed to sign on to the LRM %d" " (%d max) times", num_lrm_register_fails, max_lrm_register_fails); crm_timer_start(wait_timer); crmd_fsa_stall(NULL); return; } } if(ret == HA_OK) { crm_debug_4("LRM: set_lrm_callback..."); ret = fsa_lrm_conn->lrm_ops->set_lrm_callback( fsa_lrm_conn, lrm_op_callback); if(ret != HA_OK) { crm_err("Failed to set LRM callbacks"); } } if(ret != HA_OK) { crm_err("Failed to sign on to the LRM %d" " (max) times", num_lrm_register_fails); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } /* TODO: create a destroy handler that causes * some recovery to happen */ lrm_source = G_main_add_IPC_Channel( G_PRIORITY_LOW, fsa_lrm_conn->lrm_ops->ipcchan(fsa_lrm_conn), FALSE, lrm_dispatch, fsa_lrm_conn, lrm_connection_destroy); 
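	/* lrm_dispatch is now driven from the mainloop at low priority; if the
	 * connection later drops, lrm_connection_destroy() above clears
	 * R_LRM_CONNECTED and raises I_ERROR so the FSA can react. */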
set_bit_inplace(fsa_input_register, R_LRM_CONNECTED); crm_debug("LRM connection established"); } if(action & ~(A_LRM_CONNECT|A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static void ghash_print_pending(gpointer key, gpointer value, gpointer user_data) { const char *stop_id = key; int *log_level = user_data; struct recurring_op_s *pending = value; do_crm_log(*log_level, "Pending action: %s (%s)", stop_id, pending->op_key); } static void ghash_print_pending_for_rsc(gpointer key, gpointer value, gpointer user_data) { const char *stop_id = key; char *rsc = user_data; struct recurring_op_s *pending = value; if(safe_str_eq(rsc, pending->rsc_id)) { do_crm_log(LOG_NOTICE, "%sction %s (%s) incomplete at shutdown", pending->interval==0?"A":"Recurring a", stop_id, pending->op_key); } } static void ghash_count_pending(gpointer key, gpointer value, gpointer user_data) { int *counter = user_data; struct recurring_op_s *pending = value; if(pending->interval > 0) { /* Ignore recurring actions in the shutdown calculations */ return; } (*counter)++; } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; GListPtr lrm_list = NULL; crm_debug("Checking for active resources before exit"); if(cur_state == S_TERMINATE) { log_level = LOG_ERR; } if(pending_ops) { g_hash_table_foreach(pending_ops, ghash_count_pending, &counter); } if(counter > 0) { rc = FALSE; do_crm_log(log_level, "%d pending LRM operations at shutdown%s", counter, cur_state == S_TERMINATE?"":"... waiting"); if(cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_foreach( pending_ops, ghash_print_pending, &log_level); } goto bail; } if(is_set(fsa_input_register, R_LRM_CONNECTED)) { lrm_list = fsa_lrm_conn->lrm_ops->get_all_rscs(fsa_lrm_conn); } slist_iter( rsc_id, char, lrm_list, lpc, if(is_rsc_active(rsc_id) == FALSE) { continue; } crm_err("Resource %s was active at shutdown." 
" You may ignore this error if it is unmanaged.", rsc_id); g_hash_table_foreach( pending_ops, ghash_print_pending_for_rsc, rsc_id); ); slist_destroy(char, rid, lrm_list, free(rid)); bail: set_bit_inplace(fsa_input_register, R_SENT_RSC_STOP); if(cur_state == S_TERMINATE) { rc = TRUE; } return rc; } static char * get_rsc_metadata(const char *type, const char *class, const char *provider) { char *metadata = NULL; CRM_CHECK(type != NULL, return NULL); CRM_CHECK(class != NULL, return NULL); if(provider == NULL) { provider = "heartbeat"; } crm_debug_2("Retreiving metadata for %s::%s:%s", type, class, provider); metadata = fsa_lrm_conn->lrm_ops->get_rsc_type_metadata( fsa_lrm_conn, class, type, provider); if(metadata) { /* copy the metadata because the LRM likes using * g_alloc instead of cl_malloc */ char *m_copy = crm_strdup(metadata); g_free(metadata); metadata = m_copy; } else { crm_warn("No metadata found for %s::%s:%s", type, class, provider); } return metadata; } typedef struct reload_data_s { char *key; char *metadata; time_t last_query; gboolean can_reload; GListPtr restart_list; } reload_data_t; static void g_hash_destroy_reload(gpointer data) { reload_data_t *reload = data; crm_free(reload->key); crm_free(reload->metadata); slist_destroy(char, child, reload->restart_list, crm_free(child)); crm_free(reload); } GHashTable *reload_hash = NULL; static GListPtr get_rsc_restart_list(lrm_rsc_t *rsc, lrm_op_t *op) { int len = 0; char *key = NULL; char *copy = NULL; const char *value = NULL; const char *provider = NULL; xmlNode *params = NULL; xmlNode *actions = NULL; xmlNode *metadata = NULL; time_t now = time(NULL); reload_data_t *reload = NULL; if(reload_hash == NULL) { reload_hash = g_hash_table_new_full( g_str_hash, g_str_equal, NULL, g_hash_destroy_reload); } provider = rsc->provider; if(provider == NULL) { provider = "heartbeat"; } len = strlen(rsc->type) + strlen(rsc->class) + strlen(provider) + 4; crm_malloc(key, len); snprintf(key, len, "%s::%s:%s", rsc->type, rsc->class, provider); reload = g_hash_table_lookup(reload_hash, key); if(reload && ((now - 9) > reload->last_query) && safe_str_eq(op->op_type, RSC_START)) { reload = NULL; /* re-query */ } if(reload == NULL) { crm_malloc0(reload, sizeof(reload_data_t)); g_hash_table_replace(reload_hash, key, reload); reload->last_query = now; reload->key = key; key = NULL; reload->metadata = get_rsc_metadata(rsc->type, rsc->class, provider); metadata = string2xml(reload->metadata); if(metadata == NULL) { crm_err("Metadata for %s::%s:%s is not valid XML", rsc->provider, rsc->class, rsc->type); goto cleanup; } actions = find_xml_node(metadata, "actions", TRUE); xml_child_iter_filter( actions, action, "action", value = crm_element_value(action, "name"); if(safe_str_eq("reload", value)) { reload->can_reload = TRUE; break; } ); if(reload->can_reload == FALSE) { goto cleanup; } params = find_xml_node(metadata, "parameters", TRUE); xml_child_iter_filter( params, param, "parameter", value = crm_element_value(param, "unique"); if(crm_is_true(value)) { value = crm_element_value(param, "name"); if(value == NULL) { crm_err("%s: NULL param", key); continue; } crm_debug("Attr %s is not reloadable", value); copy = crm_strdup(value); CRM_CHECK(copy != NULL, continue); reload->restart_list = g_list_append(reload->restart_list, copy); } ); } cleanup: crm_free(key); free_xml(metadata); return reload?reload->restart_list:NULL; } static void append_restart_list(lrm_rsc_t *rsc, lrm_op_t *op, xmlNode *update, const char *version) { int len = 0; char *list = NULL; 
char *digest = NULL; const char *value = NULL; gboolean non_empty = FALSE; xmlNode *restart = NULL; GListPtr restart_list = NULL; if(op->interval > 0) { /* monitors are not reloadable */ return; } else if(op->params == NULL) { crm_debug("%s has no parameters", ID(update)); return; } else if(rsc == NULL) { return; } else if(crm_str_eq(CRMD_ACTION_START, op->op_type, TRUE) == FALSE) { /* only starts are potentially reloadable */ return; } else if(compare_version("1.0.8", version) > 0) { /* Caller version does not support reloads */ return; } restart_list = get_rsc_restart_list(rsc, op); if(restart_list == NULL) { /* Resource does not support reloads */ return; } restart = create_xml_node(NULL, XML_TAG_PARAMS); slist_iter(param, const char, restart_list, lpc, int start = len; CRM_CHECK(param != NULL, continue); value = g_hash_table_lookup(op->params, param); if(value != NULL) { non_empty = TRUE; crm_xml_add(restart, param, value); } len += strlen(param) + 2; crm_realloc(list, len+1); sprintf(list+start, " %s ", param); ); digest = calculate_xml_digest(restart, TRUE, FALSE); crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); #if 0 crm_debug("%s: %s, %s", rsc->id, digest, list); if(non_empty) { crm_log_xml_debug(restart, "restart digest source"); } #endif free_xml(restart); crm_free(digest); crm_free(list); } gboolean build_operation_update( xmlNode *parent, lrm_rsc_t *rsc, lrm_op_t *op, const char *src, int lpc, int level) { xmlNode *xml_op = NULL; const char *caller_version = CRM_FEATURE_SET; if(AM_I_DC) { } else if(fsa_our_dc_version != NULL) { caller_version = fsa_our_dc_version; } else if(op->params == NULL) { caller_version = fsa_our_dc_version; } else { /* there is a small risk in formerly mixed clusters that * it will be sub-optimal. * however with our upgrade policy, the update we send * should still be completely supported anyway */ caller_version = g_hash_table_lookup( op->params, XML_ATTR_CRM_VERSION); crm_warn("Falling back to operation originator version: %s", caller_version); } - xml_op = create_operation_update(parent, op, caller_version, 0, src); + xml_op = create_operation_update(parent, op, caller_version, 0, src, level); if(xml_op) { append_restart_list(rsc, op, xml_op, caller_version); } return TRUE; } gboolean is_rsc_active(const char *rsc_id) { GList *op_list = NULL; gboolean active = FALSE; lrm_rsc_t *the_rsc = NULL; state_flag_t cur_state = 0; int max_call_id = -1; if(fsa_lrm_conn == NULL) { return FALSE; } the_rsc = fsa_lrm_conn->lrm_ops->get_rsc(fsa_lrm_conn, rsc_id); crm_debug_3("Processing lrm_rsc_t entry %s", rsc_id); if(the_rsc == NULL) { crm_err("NULL resource returned from the LRM"); return FALSE; } op_list = the_rsc->ops->get_cur_state(the_rsc, &cur_state); crm_debug_3("\tcurrent state:%s",cur_state==LRM_RSC_IDLE?"Idle":"Busy"); slist_iter( op, lrm_op_t, op_list, llpc, crm_debug_2("Processing op %s_%d (%d) for %s (status=%d, rc=%d)", op->op_type, op->interval, op->call_id, the_rsc->id, op->op_status, op->rc); CRM_ASSERT(max_call_id <= op->call_id); if(op->rc == EXECRA_OK && safe_str_eq(op->op_type, CRMD_ACTION_STOP)) { active = FALSE; } else if(op->rc == EXECRA_OK && safe_str_eq(op->op_type, CRMD_ACTION_MIGRATE)) { /* a stricter check is too complex... 
* leave that to the PE */ active = FALSE; } else if(op->rc == EXECRA_NOT_RUNNING) { active = FALSE; } else { active = TRUE; } max_call_id = op->call_id; lrm_free_op(op); ); g_list_free(op_list); lrm_free_rsc(the_rsc); return active; } gboolean build_active_RAs(xmlNode *rsc_list) { GList *op_list = NULL; GList *lrm_list = NULL; gboolean found_op = FALSE; state_flag_t cur_state = 0; if(fsa_lrm_conn == NULL) { return FALSE; } lrm_list = fsa_lrm_conn->lrm_ops->get_all_rscs(fsa_lrm_conn); slist_iter( rid, char, lrm_list, lpc, int max_call_id = -1; xmlNode *xml_rsc = NULL; lrm_rsc_t *the_rsc = fsa_lrm_conn->lrm_ops->get_rsc(fsa_lrm_conn, rid); if(the_rsc == NULL) { crm_err("NULL resource returned from the LRM: %s", rid); continue; } xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, the_rsc->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, the_rsc->type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, the_rsc->class); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER,the_rsc->provider); op_list = the_rsc->ops->get_cur_state(the_rsc, &cur_state); slist_iter( op, lrm_op_t, op_list, llpc, if(max_call_id < op->call_id) { build_operation_update( - xml_rsc, the_rsc, op, __FUNCTION__, llpc, LOG_DEBUG_2); + xml_rsc, the_rsc, op, __FUNCTION__, llpc, LOG_DEBUG); } else if(max_call_id > op->call_id) { crm_err("Bad call_id in list=%d. Previous call_id=%d", op->call_id, max_call_id); } else { crm_warn("lrm->get_cur_state() returned" " duplicate entries for call_id=%d", op->call_id); } max_call_id = op->call_id; found_op = TRUE; lrm_free_op(op); ); if(found_op == FALSE && g_list_length(op_list) != 0) { crm_err("Could not properly determin last op" " for %s from %d entries", the_rsc->id, g_list_length(op_list)); } g_list_free(op_list); lrm_free_rsc(the_rsc); ); slist_destroy(char, rid, lrm_list, free(rid)); return TRUE; } xmlNode* do_lrm_query(gboolean is_replace) { gboolean shut_down = FALSE; xmlNode *xml_result= NULL; xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; const char *exp_state = CRMD_STATE_ACTIVE; if(is_set(fsa_input_register, R_SHUTDOWN)) { exp_state = CRMD_STATE_INACTIVE; shut_down = TRUE; } xml_state = create_node_state( fsa_our_uname, ACTIVESTATUS, XML_BOOLEAN_TRUE, ONLINESTATUS, CRMD_JOINSTATE_MEMBER, exp_state, !shut_down, __FUNCTION__); xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, fsa_our_uuid); rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(rsc_list); xml_result = create_cib_fragment(xml_state, XML_CIB_TAG_STATUS); crm_log_xml_debug_3(xml_state, "Current state of the LRM"); free_xml(xml_state); return xml_result; } /* * Remove the rsc from the CIB * * Avoids refreshing the entire LRM section of this host */ #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" static void delete_rsc_entry(const char *rsc_id) { int max = 0; char *rsc_xpath = NULL; CRM_CHECK(rsc_id != NULL, return); max = strlen(rsc_template) + strlen(rsc_id) + strlen(fsa_our_uname) + 1; crm_malloc0(rsc_xpath, max); snprintf(rsc_xpath, max, rsc_template, fsa_our_uname, rsc_id); CRM_CHECK(rsc_id != NULL, return); crm_debug("sync: Sending delete op for %s", rsc_id); fsa_cib_conn->cmds->delete( fsa_cib_conn, rsc_xpath, NULL, cib_quorum_override|cib_xpath); crm_free(rsc_xpath); } /* * Remove the op from the CIB * * Avoids refreshing the entire LRM section of this host */ #define op_template 
"//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s']" #define op_call_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s' and @"XML_LRM_ATTR_CALLID"='%d']" static void delete_op_entry(lrm_op_t *op, const char *rsc_id, const char *key, int call_id) { xmlNode *xml_top = NULL; if(op != NULL) { xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); crm_debug("async: Sending delete op for %s_%s_%d (call=%d)", op->rsc_id, op->op_type, op->interval, op->call_id); fsa_cib_conn->cmds->delete( fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); } else if (rsc_id != NULL && key != NULL) { int max = 0; char *op_xpath = NULL; if(call_id > 0) { max = strlen(op_call_template) + strlen(rsc_id) + strlen(fsa_our_uname) + strlen(key) + 10; crm_malloc0(op_xpath, max); snprintf(op_xpath, max, op_call_template, fsa_our_uname, rsc_id, key, call_id); } else { max = strlen(op_template) + strlen(rsc_id) + strlen(fsa_our_uname) + strlen(key) + 1; crm_malloc0(op_xpath, max); snprintf(op_xpath, max, op_template, fsa_our_uname, rsc_id, key); } crm_debug("sync: Sending delete op for %s (call=%d)", rsc_id, call_id); fsa_cib_conn->cmds->delete( fsa_cib_conn, op_xpath, NULL, cib_quorum_override|cib_xpath); crm_free(op_xpath); } else { crm_err("Not enough information to delete op entry: rsc=%p key=%p", rsc_id, key); return; } crm_log_xml_debug_2(xml_top, "op:cancel"); free_xml(xml_top); } static gboolean cancel_op(lrm_rsc_t *rsc, const char *key, int op, gboolean remove) { int rc = HA_OK; struct recurring_op_s *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc != NULL, return FALSE); if(key == NULL) { key = make_stop_id(rsc->id, op); } pending = g_hash_table_lookup(pending_ops, key); if(pending) { if(remove && pending->remove == FALSE) { pending->remove = TRUE; crm_debug("Scheduling %s for removal", key); } if(pending->cancelled) { crm_debug("Operation %s already cancelled", key); return TRUE; } pending->cancelled = TRUE; } else { crm_info("No pending op found for %s", key); } crm_debug("Cancelling op %d for %s (%s)", op, rsc->id, key); rc = rsc->ops->cancel_op(rsc, op); if(rc != HA_OK) { crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc->id, key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ return FALSE; } return TRUE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrm_rsc_t *rsc; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { struct cancel_data *data = user_data; struct recurring_op_s *op = (struct recurring_op_s*)value; if(safe_str_eq(op->op_key, data->key)) { data->done = TRUE; if (cancel_op(data->rsc, key, op->call_id, data->remove) == FALSE) { return TRUE; } } return FALSE; } static gboolean cancel_op_key(lrm_rsc_t *rsc, const char *key, gboolean remove) { struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; g_hash_table_foreach_remove(pending_ops, cancel_action_by_key, &data); return data.done; } static lrm_rsc_t * get_lrm_resource(xmlNode *resource, 
xmlNode *op_msg, gboolean do_create) { char rid[64]; lrm_rsc_t *rsc = NULL; const char *short_id = ID(resource); const char *long_id = crm_element_value(resource, XML_ATTR_ID_LONG); crm_debug_2("Retrieving %s from the LRM.", short_id); CRM_CHECK(short_id != NULL, return NULL); if(rsc == NULL) { /* check if its already there (short name) */ strncpy(rid, short_id, 64); rid[63] = 0; rsc = fsa_lrm_conn->lrm_ops->get_rsc(fsa_lrm_conn, rid); } if(rsc == NULL && long_id != NULL) { /* try the long name instead */ strncpy(rid, long_id, 64); rid[63] = 0; rsc = fsa_lrm_conn->lrm_ops->get_rsc(fsa_lrm_conn, rid); } if(rsc == NULL && do_create) { /* add it to the LRM */ const char *type = crm_element_value(resource, XML_ATTR_TYPE); const char *class = crm_element_value(resource, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(resource, XML_AGENT_ATTR_PROVIDER); GHashTable *params = xml2list(op_msg); CRM_CHECK(class != NULL, return NULL); CRM_CHECK(type != NULL, return NULL); crm_debug_2("Adding rsc %s before operation", short_id); strncpy(rid, short_id, 64); rid[63] = 0; if(g_hash_table_size(params) == 0) { crm_log_xml_warn(op_msg, "EmptyParams"); } if(params != NULL) { g_hash_table_remove(params, CRM_META"_op_target_rc"); } fsa_lrm_conn->lrm_ops->add_rsc( fsa_lrm_conn, rid, class, type, provider, params); rsc = fsa_lrm_conn->lrm_ops->get_rsc(fsa_lrm_conn, rid); g_hash_table_destroy(params); if(rsc == NULL) { fsa_data_t *msg_data = NULL; crm_err("Could not add resource %s to LRM", rid); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } } return rsc; } static gboolean lrm_remove_deleted_op( gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; struct recurring_op_s *pending = value; if(safe_str_eq(rsc, pending->rsc_id)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { gboolean done = FALSE; gboolean create_rsc = TRUE; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if(safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } crm_debug_2("LRM command from: %s", from_sys); if(safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { operation = CRMD_ACTION_DELETE; } else if(safe_str_eq(operation, CRM_OP_LRM_REFRESH)) { crm_op = CRM_OP_LRM_REFRESH; } else if(safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { #if HAVE_STRUCT_LRM_OPS_FAIL_RSC lrm_rsc_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node( input->xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); rsc = get_lrm_resource(xml_rsc, input->xml, create_rsc); if(rsc) { int rc = HA_OK; crm_info("Failing resource %s...", rsc->id); rc = fsa_lrm_conn->lrm_ops->fail_rsc(fsa_lrm_conn, rsc->id, 1, "do_lrm_invoke: Async failure"); if(rc != HA_OK) { crm_err("Could not initiate an asynchronous failure for %s (%d)", rsc->id, rc); } lrm_free_rsc(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(input->msg, "bad input"); } return; #else crm_info("Failing resource..."); operation = "fail"; #endif } else if(input->xml != NULL) { operation = 
crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if(safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { enum cib_errors rc = cib_ok; xmlNode *fragment = do_lrm_query(TRUE); crm_info("Forcing a local LRM refresh"); fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc); free_xml(fragment); } else if(safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { xmlNode *data = do_lrm_query(FALSE); xmlNode *reply = create_reply(input->msg, data); if(relay_message(reply, TRUE) == FALSE) { crm_err("Unable to route reply"); crm_log_xml(LOG_ERR, "reply", reply); } free_xml(reply); free_xml(data); } else if(safe_str_eq(operation, CRM_OP_PROBED) || safe_str_eq(crm_op, CRM_OP_REPROBE)) { int cib_options = cib_inhibit_notify; const char *probed = XML_BOOLEAN_TRUE; if(safe_str_eq(crm_op, CRM_OP_REPROBE)) { cib_options = cib_none; probed = XML_BOOLEAN_FALSE; } update_attrd(NULL, CRM_OP_PROBED, probed); } else if(operation != NULL) { lrm_rsc_t *rsc = NULL; xmlNode *params = NULL; xmlNode *xml_rsc = find_xml_node( input->xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* only the first 16 chars are used by the LRM */ params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); if(safe_str_eq(operation, CRMD_ACTION_DELETE)) { create_rsc = FALSE; } rsc = get_lrm_resource(xml_rsc, input->xml, create_rsc); if(rsc == NULL && create_rsc) { crm_err("Invalid resource definition"); crm_log_xml_warn(input->msg, "bad input"); } else if(rsc == NULL) { lrm_op_t* op = NULL; crm_err("Not creating resource for a %s event: %s", operation, ID(input->xml)); crm_log_xml_warn(input->msg, "bad input"); op = construct_op(input->xml, ID(xml_rsc), operation); op->op_status = LRM_OP_DONE; op->rc = EXECRA_OK; CRM_ASSERT(op != NULL); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); free_lrm_op(op); } else if(safe_str_eq(operation, CRMD_ACTION_CANCEL)) { lrm_op_t* op = NULL; char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; const char *op_interval = NULL; CRM_CHECK(params != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL); op_interval = crm_element_value(params, meta_key); crm_free(meta_key); meta_key = crm_meta_name(XML_LRM_ATTR_TASK); op_task = crm_element_value(params, meta_key); crm_free(meta_key); meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); call_id = crm_element_value(params, meta_key); crm_free(meta_key); CRM_CHECK(op_task != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); CRM_CHECK(op_interval != NULL, crm_log_xml_warn(input->xml, "Bad command"); return); op = construct_op(input->xml, rsc->id, op_task); CRM_ASSERT(op != NULL); op_key = generate_op_key( rsc->id,op_task,crm_parse_int(op_interval,"0")); crm_debug("PE requested op %s (call=%s) be cancelled", op_key, call_id?call_id:"NA"); call = crm_parse_int(call_id, "0"); if(call == 0) { /* the normal case when the PE cancels a recurring op */ done = cancel_op_key(rsc, op_key, TRUE); } else { /* the normal case when the PE cancels an orphan op */ done = cancel_op(rsc, NULL, call, TRUE); } if(done == FALSE) { crm_debug("Nothing known about operation %d for %s", call, op_key); delete_op_entry(NULL, rsc->id, op_key, call); /* needed?? 
surely not otherwise the cancel_op_(_key) wouldn't * have failed in the first place */ g_hash_table_remove(pending_ops, op_key); } op->rc = EXECRA_OK; op->op_status = LRM_OP_DONE; send_direct_ack(from_host, from_sys, rsc, op, rsc->id); crm_free(op_key); free_lrm_op(op); } else if(safe_str_eq(operation, CRMD_ACTION_DELETE)) { int rc = HA_OK; lrm_op_t* op = NULL; CRM_ASSERT(rsc != NULL); op = construct_op(input->xml, rsc->id, operation); CRM_ASSERT(op != NULL); op->op_status = LRM_OP_DONE; op->rc = EXECRA_OK; crm_info("Removing resource %s from the LRM", rsc->id); rc = fsa_lrm_conn->lrm_ops->delete_rsc(fsa_lrm_conn, rsc->id); if(rc != HA_OK) { crm_err("Failed to remove resource %s", rsc->id); op->op_status = LRM_OP_ERROR; op->rc = EXECRA_UNKNOWN_ERROR; } delete_rsc_entry(rsc->id); send_direct_ack(from_host, from_sys, rsc, op, rsc->id); free_lrm_op(op); g_hash_table_foreach_remove(pending_ops, lrm_remove_deleted_op, rsc->id); if(safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { /* this isn't expected - trigger a new transition */ time_t now = time(NULL); char *now_s = crm_itoa(now); crm_debug("Triggering a refresh after %s deleted %s from the LRM", from_sys, rsc->id); update_attr(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE); crm_free(now_s); } } else if(rsc != NULL) { do_lrm_rsc_op(rsc, operation, input->xml, input->msg); } lrm_free_rsc(rsc); } else { crm_err("Operation was neither a lrm_query, nor a rsc op. %s", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } lrm_op_t * construct_op(xmlNode *rsc_op, const char *rsc_id, const char *operation) { lrm_op_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; const char *op_interval = NULL; const char *transition = NULL; CRM_DEV_ASSERT(rsc_id != NULL); crm_malloc0(op, sizeof(lrm_op_t)); op->op_type = crm_strdup(operation); op->op_status = LRM_OP_PENDING; op->rc = -1; op->rsc_id = crm_strdup(rsc_id); op->interval = 0; op->timeout = 0; op->start_delay = 0; op->copyparams = 0; op->app_name = crm_strdup(CRM_SYSTEM_CRMD); if(rsc_op == NULL) { CRM_DEV_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; op->user_data_len = 0; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. 
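	 * build_operation_update() can fall back to the XML_ATTR_CRM_VERSION
	 * entry in op->params when it has no DC version of its own; since
	 * there is no request to copy parameters from here, seed the table
	 * with our own CRM_FEATURE_SET below.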
*/ op->params = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_insert(op->params, crm_strdup(XML_ATTR_CRM_VERSION), crm_strdup(CRM_FEATURE_SET)); crm_debug_2("Constructed %s op for %s", operation, rsc_id); return op; } op->params = xml2list(rsc_op); if(op->params == NULL) { CRM_DEV_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); } else { g_hash_table_remove(op->params, CRM_META"_op_target_rc"); } op_delay = crm_meta_value(op->params, XML_OP_ATTR_START_DELAY); op_timeout = crm_meta_value(op->params, XML_ATTR_TIMEOUT); op_interval = crm_meta_value(op->params, XML_LRM_ATTR_INTERVAL); op->interval = crm_parse_int(op_interval, "0"); op->timeout = crm_parse_int(op_timeout, "0"); op->start_delay = crm_parse_int(op_delay, "0"); /* sanity */ if(op->interval < 0) { op->interval = 0; } if(op->timeout < 0) { op->timeout = 0; } if(op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = crm_strdup(transition); op->user_data_len = 1+strlen(op->user_data); if(op->interval != 0) { if(safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and Stop actions cannot have an interval: %d", op->interval); op->interval = 0; } } /* reset the resource's parameters? */ if(op->interval == 0) { if(safe_str_eq(CRMD_ACTION_START, operation) || safe_str_eq(CRMD_ACTION_STATUS, operation)) { op->copyparams = 1; } } crm_debug_2("Constructed %s op for %s: interval=%d", operation, rsc_id, op->interval); return op; } void send_direct_ack(const char *to_host, const char *to_sys, lrm_rsc_t *rsc, lrm_op_t* op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; xmlNode *fragment; CRM_CHECK(op != NULL, return); if(op->rsc_id == NULL) { CRM_DEV_ASSERT(rsc_id != NULL); op->rsc_id = crm_strdup(rsc_id); } if(to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } update = create_node_state( fsa_our_uname, NULL, NULL, NULL, NULL, NULL, FALSE, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, __FUNCTION__, 0, LOG_DEBUG); fragment = create_cib_fragment(update, XML_CIB_TAG_STATUS); reply = create_request(CRM_OP_INVOKE_LRM, fragment, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_debug_2(update, "ACK Update"); crm_info("ACK'ing resource op %s_%s_%d from %s: %s", op->rsc_id, op->op_type, op->interval, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if(relay_message(reply, TRUE) == FALSE) { crm_log_xml(LOG_ERR, "Unable to route reply", reply); } free_xml(fragment); free_xml(update); free_xml(reply); } static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { lrm_rsc_t *rsc = user_data; struct recurring_op_s *op = (struct recurring_op_s*)value; if(op->interval != 0 && safe_str_eq(op->rsc_id, rsc->id)) { if (cancel_op(rsc, key, op->call_id, FALSE) == FALSE) { return TRUE; } } return FALSE; } void do_lrm_rsc_op(lrm_rsc_t *rsc, const char *operation, xmlNode *msg, xmlNode *request) { int call_id = 0; char *op_id = NULL; lrm_op_t* op = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; CRM_CHECK(rsc != NULL, return); if(msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); 
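	/* The transition key identifies the graph action that requested this
	 * operation; construct_op() copies it into op->user_data so that the
	 * eventual result (or a direct ack) can be matched back to the
	 * transition engine. */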
if(transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(msg, rsc->id, operation); /* stop the monitor before stopping the resource */ if(crm_str_eq(operation, CRMD_ACTION_STOP, TRUE) || crm_str_eq(operation, CRMD_ACTION_DEMOTE, TRUE) || crm_str_eq(operation, CRMD_ACTION_PROMOTE, TRUE) || crm_str_eq(operation, CRMD_ACTION_MIGRATE, TRUE)) { g_hash_table_foreach_remove(pending_ops, stop_recurring_action_by_rsc, rsc); } /* now do the op */ crm_info("Performing key=%s op=%s_%s_%d )", transition, rsc->id, operation, op->interval); if(fsa_state != S_NOT_DC && fsa_state != S_TRANSITION_ENGINE) { if(safe_str_neq(operation, "fail") && safe_str_neq(operation, CRMD_ACTION_STOP)) { crm_info("Discarding attempt to perform action %s on %s" " in state %s", operation, rsc->id, fsa_state2string(fsa_state)); op->rc = 99; op->op_status = LRM_OP_ERROR; send_direct_ack(NULL, NULL, rsc, op, rsc->id); free_lrm_op(op); crm_free(op_id); return; } } op_id = generate_op_key(rsc->id, op->op_type, op->interval); if(op->interval > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(rsc, op_id, FALSE); op->target_rc = CHANGED; } else { op->target_rc = EVERYTIME; } g_hash_table_replace(resources,crm_strdup(rsc->id), crm_strdup(op_id)); call_id = rsc->ops->perform_op(rsc, op); if(call_id <= 0) { crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else if(op->interval > 0 && op->start_delay > 5 * 60 * 1000) { char *uuid = NULL; int dummy = 0, target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); crm_free(uuid); op->rc = target_rc; op->op_status = LRM_OP_DONE; send_direct_ack(NULL, NULL, rsc, op, rsc->id); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); struct recurring_op_s *pending = NULL; crm_malloc0(pending, sizeof(struct recurring_op_s)); crm_debug_2("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval = op->interval; pending->op_key = crm_strdup(op_id); pending->rsc_id = crm_strdup(rsc->id); g_hash_table_replace(pending_ops, call_id_s, pending); } crm_free(op_id); free_lrm_op(op); return; } void free_recurring_op(gpointer value) { struct recurring_op_s *op = (struct recurring_op_s*)value; crm_free(op->rsc_id); crm_free(op->op_key); crm_free(op); } void free_lrm_op(lrm_op_t *op) { g_hash_table_destroy(op->params); crm_free(op->user_data); crm_free(op->output); crm_free(op->rsc_id); crm_free(op->op_type); crm_free(op->app_name); crm_free(op); } static void dup_attr(gpointer key, gpointer value, gpointer user_data) { g_hash_table_replace(user_data, crm_strdup(key), crm_strdup(value)); } lrm_op_t * copy_lrm_op(const lrm_op_t *op) { lrm_op_t *op_copy = NULL; CRM_CHECK(op != NULL, return NULL); CRM_CHECK(op->rsc_id != NULL, return NULL); crm_malloc0(op_copy, sizeof(lrm_op_t)); op_copy->op_type = crm_strdup(op->op_type); /* input fields */ op_copy->params = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); if(op->params != NULL) { g_hash_table_foreach(op->params, dup_attr, op_copy->params); } op_copy->timeout = op->timeout; op_copy->interval = op->interval; op_copy->target_rc = op->target_rc; /* in the CRM, this is always a string */ if(op->user_data != NULL) { op_copy->user_data = 
crm_strdup(op->user_data); } /* output fields */ op_copy->op_status = op->op_status; op_copy->rc = op->rc; op_copy->call_id = op->call_id; op_copy->output = NULL; op_copy->rsc_id = crm_strdup(op->rsc_id); if(op->app_name != NULL) { op_copy->app_name = crm_strdup(op->app_name); } if(op->output != NULL) { op_copy->output = crm_strdup(op->output); } return op_copy; } lrm_rsc_t * copy_lrm_rsc(const lrm_rsc_t *rsc) { lrm_rsc_t *rsc_copy = NULL; if(rsc == NULL) { return NULL; } crm_malloc0(rsc_copy, sizeof(lrm_rsc_t)); rsc_copy->id = crm_strdup(rsc->id); rsc_copy->type = crm_strdup(rsc->type); rsc_copy->class = NULL; rsc_copy->provider = NULL; if(rsc->class != NULL) { rsc_copy->class = crm_strdup(rsc->class); } if(rsc->provider != NULL) { rsc_copy->provider = crm_strdup(rsc->provider); } /* GHashTable* params; */ rsc_copy->params = NULL; rsc_copy->ops = NULL; return rsc_copy; } void cib_rsc_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { switch(rc) { case cib_ok: case cib_diff_failed: case cib_diff_resync: crm_debug_2("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, cib_error2string(rc)); } } int do_update_resource(lrm_op_t* op) { /* */ int rc = cib_ok; lrm_rsc_t *rsc = NULL; xmlNode *update, *iter = NULL; int call_opt = cib_quorum_override; CRM_CHECK(op != NULL, return 0); if(fsa_state == S_ELECTION || fsa_state == S_PENDING) { crm_info("Sending update to local CIB in state: %s", fsa_state2string(fsa_state)); call_opt |= cib_scope_local; } iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); set_uuid(iter, XML_ATTR_UUID, fsa_our_uname); crm_xml_add(iter, XML_ATTR_UNAME, fsa_our_uname); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); rsc = fsa_lrm_conn->lrm_ops->get_rsc(fsa_lrm_conn, op->rsc_id); build_operation_update(iter, rsc, op, __FUNCTION__, 0, LOG_DEBUG); if(rsc) { crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->class); crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER,rsc->provider); CRM_CHECK(rsc->type != NULL, crm_err("Resource %s has no value for type", op->rsc_id)); CRM_CHECK(rsc->class != NULL, crm_err("Resource %s has no value for class", op->rsc_id)); lrm_free_rsc(rsc); } else { crm_warn("Resource %s no longer exists in the lrmd", op->rsc_id); goto cleanup; } /* make it an asyncronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down then anyway? 
* (probably because of an internal error) * * Worst case: * we get shot for having resources "running" when the really weren't * * the alternative however means blocking here for too long, which * isnt acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc); /* the return code is a call number, not an error code */ crm_debug_2("Sent resource state update message: %d", rc); fsa_cib_conn->cmds->register_callback( fsa_cib_conn, rc, 60, FALSE, NULL, "cib_rsc_callback", cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t *msg_data) { CRM_CHECK(FALSE, return); } gboolean process_lrm_event(lrm_op_t *op) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; int log_level = LOG_ERR; gboolean removed = FALSE; struct recurring_op_s *pending = NULL; CRM_CHECK(op != NULL, return FALSE); CRM_CHECK(op->rsc_id != NULL, return FALSE); op_key = generate_op_key(op->rsc_id, op->op_type, op->interval); switch(op->op_status) { case LRM_OP_ERROR: case LRM_OP_PENDING: case LRM_OP_NOTSUPPORTED: break; case LRM_OP_CANCELLED: log_level = LOG_INFO; break; case LRM_OP_DONE: log_level = LOG_INFO; break; case LRM_OP_TIMEOUT: log_level = LOG_DEBUG_3; crm_err("LRM operation %s (%d) %s (timeout=%dms)", op_key, op->call_id, op_status2text(op->op_status), op->timeout); break; default: crm_err("Mapping unknown status (%d) to ERROR", op->op_status); op->op_status = LRM_OP_ERROR; } if(op->op_status == LRM_OP_ERROR && (op->rc == EXECRA_RUNNING_MASTER || op->rc == EXECRA_NOT_RUNNING)) { /* Leave it up to the TE/PE to decide if this is an error */ op->op_status = LRM_OP_DONE; log_level = LOG_INFO; } op_id = make_stop_id(op->rsc_id, op->call_id); pending = g_hash_table_lookup(pending_ops, op_id); if(op->op_status != LRM_OP_CANCELLED) { update_id = do_update_resource(op); if(op->interval != 0) { goto out; } } else if(op->interval == 0) { /* This will occur when "crm resource cleanup" is called while actions are in-flight */ crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id); send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } else if(pending == NULL) { crm_err("Op %s (call=%d): No 'pending' entry", op_key, op->call_id); } else if(op->user_data == NULL) { crm_err("Op %s (call=%d): No user data", op_key, op->call_id); } else if(pending->remove) { delete_op_entry(op, op->rsc_id, op_key, op->call_id); } else { /* Before a stop is called, no need to direct ack */ crm_debug_2("Op %s (call=%d): no delete event required", op_key, op->call_id); } if(g_hash_table_remove(pending_ops, op_id)) { removed = TRUE; crm_debug_2("Op %s (call=%d, stop-id=%s): Confirmed", op_key, op->call_id, op_id); } out: if(op->op_status == LRM_OP_DONE) { do_crm_log(log_level, "LRM operation %s (call=%d, rc=%d, cib-update=%d, confirmed=%s) %s", op_key, op->call_id, op->rc, update_id, removed?"true":"false", execra_code2string(op->rc)); } else { do_crm_log(log_level, "LRM operation %s (call=%d, status=%d, cib-update=%d, confirmed=%s) %s", op_key, op->call_id, op->op_status, update_id, removed?"true":"false", op_status2text(op->op_status)); } if(op->rc != 0 && op->output != NULL) { crm_info("Result: %s", op->output); } else if(op->output != NULL) { crm_debug("Result: %s", op->output); } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... 
allow it to continue */ mainloop_set_trigger(fsa_source); crm_free(op_key); crm_free(op_id); return TRUE; } char * make_stop_id(const char *rsc, int call_id) { char *op_id = NULL; crm_malloc0(op_id, strlen(rsc) + 34); if(op_id != NULL) { snprintf(op_id, strlen(rsc) + 34, "%s:%d", rsc, call_id); } return op_id; } diff --git a/crmd/te_actions.c b/crmd/te_actions.c index 2d8a47e87e..f0933e974d 100644 --- a/crmd/te_actions.c +++ b/crmd/te_actions.c @@ -1,568 +1,565 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include char *te_uuid = NULL; void send_rsc_command(crm_action_t *action); static void te_start_action_timer(crm_graph_t *graph, crm_action_t *action) { crm_malloc0(action->timer, sizeof(crm_action_timer_t)); action->timer->timeout = action->timeout; action->timer->reason = timeout_action; action->timer->action = action; action->timer->source_id = g_timeout_add( action->timer->timeout + graph->network_delay, action_timer_callback, (void*)action->timer); CRM_ASSERT(action->timer->source_id != 0); } static gboolean te_pseudo_action(crm_graph_t *graph, crm_action_t *pseudo) { crm_info("Pseudo action %d fired and confirmed", pseudo->id); pseudo->confirmed = TRUE; update_graph(graph, pseudo); trigger_graph(); return TRUE; } void send_stonith_update(crm_action_t *action, const char *target, const char *uuid) { enum cib_errors rc = cib_ok; /* zero out the node-status & remove all LRM status info */ xmlNode *node_state = create_xml_node(NULL, XML_CIB_TAG_STATE); CRM_CHECK(target != NULL, return); CRM_CHECK(uuid != NULL, return); crm_xml_add(node_state, XML_ATTR_UUID, uuid); crm_xml_add(node_state, XML_ATTR_UNAME, target); crm_xml_add(node_state, XML_CIB_ATTR_HASTATE, DEADSTATUS); crm_xml_add(node_state, XML_CIB_ATTR_INCCM, XML_BOOLEAN_NO); crm_xml_add(node_state, XML_CIB_ATTR_CRMDSTATE, OFFLINESTATUS); crm_xml_add(node_state, XML_CIB_ATTR_JOINSTATE, CRMD_JOINSTATE_DOWN); crm_xml_add(node_state, XML_CIB_ATTR_EXPSTATE, CRMD_JOINSTATE_DOWN); crm_xml_add(node_state, XML_ATTR_ORIGIN, __FUNCTION__); rc = fsa_cib_conn->cmds->update( fsa_cib_conn, XML_CIB_TAG_STATUS, node_state, cib_quorum_override|cib_scope_local|cib_can_create); - if(rc < cib_ok) { - crm_err("CIB update failed: %s", cib_error2string(rc)); - abort_transition( - INFINITY, tg_shutdown, "CIB update failed", node_state); - - } else { - /* delay processing the trigger until the update completes */ - add_cib_op_callback(fsa_cib_conn, rc, FALSE, crm_strdup(target), cib_fencing_updated); - } + /* Delay processing the trigger until the update completes */ + crm_info("Sending fencing update %d for %s", rc, target); + add_cib_op_callback(fsa_cib_conn, rc, FALSE, crm_strdup(target), cib_fencing_updated); - erase_status_tag(target, XML_CIB_TAG_LRM); - 
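The lifetime of in-flight operations in the lrm.c hunk above hinges on the pending_ops hash table: do_lrm_rsc_op() records each call under the "<rsc>:<call_id>" stop-id produced by make_stop_id(), stop_recurring_action_by_rsc() purges recurring entries before a stop/demote/promote/migrate, and process_lrm_event() removes the entry once the result is confirmed. A compressed GLib sketch of that bookkeeping, with a hypothetical pending_op_t standing in for struct recurring_op_s:

    #include <glib.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for struct recurring_op_s: just enough state
     * to show the bookkeeping. */
    typedef struct {
        int   call_id;
        int   interval;
        char *rsc_id;
        char *op_key;
    } pending_op_t;

    static void free_pending_op(gpointer value)
    {
        pending_op_t *op = value;

        g_free(op->rsc_id);
        g_free(op->op_key);
        g_free(op);
    }

    /* make_stop_id() above is simply "<rsc>:<call_id>" */
    static char *stop_id(const char *rsc, int call_id)
    {
        return g_strdup_printf("%s:%d", rsc, call_id);
    }

    /* GHRFunc for g_hash_table_foreach_remove(): drop every recurring op
     * that belongs to the given resource (the real code cancels it in the
     * lrmd first and only removes on success). */
    static gboolean drop_recurring_for_rsc(gpointer key, gpointer value,
                                           gpointer user_data)
    {
        pending_op_t *op = value;
        const char *rsc_id = user_data;

        (void) key;
        return op->interval != 0 && strcmp(op->rsc_id, rsc_id) == 0;
    }

    int main(void)
    {
        GHashTable *pending = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                    g_free, free_pending_op);
        pending_op_t *op = g_new0(pending_op_t, 1);

        op->call_id  = 42;
        op->interval = 10000;
        op->rsc_id   = g_strdup("dummy");
        op->op_key   = g_strdup("dummy_monitor_10000");

        /* Record the op under its stop-id, as do_lrm_rsc_op() does */
        g_hash_table_replace(pending, stop_id(op->rsc_id, op->call_id), op);

        /* Before a stop, recurring ops for that resource are purged */
        g_hash_table_foreach_remove(pending, drop_recurring_for_rsc,
                                    (gpointer) "dummy");

        printf("pending ops left: %u\n", g_hash_table_size(pending));
        g_hash_table_destroy(pending);
        return 0;
    }

Because both the key and the value are owned by the table (g_free / free_pending_op destructors), removal during confirmation frees everything in one step, which is what lets the shutdown path simply wait for the table to drain.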
erase_status_tag(target, XML_TAG_TRANSIENT_NODEATTRS); + /* Make sure it sticks */ + /* fsa_cib_conn->cmds->bump_epoch(fsa_cib_conn, cib_quorum_override|cib_scope_local); */ + + erase_status_tag(target, XML_CIB_TAG_LRM, cib_scope_local); + erase_status_tag(target, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local); free_xml(node_state); #if 0 /* Make sure the membership cache is accurate */ crm_update_peer(0, 0, 0, -1, 0, uuid, target, NULL, CRM_NODE_LOST); #endif return; } static gboolean te_fence_node(crm_graph_t *graph, crm_action_t *action) { int rc = 0; const char *id = NULL; const char *uuid = NULL; const char *target = NULL; const char *type = NULL; gboolean invalid_action = FALSE; id = ID(action->xml); target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); type = crm_meta_value(action->params, "stonith_action"); CRM_CHECK(id != NULL, invalid_action = TRUE); CRM_CHECK(uuid != NULL, invalid_action = TRUE); CRM_CHECK(type != NULL, invalid_action = TRUE); CRM_CHECK(target != NULL, invalid_action = TRUE); if(invalid_action) { crm_log_xml_warn(action->xml, "BadAction"); return FALSE; } te_log_action(LOG_INFO, "Executing %s fencing operation (%s) on %s (timeout=%d)", type, id, target, transition_graph->stonith_timeout); /* Passing NULL means block until we can connect... */ te_connect_stonith(NULL); if(type == NULL) { type = "reboot"; } rc = stonith_api->cmds->fence( stonith_api, 0, target, action->params, type, transition_graph->stonith_timeout/1000); stonith_api->cmds->register_callback( stonith_api, rc, transition_graph->stonith_timeout/1000, FALSE, generate_transition_key(transition_graph->id, action->id, 0, te_uuid), "tengine_stonith_callback", tengine_stonith_callback); return TRUE; } static int get_target_rc(crm_action_t *action) { const char *target_rc_s = crm_meta_value(action->params, XML_ATTR_TE_TARGET_RC); if(target_rc_s != NULL) { return crm_parse_int(target_rc_s, "0"); } return 0; } static gboolean te_crm_command(crm_graph_t *graph, crm_action_t *action) { char *counter = NULL; xmlNode *cmd = NULL; gboolean is_local = FALSE; const char *id = NULL; const char *task = NULL; const char *value = NULL; const char *on_node = NULL; gboolean rc = TRUE; gboolean no_wait = FALSE; id = ID(action->xml); task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); CRM_CHECK(on_node != NULL && strlen(on_node) != 0, te_log_action(LOG_ERR, "Corrupted command (id=%s) %s: no node", crm_str(id), crm_str(task)); return FALSE); te_log_action(LOG_INFO, "Executing crm-event (%s): %s on %s%s%s", crm_str(id), crm_str(task), on_node, is_local?" (local)":"", no_wait?" 
- no waiting":""); if(safe_str_eq(on_node, fsa_our_uname)) { is_local = TRUE; } value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT); if(crm_is_true(value)) { no_wait = TRUE; } if(is_local && safe_str_eq(task, CRM_OP_SHUTDOWN)) { /* defer until everything else completes */ te_log_action(LOG_INFO, "crm-event (%s) is a local shutdown", crm_str(id)); graph->completion_action = tg_shutdown; graph->abort_reason = "local shutdown"; action->confirmed = TRUE; update_graph(graph, action); trigger_graph(); return TRUE; } cmd = create_request(task, NULL, on_node, CRM_SYSTEM_CRMD, CRM_SYSTEM_TENGINE, NULL); counter = generate_transition_key( transition_graph->id, action->id, get_target_rc(action), te_uuid); crm_xml_add(cmd, XML_ATTR_TRANSITION_KEY, counter); rc = send_cluster_message(on_node, crm_msg_crmd, cmd, TRUE); crm_free(counter); free_xml(cmd); value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT); if(rc == FALSE) { crm_err("Action %d failed: send", action->id); return FALSE; } else if(no_wait) { action->confirmed = TRUE; update_graph(graph, action); trigger_graph(); } else { if(action->timeout <= 0) { crm_err("Action %d: %s on %s had an invalid timeout (%dms). Using %dms instead", action->id, task, on_node, action->timeout, graph->network_delay); action->timeout = graph->network_delay; } te_start_action_timer(graph, action); } return TRUE; } gboolean cib_action_update(crm_action_t *action, int status, int op_rc) { char *op_id = NULL; char *code = NULL; char *digest = NULL; xmlNode *tmp = NULL; xmlNode *params = NULL; xmlNode *state = NULL; xmlNode *rsc = NULL; xmlNode *xml_op = NULL; xmlNode *action_rsc = NULL; enum cib_errors rc = cib_ok; const char *name = NULL; const char *value = NULL; const char *rsc_id = NULL; const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); const char *target_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); int call_options = cib_quorum_override|cib_scope_local; if(status == LRM_OP_PENDING) { crm_debug("%s %d: Recording pending operation %s on %s", crm_element_name(action->xml), action->id, task_uuid, target); } else { crm_warn("%s %d: %s on %s timed out", crm_element_name(action->xml), action->id, task_uuid, target); } action_rsc = find_xml_node(action->xml, XML_CIB_TAG_RESOURCE, TRUE); if(action_rsc == NULL) { return FALSE; } rsc_id = ID(action_rsc); CRM_CHECK(rsc_id != NULL, crm_log_xml_err(action->xml, "Bad:action"); return FALSE); /* update the CIB */ state = create_xml_node(NULL, XML_CIB_TAG_STATE); crm_xml_add(state, XML_ATTR_UUID, target_uuid); crm_xml_add(state, XML_ATTR_UNAME, target); rsc = create_xml_node(state, XML_CIB_TAG_LRM); crm_xml_add(rsc, XML_ATTR_ID, target_uuid); rsc = create_xml_node(rsc, XML_LRM_TAG_RESOURCES); rsc = create_xml_node(rsc, XML_LRM_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, rsc_id); name = XML_ATTR_TYPE; value = crm_element_value(action_rsc, name); crm_xml_add(rsc, name, value); name = XML_AGENT_ATTR_CLASS; value = crm_element_value(action_rsc, name); crm_xml_add(rsc, name, value); name = XML_AGENT_ATTR_PROVIDER; value = crm_element_value(action_rsc, name); crm_xml_add(rsc, name, value); xml_op = create_xml_node(rsc, XML_LRM_TAG_RSC_OP); crm_xml_add(xml_op, XML_ATTR_ID, task); op_id = generate_op_key(rsc_id, task, action->interval); crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_free(op_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, 
-1); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, status); crm_xml_add_int(xml_op, XML_LRM_ATTR_INTERVAL, action->interval); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op_rc); crm_xml_add(xml_op, XML_ATTR_ORIGIN, __FUNCTION__); if(crm_str_eq(task, CRMD_ACTION_MIGRATED, TRUE)) { char *key = crm_meta_name("migrate_source_uuid"); xmlNode *attrs = first_named_child(action->xml, XML_TAG_ATTRS); const char *host = crm_element_value(attrs, key); CRM_CHECK(host != NULL, crm_log_xml_err(action->xml, "Bad Op")); crm_xml_add(xml_op, CRMD_ACTION_MIGRATED, host); crm_free(key); } code = generate_transition_key( transition_graph->id, action->id, get_target_rc(action), te_uuid); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, code); crm_free(code); code = generate_transition_magic( crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY), status, op_rc); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, code); crm_free(code); tmp = find_xml_node(action->xml, "attributes", TRUE); params = create_xml_node(NULL, XML_TAG_PARAMS); copy_in_properties(params, tmp); filter_action_parameters(params, CRM_FEATURE_SET); digest = calculate_xml_digest(params, TRUE, FALSE); /* info for now as this area has been problematic to debug */ crm_debug("Calculated digest %s for %s (%s)\n", digest, ID(xml_op), crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); crm_log_xml(LOG_DEBUG, "digest:source", params); crm_xml_add(xml_op, XML_LRM_ATTR_OP_DIGEST, digest); crm_free(digest); free_xml(params); crm_debug_3("Updating CIB with \"%s\" (%s): %s %s on %s", status<0?"new action":XML_ATTR_TIMEOUT, crm_element_name(action->xml), crm_str(task), rsc_id, target); rc = fsa_cib_conn->cmds->update( fsa_cib_conn, XML_CIB_TAG_STATUS, state, call_options); crm_debug_2("Updating CIB with %s action %d: %s on %s (call_id=%d)", op_status2text(status), action->id, task_uuid, target, rc); add_cib_op_callback(fsa_cib_conn, rc, FALSE, NULL, cib_action_updated); free_xml(state); action->sent_update = TRUE; if(rc < cib_ok) { return FALSE; } return TRUE; } static gboolean te_rsc_command(crm_graph_t *graph, crm_action_t *action) { /* never overwrite stop actions in the CIB with * anything other than completed results * * Writing pending stops makes it look like the * resource is running again */ xmlNode *cmd = NULL; xmlNode *rsc_op = NULL; gboolean rc = TRUE; gboolean no_wait = FALSE; gboolean is_local = FALSE; char *counter = NULL; const char *task = NULL; const char *value = NULL; const char *on_node = NULL; const char *task_uuid = NULL; CRM_ASSERT(action != NULL); CRM_ASSERT(action->xml != NULL); action->executed = FALSE; on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); CRM_CHECK(on_node != NULL && strlen(on_node) != 0, te_log_action(LOG_ERR, "Corrupted command(id=%s) %s: no node", ID(action->xml), crm_str(task)); return FALSE); rsc_op = action->xml; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY); on_node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET); counter = generate_transition_key( transition_graph->id, action->id, get_target_rc(action), te_uuid); crm_xml_add(rsc_op, XML_ATTR_TRANSITION_KEY, counter); if(safe_str_eq(on_node, fsa_our_uname)) { is_local = TRUE; } value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT); if(crm_is_true(value)) { no_wait = TRUE; } crm_info("Initiating action %d: %s %s on %s%s%s", action->id, task, task_uuid, on_node, is_local?" 
(local)":"", no_wait?" - no waiting":""); cmd = create_request(CRM_OP_INVOKE_LRM, rsc_op, on_node, CRM_SYSTEM_LRMD, CRM_SYSTEM_TENGINE, NULL); if(is_local) { /* shortcut local resource commands */ ha_msg_input_t data = { .msg = cmd, .xml = rsc_op, }; fsa_data_t msg = { .id = 0, .data = &data, .data_type = fsa_dt_ha_msg, .fsa_input = I_NULL, .fsa_cause = C_FSA_INTERNAL, .actions = A_LRM_INVOKE, .origin = __FUNCTION__, }; do_lrm_invoke(A_LRM_INVOKE, C_FSA_INTERNAL, fsa_state, I_NULL, &msg); } else { rc = send_cluster_message(on_node, crm_msg_lrmd, cmd, TRUE); } crm_free(counter); free_xml(cmd); action->executed = TRUE; if(rc == FALSE) { crm_err("Action %d failed: send", action->id); return FALSE; } else if(no_wait) { action->confirmed = TRUE; update_graph(transition_graph, action); trigger_graph(); } else { if(action->timeout <= 0) { crm_err("Action %d: %s %s on %s had an invalid timeout (%dms). Using %dms instead", action->id, task, task_uuid, on_node, action->timeout, graph->network_delay); action->timeout = graph->network_delay; } te_start_action_timer(graph, action); } value = crm_meta_value(action->params, XML_OP_ATTR_PENDING); if(crm_is_true(value)) { /* write a "pending" entry to the CIB, inhibit notification */ crm_info("Recording pending op %s in the CIB", task_uuid); cib_action_update(action, LRM_OP_PENDING, EXECRA_STATUS_UNKNOWN); } return TRUE; } crm_graph_functions_t te_graph_fns = { te_pseudo_action, te_rsc_command, te_crm_command, te_fence_node }; void notify_crmd(crm_graph_t *graph) { int log_level = LOG_DEBUG; const char *type = "unknown"; enum crmd_fsa_input event = I_NULL; crm_debug("Processing transition completion in state %s", fsa_state2string(fsa_state)); CRM_CHECK(graph->complete, graph->complete = TRUE); switch(graph->completion_action) { case tg_stop: type = "stop"; /* fall through */ case tg_done: type = "done"; log_level = LOG_INFO; if(fsa_state == S_TRANSITION_ENGINE) { event = I_TE_SUCCESS; } break; case tg_restart: type = "restart"; if(fsa_state == S_TRANSITION_ENGINE) { event = I_PE_CALC; } else if(fsa_state == S_POLICY_ENGINE) { register_fsa_action(A_PE_INVOKE); } break; case tg_shutdown: type = "shutdown"; if(is_set(fsa_input_register, R_SHUTDOWN)) { event = I_STOP; } else { event = I_TERMINATE; } } te_log_action(log_level, "Transition %d status: %s - %s", graph->id, type, crm_str(graph->abort_reason)); graph->abort_reason = NULL; graph->completion_action = tg_done; clear_bit_inplace(fsa_input_register, R_IN_TRANSITION); if(event != I_NULL) { register_fsa_input(C_FSA_INTERNAL, event, NULL); } } diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c index 66d5a441f8..2fb7975c0e 100644 --- a/crmd/te_callbacks.c +++ b/crmd/te_callbacks.c @@ -1,507 +1,512 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include /* For ONLINESTATUS etc */ void te_update_confirm(const char *event, xmlNode *msg); extern char *te_uuid; gboolean shuttingdown = FALSE; crm_graph_t *transition_graph; crm_trigger_t *transition_trigger = NULL; /* #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */ #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']" static const char *get_node_id(xmlNode *rsc_op) { xmlNode *node = rsc_op; while(node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); return ID(node); } static void process_resource_updates(xmlXPathObject *xpathObj) { /* */ int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } } void te_update_diff(const char *event, xmlNode *msg) { int rc = -1; const char *op = NULL; xmlNode *diff = NULL; xmlNode *cib_top = NULL; xmlXPathObject *xpathObj = NULL; int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; CRM_CHECK(msg != NULL, return); crm_element_value_int(msg, F_CIB_RC, &rc); if(transition_graph == NULL) { crm_debug_3("No graph"); return; } else if(rc < cib_ok) { crm_debug_3("Filter rc=%d (%s)", rc, cib_error2string(rc)); return; } else if(transition_graph->complete == TRUE && fsa_state != S_IDLE && fsa_state != S_TRANSITION_ENGINE && fsa_state != S_POLICY_ENGINE) { crm_debug_2("Filter state=%s, complete=%d", fsa_state2string(fsa_state), transition_graph->complete); return; } op = crm_element_value(msg, F_CIB_OPERATION); diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); cib_diff_version_details( diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_debug("Processing diff (%s): %d.%d.%d -> %d.%d.%d (%s)", op, diff_del_admin_epoch,diff_del_epoch,diff_del_updates, diff_add_admin_epoch,diff_add_epoch,diff_add_updates, fsa_state2string(fsa_state)); log_cib_diff(LOG_DEBUG_2, diff, op); /* Process crm_config updates */ cib_top = get_xpath_object("//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_CIB_TAG_CRMCONFIG, diff, LOG_DEBUG); if(cib_top != NULL) { mainloop_set_trigger(config_read); } /* Process anything that was added */ cib_top = get_xpath_object("//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB, diff, LOG_ERR); if(need_abort(cib_top)) { goto bail; /* configuration changed */ } /* Process anything that was removed */ cib_top = get_xpath_object("//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_REMOVED"//"XML_TAG_CIB, diff, LOG_ERR); if(need_abort(cib_top)) { goto bail; /* configuration changed */ } /* Transient Attributes - Added/Updated */ xpathObj = xpath_search(diff,"//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_TAG_TRANSIENT_NODEATTRS); if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Transient attribute: update", aborted); goto bail; } else if(xpathObj) { 
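te_update_diff() below drives its event handling by running XPath queries against the incoming CIB diff; xpath_search() and getXpathResult() are thin wrappers over libxml2. For readers less familiar with that API, here is a small self-contained libxml2 sketch of the same scan pattern. The XML fragment and attribute names are invented for illustration; only the //lrm_rsc_op query loosely mirrors the real expression, which is additionally scoped under the update-result/diff-added sections:

    #include <stdio.h>
    #include <string.h>
    #include <libxml/parser.h>
    #include <libxml/xpath.h>

    /* A toy "diff" fragment with two added operation results */
    static const char *diff_xml =
        "<diff-added>"
        "  <lrm_rsc_op id='rscA_monitor_10000'/>"
        "  <lrm_rsc_op id='rscB_start_0'/>"
        "</diff-added>";

    int main(void)
    {
        xmlDocPtr doc = xmlReadMemory(diff_xml, (int) strlen(diff_xml),
                                      "diff.xml", NULL, 0);
        xmlXPathContextPtr ctx = NULL;
        xmlXPathObjectPtr obj = NULL;

        if (doc == NULL) {
            return 1;
        }
        ctx = xmlXPathNewContext(doc);
        obj = xmlXPathEvalExpression(BAD_CAST "//lrm_rsc_op", ctx);

        if (obj != NULL && obj->nodesetval != NULL) {
            int lpc;

            /* Walk the matches, as process_resource_updates() does */
            for (lpc = 0; lpc < obj->nodesetval->nodeNr; lpc++) {
                xmlNodePtr match = obj->nodesetval->nodeTab[lpc];
                xmlChar *id = xmlGetProp(match, BAD_CAST "id");

                printf("operation update: %s\n", (char *) id);
                xmlFree(id);
            }
        }
        xmlXPathFreeObject(obj);
        xmlXPathFreeContext(ctx);
        xmlFreeDoc(doc);
        xmlCleanupParser();
        return 0;
    }

Building against libxml2 (for instance with the flags reported by xml2-config) is assumed; the key point is that every query result must be freed with xmlXPathFreeObject(), which is why the function below is peppered with those calls and a single bail label.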
xmlXPathFreeObject(xpathObj); } /* Transient Attributes - Removed */ xpathObj = xpath_search(diff,"//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_REMOVED"//"XML_TAG_TRANSIENT_NODEATTRS); if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Transient attribute: removal", aborted); goto bail; } else if(xpathObj) { xmlXPathFreeObject(xpathObj); } /* Check for node state updates... possibly from a shutdown we requested */ xpathObj = xpath_search(diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_CIB_TAG_STATE); if(xpathObj) { int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { xmlNode *node = getXpathResult(xpathObj, lpc); const char *event_node = crm_element_value(node, XML_ATTR_ID); const char *ccm_state = crm_element_value(node, XML_CIB_ATTR_INCCM); const char *ha_state = crm_element_value(node, XML_CIB_ATTR_HASTATE); const char *shutdown_s = crm_element_value(node, XML_CIB_ATTR_SHUTDOWN); const char *crmd_state = crm_element_value(node, XML_CIB_ATTR_CRMDSTATE); if(safe_str_eq(ccm_state, XML_BOOLEAN_FALSE) || safe_str_eq(ha_state, DEADSTATUS) || safe_str_eq(crmd_state, CRMD_JOINSTATE_DOWN)) { crm_action_t *shutdown = match_down_event(0, event_node, NULL); if(shutdown != NULL) { const char *task = crm_element_value(shutdown->xml, XML_LRM_ATTR_TASK); if(safe_str_neq(task, CRM_OP_FENCE)) { /* Wait for stonithd to tell us it is complete via tengine_stonith_callback() */ crm_debug("Confirming %s op %d", task, shutdown->id); /* match->confirmed = TRUE; */ stop_te_timer(shutdown->timer); update_graph(transition_graph, shutdown); trigger_graph(); } } else { crm_info("Stonith/shutdown of %s not matched", event_node); abort_transition(INFINITY, tg_restart, "Node failure", node); } fail_incompletable_actions(transition_graph, event_node); } if(shutdown_s) { int shutdown = crm_parse_int(shutdown_s, NULL); if(shutdown > 0) { crm_info("Aborting on "XML_CIB_ATTR_SHUTDOWN" attribute for %s", event_node); abort_transition(INFINITY, tg_restart, "Shutdown request", node); } } } xmlXPathFreeObject(xpathObj); } /* * Check for and fast-track the processing of LRM refreshes * In large clusters this can result in _huge_ speedups * * Unfortunately we can only do so when there are no pending actions * Otherwise we could miss updates we're waiting for and stall * */ xpathObj = NULL; if(transition_graph->pending == 0) { xpathObj = xpath_search(diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_LRM_TAG_RESOURCE); } if(xpathObj) { int updates = xpathObj->nodesetval->nodeNr; if(updates > 1) { /* Updates by, or in response to, TE actions will never contain updates * for more than one resource at a time */ crm_info("Detected LRM refresh - %d resources updated: Skipping all resource events", updates); abort_transition(INFINITY, tg_restart, "LRM Refresh", diff); goto bail; } xmlXPathFreeObject(xpathObj); } /* Process operation updates */ xpathObj = xpath_search(diff, "//"F_CIB_UPDATE_RESULT"//"XML_TAG_DIFF_ADDED"//"XML_LRM_TAG_RSC_OP); if(xpathObj) { process_resource_updates(xpathObj); xmlXPathFreeObject(xpathObj); } /* Detect deleted (as opposed to replaced or added) actions - eg. 
crm_resource -C */ xpathObj = xpath_search(diff, "//"XML_TAG_DIFF_REMOVED"//"XML_LRM_TAG_RSC_OP); if(xpathObj) { int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { int max = 0; const char *op_id = NULL; char *rsc_op_xpath = NULL; xmlXPathObject *op_match = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); op_id = ID(match); max = strlen(rsc_op_template) + strlen(op_id) + 1; crm_malloc0(rsc_op_xpath, max); snprintf(rsc_op_xpath, max, rsc_op_template, op_id); op_match = xpath_search(diff, rsc_op_xpath); if(op_match == NULL || op_match->nodesetval->nodeNr == 0) { /* Prevent false positives by matching cancelations too */ const char *node = get_node_id(match); crm_action_t *cancelled = get_cancel_action(op_id, node); if(cancelled == NULL) { crm_debug("No match for deleted action %s (%s on %s)", rsc_op_xpath, op_id, node); abort_transition(INFINITY, tg_restart, "Resource op removal", match); if(op_match) { xmlXPathFreeObject(op_match); } crm_free(rsc_op_xpath); goto bail; } else { crm_debug("Deleted lrm_rsc_op %s on %s was for graph event %d", op_id, node, cancelled->id); } } if(op_match) { xmlXPathFreeObject(op_match); } crm_free(rsc_op_xpath); } } bail: if(xpathObj) { xmlXPathFreeObject(xpathObj); } } gboolean process_te_message(xmlNode *msg, xmlNode *xml_data) { const char *from = crm_element_value(msg, F_ORIG); const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); const char *ref = crm_element_value(msg, XML_ATTR_REFERENCE); const char *op = crm_element_value(msg, F_CRM_TASK); const char *type = crm_element_value(msg, F_CRM_MSG_TYPE); crm_debug_2("Processing %s (%s) message", op, ref); crm_log_xml(LOG_DEBUG_3, "ipc", msg); if(op == NULL){ /* error */ } else if(sys_to == NULL || strcasecmp(sys_to, CRM_SYSTEM_TENGINE) != 0) { crm_debug_2("Bad sys-to %s", crm_str(sys_to)); return FALSE; } else if(safe_str_eq(op, CRM_OP_INVOKE_LRM) && safe_str_eq(sys_from, CRM_SYSTEM_LRMD) /* && safe_str_eq(type, XML_ATTR_RESPONSE) */ ){ xmlXPathObject *xpathObj = NULL; crm_log_xml(LOG_DEBUG_2, "Processing (N)ACK", msg); crm_info("Processing (N)ACK %s from %s", crm_element_value(msg, XML_ATTR_REFERENCE), from); xpathObj = xpath_search(xml_data, "//"XML_LRM_TAG_RSC_OP); if(xpathObj) { process_resource_updates(xpathObj); xmlXPathFreeObject(xpathObj); xpathObj = NULL; } else { crm_log_xml(LOG_ERR, "Invalid (N)ACK", msg); return FALSE; } } else { crm_err("Unknown command: %s::%s from %s", type, op, sys_from); } crm_debug_3("finished processing message"); return TRUE; } void tengine_stonith_callback( stonith_t *stonith, const xmlNode *msg, int call_id, int rc, xmlNode *output, void *userdata) { char *uuid = NULL; int target_rc = -1; int stonith_id = -1; int transition_id = -1; crm_action_t *action = NULL; CRM_CHECK(userdata != NULL, return); crm_log_xml_info(output, "StonithOp"); crm_info("Stonith operation %d/%s: %s (%d)", call_id, (char*)userdata, stonith_error2string(rc), rc); if(AM_I_DC == FALSE) { return; } /* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */ /* op->call_id, op->optype, op->node_name, op->op_result, */ /* (char *)op->node_list, op->private_data); */ /* filter out old STONITH actions */ CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, &target_rc), crm_err("Invalid event detected"); goto bail; ); if(transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid) || transition_graph->id 
!= transition_id) { crm_info("Ignoring STONITH action initiated outside" " of the current transition"); goto bail; } /* this will mark the event complete if a match is found */ action = get_action(stonith_id, TRUE); if(action == NULL) { crm_err("Stonith action not matched"); goto bail; } if(rc == stonith_ok) { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); crm_info("Stonith of %s passed", target); send_stonith_update(action, target, uuid); } else { const char *target = crm_element_value_const(action->xml, XML_LRM_ATTR_TARGET); const char *allow_fail = crm_meta_value(action->params, XML_ATTR_TE_ALLOWFAIL); action->failed = TRUE; if(crm_is_true(allow_fail) == FALSE) { crm_err("Stonith of %s failed (%d)... aborting transition.", target, rc); abort_transition(INFINITY, tg_restart, "Stonith failed", NULL); } } update_graph(transition_graph, action); trigger_graph(); bail: crm_free(userdata); crm_free(uuid); return; } void cib_fencing_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if(rc < cib_ok) { - crm_err("CIB update failed: %s", cib_error2string(rc)); + crm_err("Fencing update %d for %s: failed - %s (%d)", + call_id, (char*)user_data, cib_error2string(rc), rc); crm_log_xml_warn(msg, "Failed update"); + abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL); + + } else { + crm_info("Fencing update %d for %s: complete", call_id, (char*)user_data); } crm_free(user_data); } void cib_action_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if(rc < cib_ok) { crm_err("Update %d FAILED: %s", call_id, cib_error2string(rc)); } } void cib_failcount_updated(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { if(rc < cib_ok) { crm_err("Update %d FAILED: %s", call_id, cib_error2string(rc)); } } gboolean action_timer_callback(gpointer data) { crm_action_timer_t *timer = NULL; CRM_CHECK(data != NULL, return FALSE); timer = (crm_action_timer_t*)data; stop_te_timer(timer); crm_warn("Timer popped (timeout=%d, abort_level=%d, complete=%s)", timer->timeout, transition_graph->abort_priority, transition_graph->complete?"true":"false"); CRM_CHECK(timer->action != NULL, return FALSE); if(transition_graph->complete) { crm_warn("Ignoring timeout while not in transition"); } else if(timer->reason == timeout_action_warn) { print_action( LOG_WARNING,"Action missed its timeout: ", timer->action); /* Don't check the FSA state * * We might also be in S_INTEGRATION or some other state waiting for this * action so we can close the transition and continue */ } else { /* fail the action */ gboolean send_update = TRUE; const char *task = crm_element_value(timer->action->xml, XML_LRM_ATTR_TASK); print_action(LOG_ERR, "Aborting transition, action lost: ", timer->action); timer->action->failed = TRUE; timer->action->confirmed = TRUE; abort_transition(INFINITY, tg_restart, "Action lost", NULL); update_graph(transition_graph, timer->action); trigger_graph(); if(timer->action->type != action_type_rsc) { send_update = FALSE; } else if(safe_str_eq(task, "cancel")) { /* we dont need to update the CIB with these */ send_update = FALSE; } else if(safe_str_eq(task, "stop")) { /* *never* update the CIB with these */ send_update = FALSE; } if(send_update) { /* cib_action_update(timer->action, LRM_OP_PENDING, EXECRA_STATUS_UNKNOWN); */ cib_action_update(timer->action, LRM_OP_TIMEOUT, EXECRA_UNKNOWN_ERROR); } } return FALSE; } diff --git 
a/crmd/te_utils.c b/crmd/te_utils.c index cd4b6d3de8..742fa6acf7 100644 --- a/crmd/te_utils.c +++ b/crmd/te_utils.c @@ -1,348 +1,356 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include GCHSource *stonith_src = NULL; crm_trigger_t *stonith_reconnect = NULL; static gboolean fail_incompletable_stonith(crm_graph_t *graph) { const char *task = NULL; xmlNode *last_action = NULL; if(graph == NULL) { return FALSE; } slist_iter( synapse, synapse_t, graph->synapses, lpc, if (synapse->confirmed) { continue; } slist_iter( action, crm_action_t, synapse->actions, lpc, if(action->type != action_type_crm || action->confirmed) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if(task && safe_str_eq(task, CRM_OP_FENCE)) { action->failed = TRUE; last_action = action->xml; update_graph(graph, action); crm_notice("Failing action %d (%s): STONITHd terminated", action->id, ID(action->xml)); } ); ); if(last_action != NULL) { crm_warn("STONITHd failure resulted in un-runnable actions"); abort_transition(INFINITY, tg_restart, "Stonith failure", last_action); return TRUE; } return FALSE; } static void tengine_stonith_connection_destroy(stonith_t *st, const char *event, xmlNode *msg) { if(is_set(fsa_input_register, R_ST_REQUIRED)) { crm_crit("Fencing daemon connection failed"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Fencing daemon disconnected"); } /* cbchan will be garbage at this point, arrange for it to be reset */ stonith_api->state = stonith_disconnected; if(AM_I_DC) { fail_incompletable_stonith(transition_graph); trigger_graph(); } } /* */ static void tengine_stonith_notify(stonith_t *st, const char *event, xmlNode *msg) { int rc = -99; const char *origin = NULL; const char *target = NULL; + const char *executioner = NULL; xmlNode *action = get_xpath_object("//st-data", msg, LOG_ERR); if(action == NULL) { crm_log_xml(LOG_ERR, "Notify data not found", msg); return; } crm_log_xml(LOG_DEBUG, "stonith_notify", msg); crm_element_value_int(msg, F_STONITH_RC, &rc); origin = crm_element_value(action, F_STONITH_ORIGIN); target = crm_element_value(action, F_STONITH_TARGET); + executioner = crm_element_value(action, F_STONITH_DELEGATE); crm_info("Peer %s was terminated (%s) by %s for %s (ref=%s): %s", target, crm_element_value(action, F_STONITH_OPERATION), - crm_element_value(action, F_STONITH_DELEGATE), - origin, + executioner, origin, crm_element_value(action, F_STONITH_REMOTE), stonith_error2string(rc)); if(rc == stonith_ok && safe_str_eq(target, origin)) { if(fsa_our_dc == NULL || safe_str_eq(fsa_our_dc, target)) { const char *uuid = get_uuid(target); crm_notice("Target was our leader %s/%s (recorded leader: %s)", target, uuid, fsa_our_dc?fsa_our_dc:""); - /* send_stonith_update(action, target, uuid); */ + 
/* There's no need for everyone to update the cib. + * Have the node that performed the op do the update too. + * In the unlikely event that both die, the DC would be + * shot a second time which is not ideal but safe. + */ + if(safe_str_eq(executioner, fsa_our_uname)) { + send_stonith_update(NULL, target, uuid); + } } } } gboolean te_connect_stonith(gpointer user_data) { int lpc = 0; int rc = stonith_ok; if(stonith_api == NULL) { stonith_api = stonith_api_new(); } if(stonith_api->state != stonith_disconnected) { crm_debug_2("Still connected"); return TRUE; } for(lpc = 0; lpc < 30; lpc++) { crm_info("Attempting connection to fencing daemon..."); sleep(1); rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL, NULL); if(rc == stonith_ok) { break; } if(user_data != NULL) { crm_err("Sign-in failed: triggered a retry"); mainloop_set_trigger(stonith_reconnect); return TRUE; } crm_err("Sign-in failed: pausing and trying again in 2s..."); sleep(1); } CRM_CHECK(rc == stonith_ok, return TRUE); /* If not, we failed 30 times... just get out */ stonith_api->cmds->register_notification( stonith_api, T_STONITH_NOTIFY_DISCONNECT, tengine_stonith_connection_destroy); stonith_api->cmds->register_notification( stonith_api, STONITH_OP_FENCE, tengine_stonith_notify); crm_info("Connected"); return TRUE; } gboolean stop_te_timer(crm_action_timer_t *timer) { const char *timer_desc = "action timer"; if(timer == NULL) { return FALSE; } if(timer->reason == timeout_abort) { timer_desc = "global timer"; crm_debug_2("Stopping %s", timer_desc); } if(timer->source_id != 0) { crm_debug_2("Stopping %s", timer_desc); g_source_remove(timer->source_id); timer->source_id = 0; } else { crm_debug_2("%s was already stopped", timer_desc); return FALSE; } return TRUE; } gboolean te_graph_trigger(gpointer user_data) { enum transition_status graph_rc = -1; if(transition_graph == NULL) { crm_debug("Nothing to do"); return TRUE; } crm_debug_2("Invoking graph %d in state %s", transition_graph->id, fsa_state2string(fsa_state)); switch(fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: return TRUE; break; default: break; } if(transition_graph->complete == FALSE) { graph_rc = run_graph(transition_graph); print_graph(LOG_DEBUG_3, transition_graph); if(graph_rc == transition_active) { crm_debug_3("Transition not yet complete"); return TRUE; } else if(graph_rc == transition_pending) { crm_debug_3("Transition not yet complete - no actions fired"); return TRUE; } if(graph_rc != transition_complete) { crm_err("Transition failed: %s", transition_status(graph_rc)); print_graph(LOG_WARNING, transition_graph); } } crm_info("Transition %d is now complete", transition_graph->id); transition_graph->complete = TRUE; notify_crmd(transition_graph); return TRUE; } void trigger_graph_processing(const char *fn, int line) { mainloop_set_trigger(transition_trigger); crm_debug_2("%s:%d - Triggered graph processing", fn, line); } void abort_transition_graph( int abort_priority, enum transition_action abort_action, const char *abort_text, xmlNode *reason, const char *fn, int line) { int log_level = LOG_INFO; const char *magic = NULL; CRM_CHECK(transition_graph != NULL, return); if(reason) { int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; xmlNode *diff = get_xpath_object("//"F_CIB_UPDATE_RESULT"//diff", reason, LOG_DEBUG_2); magic = crm_element_value(reason, 
XML_ATTR_TRANSITION_MAGIC); if(diff) { cib_diff_version_details( diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); do_crm_log(log_level, "%s:%d - Triggered transition abort (complete=%d, tag=%s, id=%s, magic=%s, cib=%d.%d.%d) : %s", fn, line, transition_graph->complete, TYPE(reason), ID(reason), magic?magic:"NA", diff_add_admin_epoch,diff_add_epoch,diff_add_updates, abort_text); } else { do_crm_log(log_level, "%s:%d - Triggered transition abort (complete=%d, tag=%s, id=%s, magic=%s) : %s", fn, line, transition_graph->complete, TYPE(reason), ID(reason), magic?magic:"NA", abort_text); } } else { do_crm_log(log_level, "%s:%d - Triggered transition abort (complete=%d) : %s", fn, line, transition_graph->complete, abort_text); } switch(fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: do_crm_log(log_level, "Abort suppressed: state=%s (complete=%d)", fsa_state2string(fsa_state), transition_graph->complete); return; default: break; } if(magic == NULL && reason != NULL) { crm_log_xml(log_level+1, "Cause", reason); } /* Make sure any queued calculations are discarded ASAP */ crm_free(fsa_pe_ref); fsa_pe_ref = NULL; if(transition_graph->complete) { register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); return; } update_abort_priority( transition_graph, abort_priority, abort_action, abort_text); mainloop_set_trigger(transition_trigger); } diff --git a/crmd/utils.c b/crmd/utils.c index 3d14645e13..d94a7f1e6b 100644 --- a/crmd/utils.c +++ b/crmd/utils.c @@ -1,1296 +1,1296 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include <../tools/attrd.h> /* A_DC_TIMER_STOP, A_DC_TIMER_START, * A_FINALIZE_TIMER_STOP, A_FINALIZE_TIMER_START * A_INTEGRATE_TIMER_STOP, A_INTEGRATE_TIMER_START */ void do_timer_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t *msg_data) { gboolean timer_op_ok = TRUE; if(action & A_DC_TIMER_STOP) { timer_op_ok = crm_timer_stop(election_trigger); } else if(action & A_FINALIZE_TIMER_STOP) { timer_op_ok = crm_timer_stop(finalization_timer); } else if(action & A_INTEGRATE_TIMER_STOP) { timer_op_ok = crm_timer_stop(integration_timer); /* } else if(action & A_ELECTION_TIMEOUT_STOP) { */ /* timer_op_ok = crm_timer_stop(election_timeout); */ } /* dont start a timer that wasnt already running */ if(action & A_DC_TIMER_START && timer_op_ok) { crm_timer_start(election_trigger); if(AM_I_DC) { /* there can be only one */ register_fsa_input(cause, I_ELECTION, NULL); } } else if(action & A_FINALIZE_TIMER_START) { crm_timer_start(finalization_timer); } else if(action & A_INTEGRATE_TIMER_START) { crm_timer_start(integration_timer); /* } else if(action & A_ELECTION_TIMEOUT_START) { */ /* crm_timer_start(election_timeout); */ } } const char * get_timer_desc(fsa_timer_t *timer) { if(timer == election_trigger) { return "Election Trigger"; } else if(timer == election_timeout) { return "Election Timeout"; } else if(timer == shutdown_escalation_timer) { return "Shutdown Escalation"; } else if(timer == integration_timer) { return "Integration Timer"; } else if(timer == finalization_timer) { return "Finalization Timer"; } else if(timer == wait_timer) { return "Wait Timer"; } else if(timer == recheck_timer) { return "PEngine Recheck Timer"; } return "Unknown Timer"; } gboolean crm_timer_popped(gpointer data) { fsa_timer_t *timer = (fsa_timer_t *)data; if(timer == wait_timer || timer == recheck_timer || timer == finalization_timer || timer == election_trigger) { crm_info("%s (%s) just popped!", get_timer_desc(timer), fsa_input2string(timer->fsa_input)); } else { crm_err("%s (%s) just popped!", get_timer_desc(timer), fsa_input2string(timer->fsa_input)); } if(timer->repeat == FALSE) { crm_timer_stop(timer); /* make it _not_ go off again */ } if(timer->fsa_input == I_INTEGRATED) { crm_info("Welcomed: %d, Integrated: %d", g_hash_table_size(welcomed_nodes), g_hash_table_size(integrated_nodes)); if(g_hash_table_size(welcomed_nodes) == 0) { /* If we don't even have ourself, start again */ register_fsa_error_adv( C_FSA_INTERNAL, I_ELECTION, NULL, NULL, __FUNCTION__); } else { register_fsa_input_before( C_TIMER_POPPED, timer->fsa_input, NULL); } } else if(timer->fsa_input == I_PE_CALC && fsa_state != S_IDLE) { crm_debug("Discarding %s event in state: %s", fsa_input2string(timer->fsa_input), fsa_state2string(fsa_state)); } else if(timer->fsa_input == I_FINALIZED && fsa_state != S_FINALIZE_JOIN) { crm_debug("Discarding %s event in state: %s", fsa_input2string(timer->fsa_input), fsa_state2string(fsa_state)); } else if(timer->fsa_input != I_NULL) { register_fsa_input(C_TIMER_POPPED, timer->fsa_input, NULL); } crm_debug_3("Triggering FSA: %s", __FUNCTION__); mainloop_set_trigger(fsa_source); return TRUE; } gboolean 
crm_timer_start(fsa_timer_t *timer) { const char *timer_desc = get_timer_desc(timer); if(timer->source_id == 0 && timer->period_ms > 0) { timer->source_id = g_timeout_add( timer->period_ms, timer->callback, (void*)timer); CRM_ASSERT(timer->source_id != 0); crm_debug("Started %s (%s:%dms), src=%d", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms, timer->source_id); } else if(timer->period_ms < 0) { crm_err("Tried to start %s (%s:%dms) with a -ve period", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms); } else { crm_debug("%s (%s:%dms) already running: src=%d", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms, timer->source_id); return FALSE; } return TRUE; } gboolean crm_timer_stop(fsa_timer_t *timer) { const char *timer_desc = get_timer_desc(timer); if(timer == NULL) { crm_err("Attempted to stop NULL timer"); return FALSE; } else if(timer->source_id != 0) { crm_debug_2("Stopping %s (%s:%dms), src=%d", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms, timer->source_id); g_source_remove(timer->source_id); timer->source_id = 0; } else { crm_debug_2("%s (%s:%dms) already stopped", timer_desc, fsa_input2string(timer->fsa_input), timer->period_ms); return FALSE; } return TRUE; } const char * fsa_input2string(enum crmd_fsa_input input) { const char *inputAsText = NULL; switch(input){ case I_NULL: inputAsText = "I_NULL"; break; case I_CIB_OP: inputAsText = "I_CIB_OP (unused)"; break; case I_CIB_UPDATE: inputAsText = "I_CIB_UPDATE"; break; case I_DC_TIMEOUT: inputAsText = "I_DC_TIMEOUT"; break; case I_ELECTION: inputAsText = "I_ELECTION"; break; case I_PE_CALC: inputAsText = "I_PE_CALC"; break; case I_RELEASE_DC: inputAsText = "I_RELEASE_DC"; break; case I_ELECTION_DC: inputAsText = "I_ELECTION_DC"; break; case I_ERROR: inputAsText = "I_ERROR"; break; case I_FAIL: inputAsText = "I_FAIL"; break; case I_INTEGRATED: inputAsText = "I_INTEGRATED"; break; case I_FINALIZED: inputAsText = "I_FINALIZED"; break; case I_NODE_JOIN: inputAsText = "I_NODE_JOIN"; break; case I_JOIN_OFFER: inputAsText = "I_JOIN_OFFER"; break; case I_JOIN_REQUEST: inputAsText = "I_JOIN_REQUEST"; break; case I_JOIN_RESULT: inputAsText = "I_JOIN_RESULT"; break; case I_NOT_DC: inputAsText = "I_NOT_DC"; break; case I_RECOVERED: inputAsText = "I_RECOVERED"; break; case I_RELEASE_FAIL: inputAsText = "I_RELEASE_FAIL"; break; case I_RELEASE_SUCCESS: inputAsText = "I_RELEASE_SUCCESS"; break; case I_RESTART: inputAsText = "I_RESTART"; break; case I_PE_SUCCESS: inputAsText = "I_PE_SUCCESS"; break; case I_ROUTER: inputAsText = "I_ROUTER"; break; case I_SHUTDOWN: inputAsText = "I_SHUTDOWN"; break; case I_STARTUP: inputAsText = "I_STARTUP"; break; case I_TE_SUCCESS: inputAsText = "I_TE_SUCCESS"; break; case I_STOP: inputAsText = "I_STOP"; break; case I_DC_HEARTBEAT: inputAsText = "I_DC_HEARTBEAT"; break; case I_WAIT_FOR_EVENT: inputAsText = "I_WAIT_FOR_EVENT"; break; case I_LRM_EVENT: inputAsText = "I_LRM_EVENT"; break; case I_PENDING: inputAsText = "I_PENDING"; break; case I_HALT: inputAsText = "I_HALT"; break; case I_TERMINATE: inputAsText = "I_TERMINATE"; break; case I_ILLEGAL: inputAsText = "I_ILLEGAL"; break; } if(inputAsText == NULL) { crm_err("Input %d is unknown", input); inputAsText = ""; } return inputAsText; } const char * fsa_state2string(enum crmd_fsa_state state) { const char *stateAsText = NULL; switch(state){ case S_IDLE: stateAsText = "S_IDLE"; break; case S_ELECTION: stateAsText = "S_ELECTION"; break; case S_INTEGRATION: stateAsText = "S_INTEGRATION"; break; 
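fsa_input2string(), fsa_state2string() and fsa_cause2string() in this hunk all follow the same exhaustive-switch pattern, logging an error and returning an empty string for unknown values. The same mapping can also be expressed as a lookup table; the sketch below is only an illustration with made-up enum values, not how crmd actually implements it (and it would not apply to fsa_action2string(), which switches on bit flags rather than sequential values):

    #include <stdio.h>

    /* Hypothetical subset of FSA states, for illustration only */
    enum demo_state { S_IDLE_DEMO, S_ELECTION_DEMO, S_INTEGRATION_DEMO, S_MAX_DEMO };

    /* Designated initializers keep the mapping next to the enum and make a
     * missing entry easy to spot: it simply stays NULL. */
    static const char *state_names[S_MAX_DEMO] = {
        [S_IDLE_DEMO]        = "S_IDLE",
        [S_ELECTION_DEMO]    = "S_ELECTION",
        [S_INTEGRATION_DEMO] = "S_INTEGRATION",
    };

    static const char *demo_state2string(int state)
    {
        if (state < 0 || state >= S_MAX_DEMO || state_names[state] == NULL) {
            return "<unknown>";
        }
        return state_names[state];
    }

    int main(void)
    {
        printf("%s\n", demo_state2string(S_ELECTION_DEMO));
        printf("%s\n", demo_state2string(99));   /* out of range -> <unknown> */
        return 0;
    }

The switch form used in the patch has the advantage that the compiler can warn about unhandled enum values, which matters for a state machine that gains inputs and states over time.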
case S_FINALIZE_JOIN: stateAsText = "S_FINALIZE_JOIN"; break; case S_NOT_DC: stateAsText = "S_NOT_DC"; break; case S_POLICY_ENGINE: stateAsText = "S_POLICY_ENGINE"; break; case S_RECOVERY: stateAsText = "S_RECOVERY"; break; case S_RELEASE_DC: stateAsText = "S_RELEASE_DC"; break; case S_PENDING: stateAsText = "S_PENDING"; break; case S_STOPPING: stateAsText = "S_STOPPING"; break; case S_TERMINATE: stateAsText = "S_TERMINATE"; break; case S_TRANSITION_ENGINE: stateAsText = "S_TRANSITION_ENGINE"; break; case S_STARTING: stateAsText = "S_STARTING"; break; case S_HALT: stateAsText = "S_HALT"; break; case S_ILLEGAL: stateAsText = "S_ILLEGAL"; break; } if(stateAsText == NULL) { crm_err("State %d is unknown", state); stateAsText = ""; } return stateAsText; } const char * fsa_cause2string(enum crmd_fsa_cause cause) { const char *causeAsText = NULL; switch(cause){ case C_UNKNOWN: causeAsText = "C_UNKNOWN"; break; case C_STARTUP: causeAsText = "C_STARTUP"; break; case C_IPC_MESSAGE: causeAsText = "C_IPC_MESSAGE"; break; case C_HA_MESSAGE: causeAsText = "C_HA_MESSAGE"; break; case C_CCM_CALLBACK: causeAsText = "C_CCM_CALLBACK"; break; case C_TIMER_POPPED: causeAsText = "C_TIMER_POPPED"; break; case C_SHUTDOWN: causeAsText = "C_SHUTDOWN"; break; case C_HEARTBEAT_FAILED: causeAsText = "C_HEARTBEAT_FAILED"; break; case C_SUBSYSTEM_CONNECT: causeAsText = "C_SUBSYSTEM_CONNECT"; break; case C_LRM_OP_CALLBACK: causeAsText = "C_LRM_OP_CALLBACK"; break; case C_LRM_MONITOR_CALLBACK: causeAsText = "C_LRM_MONITOR_CALLBACK"; break; case C_CRMD_STATUS_CALLBACK: causeAsText = "C_CRMD_STATUS_CALLBACK"; break; case C_HA_DISCONNECT: causeAsText = "C_HA_DISCONNECT"; break; case C_FSA_INTERNAL: causeAsText = "C_FSA_INTERNAL"; break; case C_ILLEGAL: causeAsText = "C_ILLEGAL"; break; } if(causeAsText == NULL) { crm_err("Cause %d is unknown", cause); causeAsText = ""; } return causeAsText; } const char * fsa_action2string(long long action) { const char *actionAsText = NULL; switch(action){ case A_NOTHING: actionAsText = "A_NOTHING"; break; case A_ELECTION_START: actionAsText = "A_ELECTION_START"; break; case A_DC_JOIN_FINAL: actionAsText = "A_DC_JOIN_FINAL"; break; case A_READCONFIG: actionAsText = "A_READCONFIG"; break; case O_RELEASE: actionAsText = "O_RELEASE"; break; case A_STARTUP: actionAsText = "A_STARTUP"; break; case A_STARTED: actionAsText = "A_STARTED"; break; case A_HA_CONNECT: actionAsText = "A_HA_CONNECT"; break; case A_HA_DISCONNECT: actionAsText = "A_HA_DISCONNECT"; break; case A_LRM_CONNECT: actionAsText = "A_LRM_CONNECT"; break; case A_LRM_EVENT: actionAsText = "A_LRM_EVENT"; break; case A_LRM_INVOKE: actionAsText = "A_LRM_INVOKE"; break; case A_LRM_DISCONNECT: actionAsText = "A_LRM_DISCONNECT"; break; case A_CL_JOIN_QUERY: actionAsText = "A_CL_JOIN_QUERY"; break; case A_DC_TIMER_STOP: actionAsText = "A_DC_TIMER_STOP"; break; case A_DC_TIMER_START: actionAsText = "A_DC_TIMER_START"; break; case A_INTEGRATE_TIMER_START: actionAsText = "A_INTEGRATE_TIMER_START"; break; case A_INTEGRATE_TIMER_STOP: actionAsText = "A_INTEGRATE_TIMER_STOP"; break; case A_FINALIZE_TIMER_START: actionAsText = "A_FINALIZE_TIMER_START"; break; case A_FINALIZE_TIMER_STOP: actionAsText = "A_FINALIZE_TIMER_STOP"; break; case A_ELECTION_COUNT: actionAsText = "A_ELECTION_COUNT"; break; case A_ELECTION_VOTE: actionAsText = "A_ELECTION_VOTE"; break; case A_ELECTION_CHECK: actionAsText = "A_ELECTION_CHECK"; break; case A_CL_JOIN_ANNOUNCE: actionAsText = "A_CL_JOIN_ANNOUNCE"; break; case A_CL_JOIN_REQUEST: actionAsText = 
"A_CL_JOIN_REQUEST"; break; case A_CL_JOIN_RESULT: actionAsText = "A_CL_JOIN_RESULT"; break; case A_DC_JOIN_OFFER_ALL: actionAsText = "A_DC_JOIN_OFFER_ALL"; break; case A_DC_JOIN_OFFER_ONE: actionAsText = "A_DC_JOIN_OFFER_ONE"; break; case A_DC_JOIN_PROCESS_REQ: actionAsText = "A_DC_JOIN_PROCESS_REQ"; break; case A_DC_JOIN_PROCESS_ACK: actionAsText = "A_DC_JOIN_PROCESS_ACK"; break; case A_DC_JOIN_FINALIZE: actionAsText = "A_DC_JOIN_FINALIZE"; break; case A_MSG_PROCESS: actionAsText = "A_MSG_PROCESS"; break; case A_MSG_ROUTE: actionAsText = "A_MSG_ROUTE"; break; case A_RECOVER: actionAsText = "A_RECOVER"; break; case A_DC_RELEASE: actionAsText = "A_DC_RELEASE"; break; case A_DC_RELEASED: actionAsText = "A_DC_RELEASED"; break; case A_DC_TAKEOVER: actionAsText = "A_DC_TAKEOVER"; break; case A_SHUTDOWN: actionAsText = "A_SHUTDOWN"; break; case A_SHUTDOWN_REQ: actionAsText = "A_SHUTDOWN_REQ"; break; case A_STOP: actionAsText = "A_STOP "; break; case A_EXIT_0: actionAsText = "A_EXIT_0"; break; case A_EXIT_1: actionAsText = "A_EXIT_1"; break; case A_CCM_CONNECT: actionAsText = "A_CCM_CONNECT"; break; case A_CCM_DISCONNECT: actionAsText = "A_CCM_DISCONNECT"; break; case O_CIB_RESTART: actionAsText = "O_CIB_RESTART"; break; case A_CIB_START: actionAsText = "A_CIB_START"; break; case A_CIB_STOP: actionAsText = "A_CIB_STOP"; break; case A_TE_INVOKE: actionAsText = "A_TE_INVOKE"; break; case O_TE_RESTART: actionAsText = "O_TE_RESTART"; break; case A_TE_START: actionAsText = "A_TE_START"; break; case A_TE_STOP: actionAsText = "A_TE_STOP"; break; case A_TE_HALT: actionAsText = "A_TE_HALT"; break; case A_TE_CANCEL: actionAsText = "A_TE_CANCEL"; break; case A_PE_INVOKE: actionAsText = "A_PE_INVOKE"; break; case O_PE_RESTART: actionAsText = "O_PE_RESTART"; break; case A_PE_START: actionAsText = "A_PE_START"; break; case A_PE_STOP: actionAsText = "A_PE_STOP"; break; case A_NODE_BLOCK: actionAsText = "A_NODE_BLOCK"; break; case A_UPDATE_NODESTATUS: actionAsText = "A_UPDATE_NODESTATUS"; break; case A_LOG: actionAsText = "A_LOG "; break; case A_ERROR: actionAsText = "A_ERROR "; break; case A_WARN: actionAsText = "A_WARN "; break; } if(actionAsText == NULL) { crm_err("Action %.16llx is unknown", action); actionAsText = ""; } return actionAsText; } void fsa_dump_inputs(int log_level, const char *text, long long input_register) { if(input_register == A_NOTHING) { return; } if(text == NULL) { text = "Input register contents:"; } if(is_set(input_register, R_THE_DC)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_THE_DC)", text, R_THE_DC); } if(is_set(input_register, R_STARTING)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_STARTING)", text, R_STARTING); } if(is_set(input_register, R_SHUTDOWN)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_SHUTDOWN)", text, R_SHUTDOWN); } if(is_set(input_register, R_STAYDOWN)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_STAYDOWN)", text, R_STAYDOWN); } if(is_set(input_register, R_JOIN_OK)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_JOIN_OK)", text, R_JOIN_OK); } if(is_set(input_register, R_READ_CONFIG)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_READ_CONFIG)", text, R_READ_CONFIG); } if(is_set(input_register, R_INVOKE_PE)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_INVOKE_PE)", text, R_INVOKE_PE); } if(is_set(input_register, R_CIB_CONNECTED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_CIB_CONNECTED)", text, R_CIB_CONNECTED); } if(is_set(input_register, R_PE_CONNECTED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_PE_CONNECTED)", text, 
R_PE_CONNECTED); } if(is_set(input_register, R_TE_CONNECTED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_TE_CONNECTED)", text, R_TE_CONNECTED); } if(is_set(input_register, R_LRM_CONNECTED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_LRM_CONNECTED)", text, R_LRM_CONNECTED); } if(is_set(input_register, R_CIB_REQUIRED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_CIB_REQUIRED)", text, R_CIB_REQUIRED); } if(is_set(input_register, R_PE_REQUIRED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_PE_REQUIRED)", text, R_PE_REQUIRED); } if(is_set(input_register, R_TE_REQUIRED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_TE_REQUIRED)", text, R_TE_REQUIRED); } if(is_set(input_register, R_REQ_PEND)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_REQ_PEND)", text, R_REQ_PEND); } if(is_set(input_register, R_PE_PEND)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_PE_PEND)", text, R_PE_PEND); } if(is_set(input_register, R_TE_PEND)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_TE_PEND)", text, R_TE_PEND); } if(is_set(input_register, R_RESP_PEND)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_RESP_PEND)", text, R_RESP_PEND); } if(is_set(input_register, R_CIB_DONE)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_CIB_DONE)", text, R_CIB_DONE); } if(is_set(input_register, R_HAVE_CIB)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_HAVE_CIB)", text, R_HAVE_CIB); } if(is_set(input_register, R_CIB_ASKED)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_CIB_ASKED)", text, R_CIB_ASKED); } if(is_set(input_register, R_CCM_DATA)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_CCM_DATA)", text, R_CCM_DATA); } if(is_set(input_register, R_PEER_DATA)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_PEER_DATA)", text, R_PEER_DATA); } if(is_set(input_register, R_IN_RECOVERY)) { do_crm_log_unlikely(log_level, "%s %.16llx (R_IN_RECOVERY)", text, R_IN_RECOVERY); } } void fsa_dump_actions(long long action, const char *text) { int log_level = LOG_DEBUG_3; if(is_set(action, A_READCONFIG)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_READCONFIG) %s", A_READCONFIG, text); } if(is_set(action, A_STARTUP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_STARTUP) %s", A_STARTUP, text); } if(is_set(action, A_STARTED)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_STARTED) %s", A_STARTED, text); } if(is_set(action, A_HA_CONNECT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CONNECT) %s", A_HA_CONNECT, text); } if(is_set(action, A_HA_DISCONNECT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DISCONNECT) %s", A_HA_DISCONNECT, text); } if(is_set(action, A_LRM_CONNECT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_LRM_CONNECT) %s", A_LRM_CONNECT, text); } if(is_set(action, A_LRM_EVENT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_LRM_EVENT) %s", A_LRM_EVENT, text); } if(is_set(action, A_LRM_INVOKE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_LRM_INVOKE) %s", A_LRM_INVOKE, text); } if(is_set(action, A_LRM_DISCONNECT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_LRM_DISCONNECT) %s", A_LRM_DISCONNECT, text); } if(is_set(action, A_DC_TIMER_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_TIMER_STOP) %s", A_DC_TIMER_STOP, text); } if(is_set(action, A_DC_TIMER_START)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_TIMER_START) %s", A_DC_TIMER_START, text); } if(is_set(action, A_INTEGRATE_TIMER_START)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_INTEGRATE_TIMER_START) %s", A_INTEGRATE_TIMER_START, text); } if(is_set(action, 
A_INTEGRATE_TIMER_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_INTEGRATE_TIMER_STOP) %s", A_INTEGRATE_TIMER_STOP, text); } if(is_set(action, A_FINALIZE_TIMER_START)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_FINALIZE_TIMER_START) %s", A_FINALIZE_TIMER_START, text); } if(is_set(action, A_FINALIZE_TIMER_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_FINALIZE_TIMER_STOP) %s", A_FINALIZE_TIMER_STOP, text); } if(is_set(action, A_ELECTION_COUNT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_ELECTION_COUNT) %s", A_ELECTION_COUNT, text); } if(is_set(action, A_ELECTION_VOTE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_ELECTION_VOTE) %s", A_ELECTION_VOTE, text); } if(is_set(action, A_ELECTION_CHECK)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_ELECTION_CHECK) %s", A_ELECTION_CHECK, text); } if(is_set(action, A_CL_JOIN_ANNOUNCE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CL_JOIN_ANNOUNCE) %s", A_CL_JOIN_ANNOUNCE, text); } if(is_set(action, A_CL_JOIN_REQUEST)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CL_JOIN_REQUEST) %s", A_CL_JOIN_REQUEST, text); } if(is_set(action, A_CL_JOIN_RESULT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CL_JOIN_RESULT) %s", A_CL_JOIN_RESULT, text); } if(is_set(action, A_DC_JOIN_OFFER_ALL)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_JOIN_OFFER_ALL) %s", A_DC_JOIN_OFFER_ALL, text); } if(is_set(action, A_DC_JOIN_OFFER_ONE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_JOIN_OFFER_ONE) %s", A_DC_JOIN_OFFER_ONE, text); } if(is_set(action, A_DC_JOIN_PROCESS_REQ)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_JOIN_PROCESS_REQ) %s", A_DC_JOIN_PROCESS_REQ, text); } if(is_set(action, A_DC_JOIN_PROCESS_ACK)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_JOIN_PROCESS_ACK) %s", A_DC_JOIN_PROCESS_ACK, text); } if(is_set(action, A_DC_JOIN_FINALIZE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_JOIN_FINALIZE) %s", A_DC_JOIN_FINALIZE, text); } if(is_set(action, A_MSG_PROCESS)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_MSG_PROCESS) %s", A_MSG_PROCESS, text); } if(is_set(action, A_MSG_ROUTE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_MSG_ROUTE) %s", A_MSG_ROUTE, text); } if(is_set(action, A_RECOVER)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_RECOVER) %s", A_RECOVER, text); } if(is_set(action, A_DC_RELEASE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_RELEASE) %s", A_DC_RELEASE, text); } if(is_set(action, A_DC_RELEASED)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_RELEASED) %s", A_DC_RELEASED, text); } if(is_set(action, A_DC_TAKEOVER)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_DC_TAKEOVER) %s", A_DC_TAKEOVER, text); } if(is_set(action, A_SHUTDOWN)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_SHUTDOWN) %s", A_SHUTDOWN, text); } if(is_set(action, A_SHUTDOWN_REQ)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_SHUTDOWN_REQ) %s", A_SHUTDOWN_REQ, text); } if(is_set(action, A_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_STOP ) %s", A_STOP , text); } if(is_set(action, A_EXIT_0)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_EXIT_0) %s", A_EXIT_0, text); } if(is_set(action, A_EXIT_1)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_EXIT_1) %s", A_EXIT_1, text); } if(is_set(action, A_CCM_CONNECT)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CCM_CONNECT) %s", A_CCM_CONNECT, text); } if(is_set(action, A_CCM_DISCONNECT)) { do_crm_log_unlikely(log_level, "Action 
%.16llx (A_CCM_DISCONNECT) %s", A_CCM_DISCONNECT, text); } if(is_set(action, A_CIB_START)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CIB_START) %s", A_CIB_START, text); } if(is_set(action, A_CIB_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_CIB_STOP) %s", A_CIB_STOP, text); } if(is_set(action, A_TE_INVOKE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_TE_INVOKE) %s", A_TE_INVOKE, text); } if(is_set(action, A_TE_START)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_TE_START) %s", A_TE_START, text); } if(is_set(action, A_TE_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_TE_STOP) %s", A_TE_STOP, text); } if(is_set(action, A_TE_CANCEL)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_TE_CANCEL) %s", A_TE_CANCEL, text); } if(is_set(action, A_PE_INVOKE)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_PE_INVOKE) %s", A_PE_INVOKE, text); } if(is_set(action, A_PE_START)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_PE_START) %s", A_PE_START, text); } if(is_set(action, A_PE_STOP)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_PE_STOP) %s", A_PE_STOP, text); } if(is_set(action, A_NODE_BLOCK)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_NODE_BLOCK) %s", A_NODE_BLOCK, text); } if(is_set(action, A_UPDATE_NODESTATUS)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_UPDATE_NODESTATUS) %s", A_UPDATE_NODESTATUS, text); } if(is_set(action, A_LOG)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_LOG ) %s", A_LOG, text); } if(is_set(action, A_ERROR)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_ERROR ) %s", A_ERROR, text); } if(is_set(action, A_WARN)) { do_crm_log_unlikely(log_level, "Action %.16llx (A_WARN ) %s", A_WARN, text); } } void create_node_entry(const char *uuid, const char *uname, const char *type) { /* make sure a node entry exists for the new node * * this will add anyone except the first ever node in the cluster * since it will also be the DC which doesnt go through the * join process (with itself). We can include a special case * later if desired. 
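 *
 * Note (added for clarity, inferred from the code below): the
 * fsa_cib_anon_update() call is fire-and-forget here -- no completion
 * callback is registered -- and the cib_can_create option lets the CIB
 * create the <node> entry if it does not already exist.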
*/ xmlNode *tmp1 = create_xml_node(NULL, XML_CIB_TAG_NODE); crm_debug_3("Creating node entry for %s", uname); set_uuid(tmp1, XML_ATTR_UUID, uname); crm_xml_add(tmp1, XML_ATTR_UNAME, uname); crm_xml_add(tmp1, XML_ATTR_TYPE, type); fsa_cib_anon_update(XML_CIB_TAG_NODES, tmp1, cib_scope_local|cib_quorum_override|cib_can_create); free_xml(tmp1); } xmlNode* create_node_state( const char *uname, const char *ha_state, const char *ccm_state, const char *crmd_state, const char *join_state, const char *exp_state, gboolean clear_shutdown, const char *src) { xmlNode *node_state = create_xml_node(NULL, XML_CIB_TAG_STATE); crm_debug_2("%s Creating node state entry for %s", src, uname); set_uuid(node_state, XML_ATTR_UUID, uname); if(crm_element_value(node_state, XML_ATTR_UUID) == NULL) { crm_debug("Node %s is not a cluster member", uname); free_xml(node_state); return NULL; } crm_xml_add(node_state, XML_ATTR_UNAME, uname); crm_xml_add(node_state, XML_CIB_ATTR_HASTATE, ha_state); crm_xml_add(node_state, XML_CIB_ATTR_INCCM, ccm_state); crm_xml_add(node_state, XML_CIB_ATTR_CRMDSTATE, crmd_state); crm_xml_add(node_state, XML_CIB_ATTR_JOINSTATE, join_state); crm_xml_add(node_state, XML_CIB_ATTR_EXPSTATE, exp_state); crm_xml_add(node_state, XML_ATTR_ORIGIN, src); if(clear_shutdown) { crm_xml_add(node_state, XML_CIB_ATTR_SHUTDOWN, "0"); } crm_log_xml_debug_3(node_state, "created"); return node_state; } extern GHashTable *ipc_clients; void process_client_disconnect(crmd_client_t *curr_client) { struct crm_subsystem_s *the_subsystem = NULL; CRM_CHECK(curr_client != NULL, return); crm_debug_2("received HUP from %s", curr_client->table_key); if (curr_client->sub_sys == NULL) { crm_debug_2("Client hadn't registered with us yet"); } else if (strcasecmp(CRM_SYSTEM_PENGINE, curr_client->sub_sys) == 0) { the_subsystem = pe_subsystem; } else if (strcasecmp(CRM_SYSTEM_TENGINE, curr_client->sub_sys) == 0) { the_subsystem = te_subsystem; } else if (strcasecmp(CRM_SYSTEM_CIB, curr_client->sub_sys) == 0){ the_subsystem = cib_subsystem; } if(the_subsystem != NULL) { the_subsystem->ipc = NULL; the_subsystem->client = NULL; crm_info("Received HUP from %s:[%d]", the_subsystem->name, the_subsystem->pid); } else { /* else that was a transient client */ crm_debug_2("Received HUP from transient client"); } if (curr_client->table_key != NULL) { /* * Key is destroyed below as: * curr_client->table_key * Value is cleaned up by: * crmd_ipc_connection_destroy * which will also call: * G_main_del_IPC_Channel */ g_hash_table_remove(ipc_clients, curr_client->table_key); } } gboolean update_dc(xmlNode *msg) { char *last_dc = fsa_our_dc; const char *dc_version = NULL; const char *welcome_from = NULL; if(msg != NULL) { gboolean invalid = FALSE; dc_version = crm_element_value(msg, F_CRM_VERSION); welcome_from = crm_element_value(msg, F_CRM_HOST_FROM); CRM_CHECK(dc_version != NULL, return FALSE); CRM_CHECK(welcome_from != NULL, return FALSE); if(AM_I_DC && safe_str_neq(welcome_from, fsa_our_uname)) { invalid = TRUE; } else if(fsa_our_dc && safe_str_neq(welcome_from, fsa_our_dc)) { invalid = TRUE; } if(invalid) { CRM_CHECK(fsa_our_dc != NULL, crm_err("We have no DC")); if(AM_I_DC) { crm_err("Not updating DC to %s (%s): we are also a DC", welcome_from, dc_version); } else { crm_warn("New DC %s is not %s", welcome_from, fsa_our_dc); } register_fsa_action(A_CL_JOIN_QUERY); return FALSE; } } crm_free(fsa_our_dc_version); fsa_our_dc_version = NULL; fsa_our_dc = NULL; /* Free'd as last_dc */ if(welcome_from != NULL) { fsa_our_dc = 
crm_strdup(welcome_from); } if(dc_version != NULL) { fsa_our_dc_version = crm_strdup(dc_version); } if(safe_str_eq(fsa_our_dc, last_dc)) { /* do nothing */ } else if(fsa_our_dc != NULL) { crm_info("Set DC to %s (%s)", crm_str(fsa_our_dc), crm_str(fsa_our_dc_version)); } else if(last_dc != NULL) { crm_info("Unset DC %s", crm_str(last_dc)); } crm_free(last_dc); return TRUE; } #define STATUS_PATH_MAX 512 static void erase_xpath_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { char *xpath = user_data; do_crm_log(rc==0?LOG_INFO:LOG_NOTICE, "Deletion of \"%s\": %s (rc=%d)", xpath, cib_error2string(rc), rc); crm_free(xpath); } -void erase_status_tag(const char *uname, const char *tag) +void erase_status_tag(const char *uname, const char *tag, int options) { int rc = cib_ok; char xpath[STATUS_PATH_MAX]; - int cib_opts = cib_quorum_override|cib_xpath; + int cib_opts = cib_quorum_override|cib_xpath|options; if(fsa_cib_conn && uname) { snprintf(xpath, STATUS_PATH_MAX, "//node_state[@uname='%s']/%s", uname, tag); rc = fsa_cib_conn->cmds->delete(fsa_cib_conn, xpath, NULL, cib_opts); add_cib_op_callback(fsa_cib_conn, rc, FALSE, crm_strdup(xpath), erase_xpath_callback); } } static IPC_Channel *attrd = NULL; static gboolean attrd_dispatch(IPC_Channel *client, gpointer user_data) { xmlNode *msg = NULL; gboolean stay_connected = TRUE; while(IPC_ISRCONN(client)) { if(client->ops->is_message_pending(client) == 0) { break; } msg = xmlfromIPC(client, MAX_IPC_DELAY); if (msg != NULL) { crm_log_xml_err(msg, "attrd:msg"); free_xml(msg); } } if (client->ch_status != IPC_CONNECT) { stay_connected = FALSE; } return stay_connected; } static void attrd_connection_destroy(gpointer user_data) { crm_err("Lost connection to attrd"); attrd = NULL; return; } void update_attrd(const char *host, const char *name, const char *value) { int retries = 5; gboolean rc = FALSE; retry: if(attrd == NULL) { crm_info("Connecting to attrd..."); attrd = init_client_ipc_comms_nodispatch(T_ATTRD); if(attrd) { G_main_add_IPC_Channel( G_PRIORITY_LOW, attrd, FALSE, attrd_dispatch, NULL, attrd_connection_destroy); } } if(attrd != NULL) { rc = attrd_update(attrd, 'U', host, name, value, XML_CIB_TAG_STATUS, NULL, NULL); } else { crm_warn("Could not connect to %s", T_ATTRD); } if(rc == FALSE) { crm_err("Could not send %s %s", T_ATTRD, name?"update":"refresh"); attrd = NULL; if(retries > 0) { retries--; sleep(1); goto retry; } } } diff --git a/cts/CTS.py b/cts/CTS.py index dd2f816bee..0ca78b94ad 100644 --- a/cts/CTS.py +++ b/cts/CTS.py @@ -1,1340 +1,1340 @@ '''CTS: Cluster Testing System: Main module Classes related to testing high-availability clusters... ''' __copyright__=''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
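# A minimal usage sketch for the new per-call "silent" switches added to
# RemoteExec and LogWatcher in this file (the node name and paths are
# placeholders; only the keyword arguments come from this patch):
#
#   rsh = RemoteExec(env)
#   rc = rsh("node1", "true", silent=True)            # run quietly, no "cmd:" debug line
#   rsh.cp("/etc/hosts", "root@node1:/tmp/", silent=True)
#   watch.lookforall(timeout=30, silent=True)         # quiet multi-pattern scan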
import types, string, select, sys, time, re, os, struct, signal import time, syslog, random, traceback, base64, pickle, binascii, fcntl from socket import gethostbyname_ex from UserDict import UserDict from subprocess import Popen,PIPE from cts.CTSvars import * class CtsLab(UserDict): '''This class defines the Lab Environment for the Cluster Test System. It defines those things which are expected to change from test environment to test environment for the same cluster manager. It is where you define the set of nodes that are in your test lab what kind of reset mechanism you use, etc. This class is derived from a UserDict because we hold many different parameters of different kinds, and this provides provide a uniform and extensible interface useful for any kind of communication between the user/administrator/tester and CTS. At this point in time, it is the intent of this class to model static configuration and/or environmental data about the environment which doesn't change as the tests proceed. Well-known names (keys) are an important concept in this class. The HasMinimalKeys member function knows the minimal set of well-known names for the class. The following names are standard (well-known) at this time: nodes An array of the nodes in the cluster reset A ResetMechanism object logger An array of objects that log strings... CMclass The type of ClusterManager we are running (This is a class object, not a class instance) RandSeed Random seed. It is a triple of bytes. (optional) The CTS code ignores names it doesn't know about/need. The individual tests have access to this information, and it is perfectly acceptable to provide hints, tweaks, fine-tuning directions or other information to the tests through this mechanism. ''' def __init__(self): self.data = {} self.rsh = RemoteExec(self) self.RandomGen = random.Random() self.Scenario = None # Get a random seed for the random number generator. 
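        # Defaults for the well-known keys described in the class docstring;
        # individual test labs override them via __setitem__, which validates
        # "nodes", "logger" and "CMclass" before storing the value.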
self["LogWatcher"] = "any" self["LogFileName"] = "/var/log/messages" self["OutputFile"] = None self["SyslogFacility"] = None self["CMclass"] = None self["logrestartcmd"] = "/etc/init.d/syslog-ng restart 2>&1 > /dev/null" self["logger"] = ([StdErrLog(self)]) self.SeedRandom() def SeedRandom(self, seed=None): if not seed: seed = int(time.time()) if self.has_key("RandSeed"): self.log("New random seed is: " + str(seed)) else: self.log("Random seed is: " + str(seed)) self["RandSeed"] = seed self.RandomGen.seed(str(seed)) def HasMinimalKeys(self): 'Return TRUE if our object has the minimal set of keys/values in it' result = 1 for key in self.MinimalKeys: if not self.has_key(key): result = None return result def log(self, args): "Log using each of the supplied logging methods" for logfcn in self._logfunctions: logfcn(string.strip(args)) def debug(self, args): "Log using each of the supplied logging methods" for logfcn in self._logfunctions: if logfcn.name() != "StdErrLog": logfcn("debug: %s" % string.strip(args)) def dump(self): keys = [] for key in self.keys(): keys.append(key) keys.sort() for key in keys: self.debug("Environment["+key+"]:\t"+str(self[key])) def run(self, Scenario, Iterations): if not Scenario: self.log("No scenario was defined") return 1 self.log("Cluster nodes: ") for node in self["nodes"]: self.log(" * %s" % (node)) if not Scenario.SetUp(): return 1 try : Scenario.run(Iterations) except : self.log("Exception by %s" % sys.exc_info()[0]) for logmethod in self["logger"]: traceback.print_exc(50, logmethod) Scenario.summarize() Scenario.TearDown() return 1 #ClusterManager.oprofileSave(Iterations) Scenario.TearDown() Scenario.summarize() if Scenario.Stats["failure"] > 0: return Scenario.Stats["failure"] elif Scenario.Stats["success"] != Iterations: self.log("No failure count but success != requested iterations") return 1 return 0 def __setitem__(self, key, value): '''Since this function gets called whenever we modify the dictionary (object), we can (and do) validate those keys that we know how to validate. For the most part, we know how to validate the "MinimalKeys" elements. ''' # # List of nodes in the system # if key == "nodes": self.Nodes = {} for node in value: # I don't think I need the IP address, etc. but this validates # the node name against /etc/hosts and/or DNS, so it's a # GoodThing(tm). try: self.Nodes[node] = gethostbyname_ex(node) except: print node+" not found in DNS... aborting" raise # # List of Logging Mechanism(s) # elif key == "logger": if len(value) < 1: raise ValueError("Must have at least one logging mechanism") for logger in value: if not callable(logger): raise ValueError("'logger' elements must be callable") self._logfunctions = value # # Cluster Manager Class # elif key == "CMclass": if value and not issubclass(value, ClusterManager): raise ValueError("'CMclass' must be a subclass of" " ClusterManager") # # Initial Random seed... 
# #elif key == "RandSeed": # if len(value) != 3: # raise ValueError("'Randseed' must be a 3-element list/tuple") # for elem in value: # if not isinstance(elem, types.IntType): # raise ValueError("'Randseed' list must all be ints") self.data[key] = value def IsValidNode(self, node): 'Return TRUE if the given node is valid' return self.Nodes.has_key(node) def __CheckNode(self, node): "Raise a ValueError if the given node isn't valid" if not self.IsValidNode(node): raise ValueError("Invalid node [%s] in CheckNode" % node) def RandomNode(self): '''Choose a random node from the cluster''' return self.RandomGen.choice(self["nodes"]) class Logger: TimeFormat = "%b %d %H:%M:%S\t" def __call__(self, lines): raise ValueError("Abstract class member (__call__)") def write(self, line): return self(line.rstrip()) def writelines(self, lines): for s in lines: self.write(s) return 1 def flush(self): return 1 def isatty(self): return None class SysLog(Logger): # http://docs.python.org/lib/module-syslog.html defaultsource="CTS" map = { "kernel": syslog.LOG_KERN, "user": syslog.LOG_USER, "mail": syslog.LOG_MAIL, "daemon": syslog.LOG_DAEMON, "auth": syslog.LOG_AUTH, "lpr": syslog.LOG_LPR, "news": syslog.LOG_NEWS, "uucp": syslog.LOG_UUCP, "cron": syslog.LOG_CRON, "local0": syslog.LOG_LOCAL0, "local1": syslog.LOG_LOCAL1, "local2": syslog.LOG_LOCAL2, "local3": syslog.LOG_LOCAL3, "local4": syslog.LOG_LOCAL4, "local5": syslog.LOG_LOCAL5, "local6": syslog.LOG_LOCAL6, "local7": syslog.LOG_LOCAL7, } def __init__(self, labinfo): if labinfo.has_key("syslogsource"): self.source=labinfo["syslogsource"] else: self.source=SysLog.defaultsource self.facility="daemon" if labinfo.has_key("SyslogFacility") and labinfo["SyslogFacility"]: if SysLog.map.has_key(labinfo["SyslogFacility"]): self.facility=labinfo["SyslogFacility"] else: raise ValueError("%s: bad syslog facility"%labinfo["SyslogFacility"]) self.facility=SysLog.map[self.facility] syslog.openlog(self.source, 0, self.facility) def setfacility(self, facility): self.facility = facility if SysLog.map.has_key(self.facility): self.facility=SysLog.map[self.facility] syslog.closelog() syslog.openlog(self.source, 0, self.facility) def __call__(self, lines): if isinstance(lines, types.StringType): syslog.syslog(lines) else: for line in lines: syslog.syslog(line) def name(self): return "Syslog" class StdErrLog(Logger): def __init__(self, labinfo): pass def __call__(self, lines): t = time.strftime(Logger.TimeFormat, time.localtime(time.time())) if isinstance(lines, types.StringType): sys.__stderr__.writelines([t, lines, "\n"]) else: for line in lines: sys.__stderr__.writelines([t, line, "\n"]) sys.__stderr__.flush() def name(self): return "StdErrLog" class FileLog(Logger): def __init__(self, labinfo, filename=None): if filename == None: filename=labinfo["LogFileName"] self.logfile=filename import os self.hostname = os.uname()[1]+" " self.source = "CTS: " def __call__(self, lines): fd = open(self.logfile, "a") t = time.strftime(Logger.TimeFormat, time.localtime(time.time())) if isinstance(lines, types.StringType): fd.writelines([t, self.hostname, self.source, lines, "\n"]) else: for line in lines: fd.writelines([t, self.hostname, self.source, line, "\n"]) fd.close() def name(self): return "FileLog" class RemoteExec: '''This is an abstract remote execution class. It runs a command on another machine - somehow. The somehow is up to us. This particular class uses ssh. Most of the work is done by fork/exec of ssh or scp. 
''' def __init__(self, Env=None, silent=False): self.Env = Env self.silent = silent # -n: no stdin, -x: no X11, # -o ServerAliveInterval=5 disconnect after 3*5s if the server stops responding self.Command = "ssh -l root -n -x -o ServerAliveInterval=5" # -B: batch mode, -q: no stats (quiet) self.CpCommand = "scp -B -q" self.OurNode=string.lower(os.uname()[1]) def enable_qarsh(self): # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/ self.log("Using QARSH for connections to cluster nodes") self.Command = "qarsh -l root" self.CpCommand = "qacp" def _fixcmd(self, cmd): return re.sub("\'", "'\\''", cmd) def _cmd(self, *args): '''Compute the string that will run the given command on the given remote system''' args= args[0] sysname = args[0] command = args[1] #print "sysname: %s, us: %s" % (sysname, self.OurNode) if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost": ret = command else: ret = self.Command + " " + sysname + " '" + self._fixcmd(command) + "'" #print ("About to run %s\n" % ret) return ret def log(self, args): if not self.silent: if not self.Env: print (args) else: self.Env.log(args) def debug(self, args): if not self.silent: if not self.Env: print (args) else: self.Env.debug(args) - def __call__(self, node, command, stdout=0, blocking=1): + def __call__(self, node, command, stdout=0, blocking=1, silent=False): '''Run the given command on the given remote system If you call this class like a function, this is the function that gets called. It just runs it roughly as though it were a system() call on the remote machine. The first argument is name of the machine to run it on. ''' rc = 0 result = None if not blocking: proc = Popen(self._cmd([node, command]), stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) self.debug("cmd: async: target=%s, rc=%d: %s" % (node, proc.pid, command)) if proc.pid > 0: return 0 return -1 proc = Popen(self._cmd([node, command]), stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) if stdout == 1: result = proc.stdout.readline() else: result = proc.stdout.readlines() proc.stdout.close() rc = proc.wait() - if not self.silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command)) + if not silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command)) if stdout == 1: return result if proc.stderr: errors = proc.stderr.readlines() proc.stderr.close() - for err in errors: - self.debug("cmd: stderr: %s" % err) + if not silent: + for err in errors: + self.debug("cmd: stderr: %s" % err) if stdout == 0: - if result: + if not silent and result: for line in result: self.debug("cmd: stdout: %s" % line) return rc return (rc, result) - def cp(self, *args): + def cp(self, source, target, silent=False): '''Perform a remote copy''' - cpstring = self.CpCommand - for arg in args: - cpstring = cpstring + " \'" + arg + "\'" - + cpstring = self.CpCommand + " \'" + source + "\'" + " \'" + target + "\'" rc = os.system(cpstring) - if not self.silent: self.debug("cmd: rc=%d: %s" % (rc, cpstring)) + if not silent: self.debug("cmd: rc=%d: %s" % (rc, cpstring)) return rc has_log_watcher = {} log_watcher_bin = "/tmp/cts_log_watcher.py" log_watcher = """ import sys, os, fcntl ''' Remote logfile reader for CTS Reads a specified number of lines from the supplied offset Returns the current offset Contains logic for handling truncation ''' limit = 100 offset = 0 prefix = '' filename = '/var/log/messages' skipthis=None args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == 
'-l' or args[i] == '--limit': skipthis=1 limit = int(args[i+1]) elif args[i] == '-f' or args[i] == '--filename': skipthis=1 filename = args[i+1] elif args[i] == '-o' or args[i] == '--offset': skipthis=1 offset = args[i+1] elif args[i] == '-p' or args[i] == '--prefix': skipthis=1 prefix = args[i+1] logfile=open(filename, 'r') logfile.seek(0, os.SEEK_END) newsize=logfile.tell() if offset != 'EOF': offset = int(offset) if newsize >= offset: logfile.seek(offset) else: print prefix + 'File truncated' logfile.seek(0) # Don't block when we reach EOF fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) count = limit while count > 0: count -= 1 line = logfile.readline() if line: print line.strip() else: break print prefix + 'Last read: %d %d' % (logfile.tell(), limit - count) logfile.close() """ class SearchObj: def __init__(self, Env, filename, host=None): self.Env = Env self.host = host self.filename = filename self.cache = [] self.offset = "EOF" - self.rsh = RemoteExec(Env, silent=True) if host == None: host = "localhost" global has_log_watcher if not has_log_watcher.has_key(host): global log_watcher global log_watcher_bin self.debug("Installing %s on %s" % (log_watcher_bin, host)) - self.rsh(host, '''echo "%s" > %s''' % (log_watcher, log_watcher_bin)) + self.Env.rsh(host, '''echo "%s" > %s''' % (log_watcher, log_watcher_bin), silent=True) has_log_watcher[host] = 1 self.next() def __str__(self): if self.host: return "%s:%s" % (self.host, self.filename) return self.filename def log(self, args): message = "lw: %s: %s" % (self, args) if not self.Env: print (message) else: self.Env.log(message) def debug(self, args): message = "lw: %s: %s" % (self, args) if not self.Env: print (message) else: self.Env.debug(message) def next(self): cache = [] if not len(self.cache): global log_watcher_bin - (rc, lines) = self.rsh( + (rc, lines) = self.Env.rsh( self.host, "python %s -p CTSwatcher: -f %s -o %s" % (log_watcher_bin, self.filename, self.offset), - stdout=None) + stdout=None, silent=True) for line in lines: match = re.search("^CTSwatcher:Last read: (\d+)", line) if match: last_offset = self.offset self.offset = match.group(1) - if last_offset == "EOF": - self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset)) + #if last_offset == "EOF": self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset)) elif re.search("^CTSwatcher:", line): self.debug("Got control line: "+ line) else: cache.append(line) return cache class LogWatcher(RemoteExec): '''This class watches logs for messages that fit certain regular expressions. Watching logs for events isn't the ideal way to do business, but it's better than nothing :-) On the other hand, this class is really pretty cool ;-) The way you use this class is as follows: Construct a LogWatcher object Call setwatch() when you want to start watching the log Call look() to scan the log looking for the patterns ''' def __init__(self, Env, log, regexes, name="Anon", timeout=10, debug_level=None): '''This is the constructor for the LogWatcher class. It takes a log name to watch, and a list of regular expressions to watch for." ''' RemoteExec.__init__(self, Env) # Validate our arguments. 
Better sooner than later ;-) for regex in regexes: assert re.compile(regex) self.name = name self.regexes = regexes self.filename = log self.debug_level = debug_level self.whichmatch = -1 self.unmatched = None self.file_list = [] self.line_cache = [] for regex in self.regexes: self.debug("Looking for regex: "+regex) self.Timeout = int(timeout) self.returnonlymatch = None def debug(self, args): message = "lw: %s: %s" % (self.name, args) if not self.Env: print (message) else: self.Env.debug(message) def setwatch(self): '''Mark the place to start watching the log from. ''' if self.Env["LogWatcher"] == "remote": for node in self.Env["nodes"]: self.file_list.append(SearchObj(self.Env, self.filename, node)) else: self.file_list.append(SearchObj(self.Env, self.filename)) def __del__(self): if self.debug_level > 1: self.debug("Destroy") def ReturnOnlyMatch(self, onlymatch=1): '''Specify one or more subgroups of the match to return rather than the whole string http://www.python.org/doc/2.5.2/lib/match-objects.html ''' self.returnonlymatch = onlymatch def __get_line(self): if not len(self.line_cache): if not len(self.file_list): raise ValueError("No sources to read from") for f in self.file_list: lines = f.next() self.line_cache.extend(lines) if self.line_cache: line = self.line_cache[0] self.line_cache.remove(line) return line return None def look(self, timeout=None): '''Examine the log looking for the given patterns. It starts looking from the place marked by setwatch(). This function looks in the file in the fashion of tail -f. It properly recovers from log file truncation, but not from removing and recreating the log. It would be nice if it recovered from this as well :-) We return the first line which matches any of our patterns. ''' if timeout == None: timeout = self.Timeout - done=time.time()+timeout+1 - if self.debug_level > 2: self.debug("starting single search: timeout=%d" % timeout) + now=time.time() + done=now+timeout+1 + if self.debug_level > 2: self.debug("starting single search: timeout=%d, now=%d, limit=%d" % (timeout, now, done)) while (timeout <= 0 or time.time() <= done): line = self.__get_line() if line: which=-1 if re.search("CTS:", line): continue if self.debug_level > 2: self.debug("Processing: "+ line) for regex in self.regexes: which=which+1 if self.debug_level > 2: self.debug("Comparing line to: "+ regex) #matchobj = re.search(string.lower(regex), string.lower(line)) matchobj = re.search(regex, line) if matchobj: self.whichmatch=which if self.returnonlymatch: return matchobj.group(self.returnonlymatch) else: self.debug("Matched: "+line) if self.debug_level > 1: self.debug("With: "+ regex) return line elif timeout > 0: time.sleep(1) #time.sleep(0.025) else: self.debug("End of file") return None - self.debug("Timeout") + self.debug("Single search timed out: timeout=%d, now=%d, limit=%d" % (timeout, now, done)) return None - def lookforall(self, timeout=None, allow_multiple_matches=None): + def lookforall(self, timeout=None, allow_multiple_matches=None, silent=False): '''Examine the log looking for ALL of the given patterns. It starts looking from the place marked by setwatch(). 
We return when the timeout is reached, or when we have found ALL of the regexes that were part of the watch ''' if timeout == None: timeout = self.Timeout save_regexes = self.regexes returnresult = [] - self.debug("starting search: timeout=%d" % timeout) - for regex in self.regexes: - if self.debug_level > 2: self.debug("Looking for regex: "+regex) + if not silent: + self.debug("starting search: timeout=%d" % timeout) + for regex in self.regexes: + if self.debug_level > 2: self.debug("Looking for regex: "+regex) while (len(self.regexes) > 0): oneresult = self.look(timeout) if not oneresult: self.unmatched = self.regexes self.matched = returnresult self.regexes = save_regexes return None returnresult.append(oneresult) if not allow_multiple_matches: del self.regexes[self.whichmatch] else: # Allow multiple regexes to match a single line tmp_regexes = self.regexes self.regexes = [] which = 0 for regex in tmp_regexes: matchobj = re.search(regex, oneresult) if not matchobj: self.regexes.append(regex) self.unmatched = None self.matched = returnresult self.regexes = save_regexes return returnresult class NodeStatus: def __init__(self, Env): self.Env = Env def IsNodeBooted(self, node): '''Return TRUE if the given node is booted (responds to pings)''' - return self.Env.rsh("localhost", "ping -nq -c1 -w1 %s" % node) == 0 + return self.Env.rsh("localhost", "ping -nq -c1 -w1 %s" % node, silent=True) == 0 def IsSshdUp(self, node): - #return self.rsh(node, "true") == 0; - rc = self.Env.rsh(node, "true") + rc = self.Env.rsh(node, "true", silent=True) return rc == 0 def WaitForNodeToComeUp(self, node, Timeout=300): '''Return TRUE when given node comes up, or None/FALSE if timeout''' timeout=Timeout anytimeouts=0 while timeout > 0: if self.IsNodeBooted(node) and self.IsSshdUp(node): if anytimeouts: # Fudge to wait for the system to finish coming up time.sleep(30) self.Env.debug("Node %s now up" % node) return 1 time.sleep(30) if (not anytimeouts): self.Env.debug("Waiting for node %s to come up" % node) anytimeouts=1 timeout = timeout - 1 self.Env.log("%s did not come up within %d tries" % (node, Timeout)) answer = raw_input('Continue? [nY]') if answer and answer == "n": raise ValueError("%s did not come up within %d tries" % (node, Timeout)) def WaitForAllNodesToComeUp(self, nodes, timeout=300): '''Return TRUE when all nodes come up, or FALSE if timeout''' for node in nodes: if not self.WaitForNodeToComeUp(node, timeout): return None return 1 class ClusterManager(UserDict): '''The Cluster Manager class. This is an subclass of the Python dictionary class. (this is because it contains lots of {name,value} pairs, not because it's behavior is that terribly similar to a dictionary in other ways.) This is an abstract class which class implements high-level operations on the cluster and/or its cluster managers. Actual cluster managers classes are subclassed from this type. One of the things we do is track the state we think every node should be in. 
''' def __InitialConditions(self): #if os.geteuid() != 0: # raise ValueError("Must Be Root!") None def _finalConditions(self): for key in self.keys(): if self[key] == None: raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.") def __init__(self, Environment, randseed=None): self.Env = Environment self.__InitialConditions() self.clear_cache = 0 self.TestLoggingLevel=0 self.data = { "up" : "up", # Status meaning up "down" : "down", # Status meaning down "StonithCmd" : "stonith -t baytech -p '10.10.10.100 admin admin' %s", "DeadTime" : 30, # Max time to detect dead node... "StartTime" : 90, # Max time to start up # # These next values need to be overridden in the derived class. # "Name" : None, "StartCmd" : None, "StopCmd" : None, "StatusCmd" : None, #"RereadCmd" : None, "BreakCommCmd" : None, "FixCommCmd" : None, #"TestConfigDir" : None, "LogFileName" : None, #"Pat:Master_started" : None, #"Pat:Slave_started" : None, "Pat:We_stopped" : None, "Pat:They_stopped" : None, "BadRegexes" : None, # A set of "bad news" regexes # to apply to the log } self.rsh = self.Env.rsh self.ShouldBeStatus={} self.ns = NodeStatus(self.Env) self.OurNode=string.lower(os.uname()[1]) def key_for_node(self, node): return node def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] def log(self, args): self.Env.log(args) def debug(self, args): self.Env.debug(args) def prepare(self): '''Finish the Initialization process. Prepare to test...''' for node in self.Env["nodes"]: if self.StataCM(node): self.ShouldBeStatus[node]="up" else: self.ShouldBeStatus[node]="down" self.unisolate_node(node) def upcount(self): '''How many nodes are up?''' count=0 for node in self.Env["nodes"]: if self.ShouldBeStatus[node]=="up": count=count+1 return count def install_config(self, node): return None def clear_all_caches(self): if self.clear_cache: for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "down": self.debug("Removing cache file on: "+node) self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache") else: self.debug("NOT Removing cache file on: "+node) - def StartaCM(self, node): + def StartaCM(self, node, verbose=False): '''Start up the cluster manager on a given node''' - self.debug("Starting %s on node %s" %(self["Name"], node)) + if verbose: self.log("Starting %s on node %s" %(self["Name"], node)) + else: self.debug("Starting %s on node %s" %(self["Name"], node)) ret = 1 if not self.ShouldBeStatus.has_key(node): self.ShouldBeStatus[node] = "down" if self.ShouldBeStatus[node] != "down": return 1 patterns = [] # Technically we should always be able to notice ourselves starting patterns.append(self["Pat:Local_started"] % node) if self.upcount() == 0: patterns.append(self["Pat:Master_started"] % node) else: patterns.append(self["Pat:Slave_started"] % node) watch = LogWatcher( self.Env, self["LogFileName"], patterns, "StartaCM", self["StartTime"]+10) watch.setwatch() self.install_config(node) self.ShouldBeStatus[node] = "any" if self.StataCM(node) and self.cluster_stable(self["DeadTime"]): self.log ("%s was already started" %(node)) return 1 # Clear out the host cache so autojoin can be exercised if self.clear_cache: self.debug("Removing cache file on: "+node) self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache") if not(self.Env["valgrind-tests"]): startCmd = self["StartCmd"] else: if self.Env["valgrind-prefix"]: prefix = self.Env["valgrind-prefix"] else: prefix = "cts" startCmd = """G_SLICE=always-malloc 
HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % ( self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"]) if self.rsh(node, startCmd) != 0: self.log ("Warn: Start command failed on node %s" %(node)) return None self.ShouldBeStatus[node]="up" watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.log ("Warn: Startup pattern not found: %s" %(regex)) if watch_result: #self.debug("Found match: "+ repr(watch_result)) self.cluster_stable(self["DeadTime"]) return 1 if self.StataCM(node) and self.cluster_stable(self["DeadTime"]): return 1 self.log ("Warn: Start failed for node %s" %(node)) return None - def StartaCMnoBlock(self, node): + def StartaCMnoBlock(self, node, verbose=False): '''Start up the cluster manager on a given node with none-block mode''' - self.debug("Starting %s on node %s" %(self["Name"], node)) + if verbose: self.log("Starting %s on node %s" %(self["Name"], node)) + else: self.debug("Starting %s on node %s" %(self["Name"], node)) # Clear out the host cache so autojoin can be exercised if self.clear_cache: self.debug("Removing cache file on: "+node) self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache") if not(self.Env["valgrind-tests"]): startCmd = self["StartCmd"] else: if self.Env["valgrind-prefix"]: prefix = self.Env["valgrind-prefix"] else: prefix = "cts" startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % ( self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"]) self.rsh(node, startCmd, blocking=0) self.ShouldBeStatus[node]="up" return 1 - def StopaCM(self, node): + def StopaCM(self, node, verbose=False): '''Stop the cluster manager on a given node''' - self.debug("Stopping %s on node %s" %(self["Name"], node)) + if verbose: self.log("Stopping %s on node %s" %(self["Name"], node)) + else: self.debug("Stopping %s on node %s" %(self["Name"], node)) if self.ShouldBeStatus[node] != "up": return 1 if self.rsh(node, self["StopCmd"]) == 0: self.ShouldBeStatus[node]="down" self.cluster_stable(self["DeadTime"]) return 1 else: self.log ("Could not stop %s on node %s" %(self["Name"], node)) return None def StopaCMnoBlock(self, node): '''Stop the cluster manager on a given node with none-block mode''' self.debug("Stopping %s on node %s" %(self["Name"], node)) self.rsh(node, self["StopCmd"], blocking=0) self.ShouldBeStatus[node]="down" return 1 def cluster_stable(self, timeout = None): time.sleep(self["StableTime"]) return 1 def node_stable(self, node): return 1 def RereadCM(self, node): '''Force the cluster manager on a given node to reread its config This may be a no-op on certain cluster managers. 
''' rc=self.rsh(node, self["RereadCmd"]) if rc == 0: return 1 else: self.log ("Could not force %s on node %s to reread its config" % (self["Name"], node)) return None def StataCM(self, node): '''Report the status of the cluster manager on a given node''' out=self.rsh(node, self["StatusCmd"], 1) ret= (string.find(out, 'stopped') == -1) try: if ret: if self.ShouldBeStatus[node] == "down": self.log( "Node status for %s is %s but we think it should be %s" % (node, "up", self.ShouldBeStatus[node])) else: if self.ShouldBeStatus[node] == "up": self.log( "Node status for %s is %s but we think it should be %s" % (node, "down", self.ShouldBeStatus[node])) except KeyError: pass if ret: self.ShouldBeStatus[node]="up" else: self.ShouldBeStatus[node]="down" return ret - def startall(self, nodelist=None): + def startall(self, nodelist=None, verbose=False): '''Start the cluster manager on every node in the cluster. We can do it on a subset of the cluster if nodelist is not None. ''' ret = 1 map = {} if not nodelist: nodelist=self.Env["nodes"] for node in nodelist: if self.ShouldBeStatus[node] == "down": - if not self.StartaCM(node): + if not self.StartaCM(node, verbose=verbose): ret = 0 return ret - def stopall(self, nodelist=None): + def stopall(self, nodelist=None, verbose=False): '''Stop the cluster managers on every node in the cluster. We can do it on a subset of the cluster if nodelist is not None. ''' ret = 1 map = {} if not nodelist: nodelist=self.Env["nodes"] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": - if not self.StopaCM(node): + if not self.StopaCM(node, verbose=verbose): ret = 0 return ret def rereadall(self, nodelist=None): '''Force the cluster managers on every node in the cluster to reread their config files. We can do it on a subset of the cluster if nodelist is not None. ''' map = {} if not nodelist: nodelist=self.Env["nodes"] for node in self.Env["nodes"]: if self.ShouldBeStatus[node] == "up": self.RereadCM(node) def statall(self, nodelist=None): '''Return the status of the cluster managers in the cluster. We can do it on a subset of the cluster if nodelist is not None. 
''' result={} if not nodelist: nodelist=self.Env["nodes"] for node in nodelist: if self.StataCM(node): result[node] = "up" else: result[node] = "down" return result def isolate_node(self, target, nodes=None): '''isolate the communication between the nodes''' if not nodes: nodes = self.Env["nodes"] for node in nodes: if node != target: rc = self.rsh(target, self["BreakCommCmd"] % self.key_for_node(node)) if rc != 0: self.log("Could not break the communication between %s and %s: %d" % (target, node, rc)) return None else: self.debug("Communication cut between %s and %s" % (target, node)) return 1 def unisolate_node(self, target, nodes=None): '''fix the communication between the nodes''' if not nodes: nodes = self.Env["nodes"] for node in nodes: if node != target: restored = 0 # Limit the amount of time we have asynchronous connectivity for # Restore both sides as simultaneously as possible self.rsh(target, self["FixCommCmd"] % self.key_for_node(node), blocking=0) self.rsh(node, self["FixCommCmd"] % self.key_for_node(target), blocking=0) self.debug("Communication restored between %s and %s" % (target, node)) def reducecomm_node(self,node): '''reduce the communication between the nodes''' rc = self.rsh(node, self["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"])) if rc == 0: return 1 else: self.log("Could not reduce the communication between the nodes from node: %s" % node) return None def restorecomm_node(self,node): '''restore the saved communication between the nodes''' rc = 0 if float(self.Env["XmitLoss"])!=0 or float(self.Env["RecvLoss"])!=0 : rc = self.rsh(node, self["RestoreCommCmd"]); if rc == 0: return 1 else: self.log("Could not restore the communication between the nodes from node: %s" % node) return None def HasQuorum(self, node_list): "Return TRUE if the cluster currently has quorum" # If we are auditing a partition, then one side will # have quorum and the other not. # So the caller needs to tell us which we are checking # If no value for node_list is specified... assume all nodes raise ValueError("Abstract Class member (HasQuorum)") def Components(self): raise ValueError("Abstract Class member (Components)") def oprofileStart(self, node=None): if not node: for n in self.Env["oprofile"]: self.oprofileStart(n) elif node in self.Env["oprofile"]: self.debug("Enabling oprofile on %s" % node) self.rsh(node, "opcontrol --init") self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all") self.rsh(node, "opcontrol --start") self.rsh(node, "opcontrol --reset") def oprofileSave(self, test, node=None): if not node: for n in self.Env["oprofile"]: self.oprofileSave(test, n) elif node in self.Env["oprofile"]: self.rsh(node, "opcontrol --dump") self.rsh(node, "opcontrol --save=cts.%d" % test) # Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c* if None: self.rsh(node, "opcontrol --reset") else: self.oprofileStop(node) self.oprofileStart(node) def oprofileStop(self, node=None): if not node: for n in self.Env["oprofile"]: self.oprofileStop(n) elif node in self.Env["oprofile"]: self.debug("Stopping oprofile on %s" % node) self.rsh(node, "opcontrol --reset") self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null") class Resource: ''' This is an HA resource (not a resource group). A resource group is just an ordered list of Resource objects. 
''' def __init__(self, cm, rsctype=None, instance=None): self.CM = cm self.ResourceType = rsctype self.Instance = instance self.needs_quorum = 1 def Type(self): return self.ResourceType def Instance(self, nodename): return self.Instance def IsRunningOn(self, nodename): ''' This member function returns true if our resource is running on the given node in the cluster. It is analagous to the "status" operation on SystemV init scripts and heartbeat scripts. FailSafe calls it the "exclusive" operation. ''' raise ValueError("Abstract Class member (IsRunningOn)") return None def IsWorkingCorrectly(self, nodename): ''' This member function returns true if our resource is operating correctly on the given node in the cluster. Heartbeat does not require this operation, but it might be called the Monitor operation, which is what FailSafe calls it. For remotely monitorable resources (like IP addresses), they *should* be monitored remotely for testing. ''' raise ValueError("Abstract Class member (IsWorkingCorrectly)") return None def Start(self, nodename): ''' This member function starts or activates the resource. ''' raise ValueError("Abstract Class member (Start)") return None def Stop(self, nodename): ''' This member function stops or deactivates the resource. ''' raise ValueError("Abstract Class member (Stop)") return None def __repr__(self): if (self.Instance and len(self.Instance) > 1): return "{" + self.ResourceType + "::" + self.Instance + "}" else: return "{" + self.ResourceType + "}" class Component: def kill(self, node): None class Process(Component): def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], triggersreboot=0): self.name = str(name) self.dc_only = dc_only self.pats = pats self.dc_pats = dc_pats self.CM = cm self.badnews_ignore = badnews_ignore self.triggersreboot = triggersreboot if process: self.proc = str(process) else: self.proc = str(name) self.KillCmd = "killall -9 " + self.proc def kill(self, node): if self.CM.rsh(node, self.KillCmd) != 0: self.CM.log ("ERROR: Kill %s failed on node %s" %(self.name,node)) return None return 1 diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py index 9eb144b717..344ae6c893 100755 --- a/cts/CTSaudits.py +++ b/cts/CTSaudits.py @@ -1,783 +1,782 @@ '''CTS: Cluster Testing System: Audit module ''' __copyright__=''' Copyright (C) 2000, 2001,2005 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
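# The LogAudit changes below route the audit's own probe commands and
# pattern scans through the new "silent" flags so the logging self-test
# does not flood the debug log.  A minimal sketch of the pattern being
# exercised (hostname and facility are placeholders):
#
#   cmd = "logger -p daemon.info Test message from node1"
#   cm.rsh("node1", cmd, blocking=0, silent=True)     # fire the probe quietly
#   watch.lookforall(silent=True)                     # scan without per-regex debug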
import time, os, string, re import CTS class ClusterAudit: def __init__(self, cm): self.CM = cm def __call__(self): raise ValueError("Abstract Class member (__call__)") def is_applicable(self): '''Return TRUE if we are applicable in the current test configuration''' raise ValueError("Abstract Class member (is_applicable)") return 1 def log(self, args): self.CM.log("audit: %s" % args) def debug(self, args): self.CM.debug("audit: %s" % args) def name(self): raise ValueError("Abstract Class member (name)") AllAuditClasses = [ ] class LogAudit(ClusterAudit): def name(self): return "LogAudit" def __init__(self, cm): self.CM = cm def RestartClusterLogging(self, nodes=None): if not nodes: nodes = self.CM.Env["nodes"] self.CM.log("Restarting logging on: %s" % repr(nodes)) for node in nodes: cmd=self.CM.Env["logrestartcmd"] if self.CM.rsh(node, cmd, blocking=0) != 0: self.CM.log ("ERROR: Cannot restart logging on %s [%s failed]" % (node, cmd)) def TestLogging(self): patterns = [] prefix = "Test message from" watch_syslog = None watch_remote = None for node in self.CM.Env["nodes"]: # Look for the node name in two places to make sure # that syslog is logging with the correct hostname patterns.append("%s.*%s %s" % (node, prefix, node)) watch_pref = self.CM.Env["LogWatcher"] if watch_pref == "any" or watch_pref == "syslog": self.CM.Env["LogWatcher"] = "syslog" if watch_pref == "any": self.CM.log("Testing for %s logs" % self.CM.Env["LogWatcher"]) watch_syslog = CTS.LogWatcher(self.CM.Env, self.CM.Env["LogFileName"], patterns, "LogAudit", 30) watch_syslog.setwatch() if watch_pref == "any" or watch_pref == "remote": self.CM.Env["LogWatcher"] = "remote" if watch_pref == "any": self.CM.log("Testing for %s logs" % self.CM.Env["LogWatcher"]) watch_remote = CTS.LogWatcher(self.CM.Env, self.CM.Env["LogFileName"], patterns, "LogAudit", 30) watch_remote.setwatch() for node in self.CM.Env["nodes"]: cmd="logger -p %s.info %s %s" % (self.CM.Env["SyslogFacility"], prefix, node) - if self.CM.rsh(node, cmd, blocking=0) != 0: + if self.CM.rsh(node, cmd, blocking=0, silent=True) != 0: self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node)) if watch_syslog: watch = watch_syslog self.CM.Env["LogWatcher"] = "syslog" - watch_result = watch.lookforall() + watch_result = watch.lookforall(silent=True) if not watch.unmatched: if watch_pref == "any": self.CM.log ("Continuing with %s-based log reader" % (self.CM.Env["LogWatcher"])) return 1 if watch_remote: watch = watch_remote self.CM.Env["LogWatcher"] = "remote" - watch_result = watch.lookforall() + watch_result = watch.lookforall(silent=True) if not watch.unmatched: if watch_pref == "any": self.CM.log ("Continuing with %s-based log reader" % (self.CM.Env["LogWatcher"])) return 1 if watch_syslog and not watch_syslog.unmatched: for regex in watch_syslog.unmatched: self.CM.log ("Test message [%s] not found in syslog logs." % regex) if watch_remote and not watch_remote.unmatched: for regex in watch_remote.unmatched: self.CM.log ("Test message [%s] not found in remote logs." 
% regex) return 0 def __call__(self): max=3 attempt=0 self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) while attempt <= max and self.TestLogging() == 0: attempt = attempt + 1 self.RestartClusterLogging() time.sleep(60*attempt) if attempt > max: self.CM.log("ERROR: Cluster logging unrecoverable.") return 0 return 1 def is_applicable(self): if self.CM.Env["DoBSC"]: return 0 return 1 class DiskAudit(ClusterAudit): def name(self): return "DiskspaceAudit" def __init__(self, cm): self.CM = cm def __call__(self): result=1 dfcmd="df -k /var/log | tail -1 | tr -s ' ' | cut -d' ' -f2" self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"]) for node in self.CM.Env["nodes"]: dfout=self.CM.rsh(node, dfcmd, 1) if not dfout: self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node)) else: try: idfout = int(dfout) except (ValueError, TypeError): self.CM.log("Warning: df output from %s was invalid [%s]" % (node, dfout)) else: if idfout == 0: self.CM.log("CRIT: Completely out of log disk space on %s" % node) result=None elif idfout <= 1000: self.CM.log("WARN: Low on log disk space (%d Mbytes) on %s" % (idfout, node)) return result def is_applicable(self): if self.CM.Env["DoBSC"]: return 0 return 1 class AuditResource: def __init__(self, cm, line): fields = line.split() self.CM = cm self.line = line self.type = fields[1] self.id = fields[2] self.clone_id = fields[3] self.parent = fields[4] self.rprovider = fields[5] self.rclass = fields[6] self.rtype = fields[7] self.host = fields[8] self.needs_quorum = fields[9] self.flags = int(fields[10]) self.flags_s = fields[11] if self.parent == "NA": self.parent = None def unique(self): if self.flags & int("0x00000020", 16): return 1 return 0 def orphan(self): if self.flags & int("0x00000001", 16): return 1 return 0 def managed(self): if self.flags & int("0x00000002", 16): return 1 return 0 class AuditConstraint: def __init__(self, cm, line): fields = line.split() self.CM = cm self.line = line self.type = fields[1] self.id = fields[2] self.rsc = fields[3] self.target = fields[4] self.score = fields[5] self.rsc_role = fields[6] self.target_role = fields[7] if self.rsc_role == "NA": self.rsc_role = None if self.target_role == "NA": self.target_role = None class PrimitiveAudit(ClusterAudit): def name(self): return "PrimitiveAudit" def __init__(self, cm): self.CM = cm def doResourceAudit(self, resource, quorum): rc=1 active = self.CM.ResourceLocation(resource.id) if len(active) == 1: if quorum: self.debug("Resource %s active on %s" % (resource.id, repr(active))) elif resource.needs_quorum == 1: self.CM.log("Resource %s active without quorum: %s" % (resource.id, repr(active))) rc=0 elif not resource.managed(): self.CM.log("Resource %s not managed. 
Active on %s" % (resource.id, repr(active))) elif not resource.unique(): # TODO: Figure out a clever way to actually audit these resource types if len(active) > 1: self.debug("Non-unique resource %s is active on: %s" % (resource.id, repr(active))) else: self.debug("Non-unique resource %s is not active" % resource.id) elif len(active) > 1: self.CM.log("Resource %s is active multiple times: %s" % (resource.id, repr(active))) rc=0 elif resource.orphan(): self.debug("Resource %s is an inactive orphan" % resource.id) elif len(self.inactive_nodes) == 0: self.CM.log("WARN: Resource %s not served anywhere" % resource.id) rc=0 elif self.CM.Env["warn-inactive"] == 1: if quorum or not resource.needs_quorum: self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) else: self.debug("Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) elif quorum or not resource.needs_quorum: self.debug("Resource %s not served anywhere (Inactive nodes: %s)" % (resource.id, repr(self.inactive_nodes))) return rc def setup(self): self.target = None self.resources = [] self.constraints = [] self.active_nodes = [] self.inactive_nodes = [] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.active_nodes.append(node) else: self.inactive_nodes.append(node) for node in self.CM.Env["nodes"]: if self.target == None and self.CM.ShouldBeStatus[node] == "up": self.target = node if not self.target: # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource # with CIB_file=/path/to/cib.xml even when the cluster isn't running self.debug("No nodes active - skipping %s" % self.name()) return 0 (rc, lines) = self.CM.rsh(self.target, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): self.resources.append(AuditResource(self.CM, line)) elif re.search("^Constraint", line): self.constraints.append(AuditConstraint(self.CM, line)) else: self.CM.log("Unknown entry: %s" % line); return 1 def __call__(self): rc = 1 if not self.setup(): return 1 quorum = self.CM.HasQuorum(None) for resource in self.resources: if resource.type == "primitive": if self.doResourceAudit(resource, quorum) == 0: rc = 0 return rc def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 class GroupAudit(PrimitiveAudit): def name(self): return "GroupAudit" def __call__(self): rc = 1 if not self.setup(): return 1 for group in self.resources: if group.type == "group": first_match = 1 group_location = None for child in self.resources: if child.parent == group.id: nodes = self.CM.ResourceLocation(child.id) if first_match and len(nodes) > 0: group_location = nodes[0] first_match = 0 if len(nodes) > 1: rc = 0 self.CM.log("Child %s of %s is active more than once: %s" % (child.id, group.id, repr(nodes))) elif len(nodes) == 0: # Groups are allowed to be partially active # However we do need to make sure later children aren't running group_location = None self.debug("Child %s of %s is stopped" % (child.id, group.id)) elif nodes[0] != group_location: rc = 0 self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s" % (child.id, group.id, nodes[0], group_location)) else: self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0])) return rc class CloneAudit(PrimitiveAudit): def name(self): return "CloneAudit" def __call__(self): rc = 1 if not self.setup(): return 1 for clone in self.resources: if clone.type == "clone": for child 
in self.resources: if child.parent == clone.id and child.type == "primitive": self.debug("Checking child %s of %s..." % (child.id, clone.id)) # Check max and node_max # Obtain with: # crm_resource -g clone_max --meta -r child.id # crm_resource -g clone_node_max --meta -r child.id return rc class ColocationAudit(PrimitiveAudit): def name(self): return "ColocationAudit" def crm_location(self, resource): (rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, None) hosts = [] if rc == 0: for line in lines: fields = line.split() hosts.append(fields[0]) return hosts def __call__(self): rc = 1 if not self.setup(): return 1 for coloc in self.constraints: if coloc.type == "rsc_colocation": source = self.crm_location(coloc.rsc) target = self.crm_location(coloc.target) if len(source) == 0: self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc)) else: for node in source: if not node in target: rc = 0 self.CM.log("Colocation audit (%s): %s running on %s (not in %s)" % (coloc.id, coloc.rsc, node, repr(target))) else: self.debug("Colocation audit (%s): %s running on %s (in %s)" % (coloc.id, coloc.rsc, node, repr(target))) return rc class CrmdStateAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 up_are_down = 0 down_are_up = 0 unstable_list = [] for node in self.CM.Env["nodes"]: should_be = self.CM.ShouldBeStatus[node] rc = self.CM.test_node_CM(node) if rc > 0: if should_be == "down": down_are_up = down_are_up + 1 if rc == 1: unstable_list.append(node) elif should_be == "up": up_are_down = up_are_down + 1 if len(unstable_list) > 0: passed = 0 self.CM.log("Cluster is not stable: %d (of %d): %s" %(len(unstable_list), self.CM.upcount(), repr(unstable_list))) if up_are_down > 0: passed = 0 self.CM.log("%d (of %d) nodes expected to be up were down." %(up_are_down, len(self.CM.Env["nodes"]))) if down_are_up > 0: passed = 0 self.CM.log("%d (of %d) nodes expected to be down were up." 
%(down_are_up, len(self.CM.Env["nodes"]))) return passed def name(self): return "CrmdStateAudit" def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 class CIBAudit(ClusterAudit): def __init__(self, cm): self.CM = cm - self.silentrsh = CTS.RemoteExec(cm.Env, silent=True) self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 ccm_partitions = self.CM.find_partitions() if len(ccm_partitions) == 0: self.debug("\tNo partitions to audit") return 1 for partition in ccm_partitions: self.debug("\tAuditing CIB consistency for: %s" %partition) partition_passed = 0 if self.audit_cib_contents(partition) == 0: passed = 0 return passed def audit_cib_contents(self, hostlist): passed = 1 node0 = None node0_xml = None partition_hosts = hostlist.split() for node in partition_hosts: node_xml = self.store_remote_cib(node, node0) if node_xml == None: self.CM.log("Could not perform audit: No configuration from %s" % node) passed = 0 elif node0 == None: node0 = node node0_xml = node_xml elif node0_xml == None: self.CM.log("Could not perform audit: No configuration from %s" % node0) passed = 0 else: (rc, result) = self.CM.rsh( node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), None) if rc != 0: self.CM.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc)) passed = 0 for line in result: if not re.search("", line): passed = 0 self.debug("CibDiff[%s-%s]: %s" % (node0, node, line)) else: self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line)) # self.CM.rsh(node0, "rm -f %s" % node_xml) # self.CM.rsh(node0, "rm -f %s" % node0_xml) return passed def store_remote_cib(self, node, target): combined = "" filename="/tmp/ctsaudit.%s.xml" % node if not target: target = node (rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], None) if rc != 0: self.CM.log("Could not retrieve configuration") return None self.CM.rsh("localhost", "rm -f %s" % filename) for line in lines: - self.silentrsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename)) + self.CM.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), silent=True) - if self.silentrsh.cp(filename, "root@%s:%s" % (target, filename)) != 0: + if self.CM.rsh.cp(filename, "root@%s:%s" % (target, filename), silent=True) != 0: self.CM.log("Could not store configuration") return None return filename def name(self): return "CibAudit" def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 class PartitionAudit(ClusterAudit): def __init__(self, cm): self.CM = cm self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} self.NodeEpoche={} self.NodeState={} self.NodeQuorum={} def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def __call__(self): passed = 1 ccm_partitions = 
self.CM.find_partitions() if ccm_partitions == None or len(ccm_partitions) == 0: return 1 self.CM.cluster_stable(double_check=True) if len(ccm_partitions) != self.CM.partitions_expected: self.CM.log("ERROR: %d cluster partitions detected:" %len(ccm_partitions)) passed = 0 for partition in ccm_partitions: self.CM.log("\t %s" %partition) for partition in ccm_partitions: partition_passed = 0 if self.audit_partition(partition) == 0: passed = 0 return passed def trim_string(self, avalue): if not avalue: return None if len(avalue) > 1: return avalue[:-1] def trim2int(self, avalue): if not avalue: return None if len(avalue) > 1: return int(avalue[:-1]) def audit_partition(self, partition): passed = 1 dc_found = [] dc_allowed_list = [] lowest_epoche = None node_list = partition.split() self.debug("Auditing partition: %s" %(partition)) for node in node_list: if self.CM.ShouldBeStatus[node] != "up": self.CM.log("Warn: Node %s appeared out of nowhere" %(node)) self.CM.ShouldBeStatus[node] = "up" # not in itself a reason to fail the audit (not what we're # checking for in this audit) self.NodeState[node] = self.CM.rsh(node, self.CM["StatusCmd"]%node, 1) self.NodeEpoche[node] = self.CM.rsh(node, self.CM["EpocheCmd"], 1) self.NodeQuorum[node] = self.CM.rsh(node, self.CM["QuorumCmd"], 1) self.debug("Node %s: %s - %s - %s." %(node, self.NodeState[node], self.NodeEpoche[node], self.NodeQuorum[node])) self.NodeState[node] = self.trim_string(self.NodeState[node]) self.NodeEpoche[node] = self.trim2int(self.NodeEpoche[node]) self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node]) if not self.NodeEpoche[node]: self.CM.log("Warn: Node %s dissappeared: cant determin epoche" %(node)) self.CM.ShouldBeStatus[node] = "down" # not in itself a reason to fail the audit (not what we're # checking for in this audit) elif lowest_epoche == None or self.NodeEpoche[node] < lowest_epoche: lowest_epoche = self.NodeEpoche[node] if not lowest_epoche: self.CM.log("Lowest epoche not determined in %s" % (partition)) passed = 0 for node in node_list: if self.CM.ShouldBeStatus[node] == "up": if self.CM.is_node_dc(node, self.NodeState[node]): dc_found.append(node) if self.NodeEpoche[node] == lowest_epoche: self.debug("%s: OK" % node) elif not self.NodeEpoche[node]: self.debug("Check on %s ignored: no node epoche" % node) elif not lowest_epoche: self.debug("Check on %s ignored: no lowest epoche" % node) else: self.CM.log("DC %s is not the oldest node (%d vs. 
%d)" %(node, self.NodeEpoche[node], lowest_epoche)) passed = 0 if len(dc_found) == 0: self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)" %(len(dc_allowed_list), str(dc_allowed_list), str(node_list))) elif len(dc_found) > 1: self.CM.log("%d DCs (%s) found in cluster partition: %s" %(len(dc_found), str(dc_found), str(node_list))) passed = 0 if passed == 0: for node in node_list: if self.CM.ShouldBeStatus[node] == "up": self.CM.log("epoche %s : %s" %(self.NodeEpoche[node], self.NodeState[node])) return passed def name(self): return "PartitionAudit" def is_applicable(self): if self.CM["Name"] == "crm-lha": return 1 if self.CM["Name"] == "crm-ais": return 1 return 0 AllAuditClasses.append(DiskAudit) AllAuditClasses.append(LogAudit) AllAuditClasses.append(CrmdStateAudit) AllAuditClasses.append(PartitionAudit) AllAuditClasses.append(PrimitiveAudit) AllAuditClasses.append(GroupAudit) AllAuditClasses.append(CloneAudit) AllAuditClasses.append(ColocationAudit) AllAuditClasses.append(CIBAudit) def AuditList(cm): result = [] for auditclass in AllAuditClasses: a = auditclass(cm) if a.is_applicable(): result.append(a) return result diff --git a/cts/CTSscenarios.py b/cts/CTSscenarios.py index 4f627686b5..b4eafb9e4b 100644 --- a/cts/CTSscenarios.py +++ b/cts/CTSscenarios.py @@ -1,539 +1,539 @@ from CTS import * from CTStests import CTSTest from CTSaudits import ClusterAudit class ScenarioComponent: def __init__(self, Env): self.Env = Env def IsApplicable(self): '''Return TRUE if the current ScenarioComponent is applicable in the given LabEnvironment given to the constructor. ''' raise ValueError("Abstract Class member (IsApplicable)") def SetUp(self, CM): '''Set up the given ScenarioComponent''' raise ValueError("Abstract Class member (Setup)") def TearDown(self, CM): '''Tear down (undo) the given ScenarioComponent''' raise ValueError("Abstract Class member (Setup)") class Scenario: ( '''The basic idea of a scenario is that of an ordered list of ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn, and then after the tests have been run, they are torn down using TearDown() (in reverse order). A Scenario is applicable to a particular cluster manager iff each ScenarioComponent is applicable. A partially set up scenario is torn down if it fails during setup. ''') def __init__(self, ClusterManager, Components, Audits, Tests): "Initialize the Scenario from the list of ScenarioComponents" self.ClusterManager = ClusterManager self.Components = Components self.Audits = Audits self.Tests = Tests self.BadNews = None self.TestSets = [] self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0} self.Sets = [] #self.ns=CTS.NodeStatus(self.Env) for comp in Components: if not issubclass(comp.__class__, ScenarioComponent): raise ValueError("Init value must be subclass of ScenarioComponent") for audit in Audits: if not issubclass(audit.__class__, ClusterAudit): raise ValueError("Init value must be subclass of ClusterAudit") for test in Tests: if not issubclass(test.__class__, CTSTest): raise ValueError("Init value must be a subclass of CTSTest") def IsApplicable(self): ( '''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable() ''' ) for comp in self.Components: if not comp.IsApplicable(): return None return 1 def SetUp(self): '''Set up the Scenario. 
Return TRUE on success.''' self.ClusterManager.prepare() self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"]) self.audit() self.BadNews = LogWatcher(self.ClusterManager.Env, self.ClusterManager["LogFileName"], self.ClusterManager["BadRegexes"], "BadNews", 0) self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit j=0 while j < len(self.Components): if not self.Components[j].SetUp(self.ClusterManager): # OOPS! We failed. Tear partial setups down. self.audit() self.ClusterManager.log("Tearing down partial setup") self.TearDown(self.ClusterManager, j) return None j=j+1 self.audit() return 1 def TearDown(self, max=None): '''Tear Down the Scenario - in reverse order.''' if max == None: max = len(self.Components)-1 j=max while j >= 0: self.Components[j].TearDown(self.ClusterManager) j=j-1 self.audit() def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 def run(self, Iterations): self.ClusterManager.oprofileStart() try: self.run_loop(Iterations) self.ClusterManager.oprofileStop() except: self.ClusterManager.oprofileStop() raise def run_loop(self, Iterations): raise ValueError("Abstract Class member (run_loop)") def run_test(self, test, testcount): nodechoice = self.ClusterManager.Env.RandomNode() ret = 1 where = "" did_run = 0 self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]") starttime = test.set_timer() if not test.setup(nodechoice): self.ClusterManager.log("Setup failed") ret = 0 elif not test.canrunnow(nodechoice): self.ClusterManager.log("Skipped") test.skipped() else: did_run = 1 ret = test(nodechoice) if not test.teardown(nodechoice): self.ClusterManager.log("Teardown failed") ret = 0 stoptime=time.time() self.ClusterManager.oprofileSave(testcount) elapsed_time = stoptime - starttime test_time = stoptime - test.get_timer() if not test.has_key("min_time"): test["elapsed_time"] = elapsed_time test["min_time"] = test_time test["max_time"] = test_time else: test["elapsed_time"] = test["elapsed_time"] + elapsed_time if test_time < test["min_time"]: test["min_time"] = test_time if test_time > test["max_time"]: test["max_time"] = test_time if ret: self.incr("success") test.log_timer() else: self.incr("failure") self.ClusterManager.statall() did_run = 1 # Force the test count to be incrimented anyway so test extraction works self.audit(test.errorstoignore()) return did_run def summarize(self): self.ClusterManager.log("****************") self.ClusterManager.log("Overall Results:" + repr(self.Stats)) self.ClusterManager.log("****************") stat_filter = { "calls":0, "failure":0, "skipped":0, "auditfail":0, } self.ClusterManager.log("Test Summary") for test in self.Tests: for key in stat_filter.keys(): stat_filter[key] = test.Stats[key] self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter)) self.ClusterManager.debug("Detailed Results") for test in self.Tests: self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats)) self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED") def audit(self, LocalIgnore=[]): errcount=0 ignorelist = [] ignorelist.append("CTS:") ignorelist.extend(LocalIgnore) ignorelist.extend(self.ClusterManager.errorstoignore()) # This makes sure everything is stabilized before starting... 
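Editorial aside, not part of the patch: the ordering contract that the Scenario docstring describes — components are set up first-to-last, and after a failure (or at the end of the run) torn down last-to-first — can be summarised in a few lines. The `Component` class below is a hypothetical stand-in for real ScenarioComponent subclasses; only the ordering logic mirrors the code above.

```python
# Hypothetical stand-in for ScenarioComponent subclasses, illustration only.
class Component:
    def __init__(self, name, fail=False):
        self.name = name
        self.fail = fail

    def SetUp(self):
        print("SetUp %s" % self.name)
        return not self.fail

    def TearDown(self):
        print("TearDown %s" % self.name)


def set_up_all(components):
    '''Set components up in order; on failure, undo what was done, in reverse.'''
    done = []
    for comp in components:
        if not comp.SetUp():
            # Partial setup: tear down the already-started components last-to-first
            for prev in reversed(done):
                prev.TearDown()
            return False
        done.append(comp)
    return True


if __name__ == "__main__":
    # The second component fails, so only "cluster" is torn down afterwards.
    set_up_all([Component("cluster"), Component("pingfest", fail=True)])
```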
failed = 0 for audit in self.Audits: if not audit(): self.ClusterManager.log("Audit " + audit.name() + " FAILED.") failed += 1 else: self.ClusterManager.debug("Audit " + audit.name() + " passed.") while errcount < 1000: match = None if self.BadNews: match=self.BadNews.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.ClusterManager.log("BadNews: " + match) self.incr("BadNews") errcount=errcount+1 else: break else: answer = raw_input('Big problems. Continue? [nY]') if answer and answer == "n": self.ClusterManager.log("Shutting down.") self.summarize() self.TearDown() raise ValueError("Looks like we hit a BadNews jackpot!") return failed class AllOnce(Scenario): '''Every Test Once''' # Accessable as __doc__ def run_loop(self, Iterations): testcount=1 for test in self.Tests: self.run_test(test, testcount) testcount += 1 class RandomTests(Scenario): '''Random Test Execution''' def run_loop(self, Iterations): testcount=1 while testcount <= Iterations: test = self.ClusterManager.Env.RandomGen.choice(self.Tests) self.run_test(test, testcount) testcount += 1 class BasicSanity(Scenario): '''Basic Cluster Sanity''' def run_loop(self, Iterations): testcount=1 while testcount <= Iterations: test = self.Environment.RandomGen.choice(self.Tests) self.run_test(test, testcount) testcount += 1 class Sequence(Scenario): '''Named Tests in Sequence''' def run_loop(self, Iterations): testcount=1 while testcount <= Iterations: for test in self.Tests: self.run_test(test, testcount) testcount += 1 class InitClusterManager(ScenarioComponent): ( '''InitClusterManager is the most basic of ScenarioComponents. This ScenarioComponent simply starts the cluster manager on all the nodes. It is fairly robust as it waits for all nodes to come up before starting as they might have been rebooted or crashed for some reason beforehand. ''') def __init__(self, Env): pass def IsApplicable(self): '''InitClusterManager is so generic it is always Applicable''' return 1 def SetUp(self, CM): '''Basic Cluster Manager startup. Start everything''' CM.prepare() # Clear out the cobwebs ;-) self.TearDown(CM) # Now start the Cluster Manager on all the nodes. CM.log("Starting Cluster Manager on all nodes.") - return CM.startall() + return CM.startall(verbose=True) def TearDown(self, CM): '''Set up the given ScenarioComponent''' # Stop the cluster manager everywhere CM.log("Stopping Cluster Manager on all nodes") - return CM.stopall() + return CM.stopall(verbose=True) class PingFest(ScenarioComponent): ( '''PingFest does a flood ping to each node in the cluster from the test machine. If the LabEnvironment Parameter PingSize is set, it will be used as the size of ping packet requested (via the -s option). If it is not set, it defaults to 1024 bytes. According to the manual page for ping: Outputs packets as fast as they come back or one hundred times per second, whichever is more. For every ECHO_REQUEST sent a period ``.'' is printed, while for every ECHO_REPLY received a backspace is printed. This provides a rapid display of how many packets are being dropped. Only the super-user may use this option. This can be very hard on a net- work and should be used with caution. 
''' ) def __init__(self, Env): self.Env = Env def IsApplicable(self): '''PingFests are always applicable ;-) ''' return 1 def SetUp(self, CM): '''Start the PingFest!''' self.PingSize=1024 if CM.Env.has_key("PingSize"): self.PingSize=CM.Env["PingSize"] CM.log("Starting %d byte flood pings" % self.PingSize) self.PingPids=[] for node in CM.Env["nodes"]: self.PingPids.append(self._pingchild(node)) CM.log("Ping PIDs: " + repr(self.PingPids)) return 1 def TearDown(self, CM): '''Stop it right now! My ears are pinging!!''' for pid in self.PingPids: if pid != None: CM.log("Stopping ping process %d" % pid) os.kill(pid, signal.SIGKILL) def _pingchild(self, node): Args = ["ping", "-qfn", "-s", str(self.PingSize), node] sys.stdin.flush() sys.stdout.flush() sys.stderr.flush() pid = os.fork() if pid < 0: self.Env.log("Cannot fork ping child") return None if pid > 0: return pid # Otherwise, we're the child process. os.execvp("ping", Args) self.Env.log("Cannot execvp ping: " + repr(Args)) sys.exit(1) class PacketLoss(ScenarioComponent): ( ''' It would be useful to do some testing of CTS with a modest amount of packet loss enabled - so we could see that everything runs like it should with a certain amount of packet loss present. ''') def IsApplicable(self): '''always Applicable''' return 1 def SetUp(self, CM): '''Reduce the reliability of communications''' if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 : return 1 for node in CM.Env["nodes"]: CM.reducecomm_node(node) CM.log("Reduce the reliability of communications") return 1 def TearDown(self, CM): '''Fix the reliability of communications''' if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 : return 1 for node in CM.Env["nodes"]: CM.unisolate_node(node) CM.log("Fix the reliability of communications") class BasicSanityCheck(ScenarioComponent): ( ''' ''') def IsApplicable(self): return self.Env["DoBSC"] def SetUp(self, CM): CM.prepare() # Clear out the cobwebs self.TearDown(CM) # Now start the Cluster Manager on all the nodes. CM.log("Starting Cluster Manager on BSC node(s).") return CM.startall() def TearDown(self, CM): CM.log("Stopping Cluster Manager on BSC node(s).") return CM.stopall() class Benchmark(ScenarioComponent): ( ''' ''') def IsApplicable(self): return self.Env["benchmark"] def SetUp(self, CM): CM.prepare() # Clear out the cobwebs self.TearDown(CM) # Now start the Cluster Manager on all the nodes. CM.log("Starting Cluster Manager on all node(s).") return CM.startall() def TearDown(self, CM): CM.log("Stopping Cluster Manager on all node(s).") return CM.stopall() class RollingUpgrade(ScenarioComponent): ( ''' Test a rolling upgrade between two versions of the stack ''') def __init__(self, Env): self.Env = Env def IsApplicable(self): if not self.Env["rpm-dir"]: return None if not self.Env["current-version"]: return None if not self.Env["previous-version"]: return None return 1 def install(self, node, version): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version) rc = self.CM.rsh(node, "mkdir -p %s" % target_dir) rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir)) rc = self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir)) return self.success() def upgrade(self, node): return self.install(node, self.CM.Env["current-version"]) def downgrade(self, node): return self.install(node, self.CM.Env["previous-version"]) def SetUp(self, CM): CM.prepare() # Clear out the cobwebs CM.stopall() CM.log("Downgrading all nodes to %s." 
% self.Env["previous-version"]) for node in self.Env["nodes"]: if not self.downgrade(node): CM.log("Couldn't downgrade %s" % node) return None return 1 def TearDown(self, CM): # Stop everything CM.log("Stopping Cluster Manager on Upgrade nodes.") CM.stopall() CM.log("Upgrading all nodes to %s." % self.Env["current-version"]) for node in self.Env["nodes"]: if not self.upgrade(node): CM.log("Couldn't upgrade %s" % node) return None return 1 diff --git a/cts/CTStests.py b/cts/CTStests.py index d771ca5c3d..66af26a6d0 100644 --- a/cts/CTStests.py +++ b/cts/CTStests.py @@ -1,2188 +1,2194 @@ '''CTS: Cluster Testing System: Tests module There are a few things we want to do here: ''' __copyright__=''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. Add RecourceRecover testcase Zhao Kai ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. # import time, os, re, types, string, tempfile, sys from stat import * from cts import CTS from cts.CTSaudits import * AllTestClasses = [ ] class CTSTest: ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. 
''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Audits = [] self.timeout=120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_experimental = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def has_key(self, key): return self.Stats.has_key(key) def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): return self.Stats[key] def log_mark(self, msg): self.CM.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" %(self.name,key) self.CM.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not self.Stats.has_key(name): self.Stats[name]=0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.CM.log(("Test %s" % self.name).ljust(35) +" FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.CM.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return CTS.LogWatcher(self.CM.Env, self.CM["LogFileName"], patterns, name, timeout) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match=watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.CM.log(prefix + " " + match) errcount=errcount+1 else: break else: self.CM.log("Too many errors!") return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.CM.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.CM.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.CM.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.CM.Env["experimental-tests"]: return 0 elif self.CM.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.CM.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.CM.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] ################################################################### class StopTest(CTSTest): ################################################################### '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name="Stop" def __call__(self, node): '''Perform the 'stop' test. 
''' self.incr("calls") if self.CM.ShouldBeStatus[node] != "up": return self.skipped() patterns = [] # Technically we should always be able to notice ourselves stopping patterns.append(self.CM["Pat:We_stopped"] % node) #if self.CM.Env["use_logd"]: # patterns.append(self.CM["Pat:Logd_stopped"] % node) # Any active node needs to notice this one left # NOTE: This wont work if we have multiple partitions for other in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[other] == "up" and other != node: patterns.append(self.CM["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) #self.debug("Checking %s will notice %s left"%(other, node)) watch = self.create_watch(patterns, self.CM["DeadTime"]) watch.setwatch() if node == self.CM.OurNode: self.incr("us") else: if self.CM.upcount() <= 1: self.incr("all") else: self.incr("them") self.CM.StopaCM(node) watch_result = watch.lookforall() failreason=None UnmatchedList = "||" if watch.unmatched: (rc, output) = self.CM.rsh(node, "/bin/ps axf", None) for line in output: self.CM.debug(line) for regex in watch.unmatched: self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex)) UnmatchedList += regex + "||"; failreason="Missing shutdown pattern" self.CM.cluster_stable(self.CM["DeadTime"]) if not watch.unmatched or self.CM.upcount() == 0: return self.success() if len(watch.unmatched) >= self.CM.upcount(): return self.failure("no match against (%s)" % UnmatchedList) if failreason == None: return self.success() else: return self.failure(failreason) # # We don't register StopTest because it's better when called by # another test... # ################################################################### class StartTest(CTSTest): ################################################################### '''Start (activate) the cluster manager on a node''' def __init__(self, cm, debug=None): CTSTest.__init__(self,cm) self.name="start" self.debug = debug def __call__(self, node): '''Perform the 'start' test. ''' self.incr("calls") if self.CM.upcount() == 0: self.incr("us") else: self.incr("them") if self.CM.ShouldBeStatus[node] != "down": return self.skipped() elif self.CM.StartaCM(node): return self.success() else: return self.failure("Startup %s on node %s failed" %(self.CM["Name"], node)) # # We don't register StartTest because it's better when called by # another test... # ################################################################### class FlipTest(CTSTest): ################################################################### '''If it's running, stop it. If it's stopped start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Flip" self.start = StartTest(cm) self.stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") if self.CM.ShouldBeStatus[node] == "up": self.incr("stopped") ret = self.stop(node) type="up->down" # Give the cluster time to recognize it's gone... 
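Editorial illustration, not patch content: StopTest (like most tests here) follows a watch-before-act pattern — build the list of expected regexes, arm the watcher, trigger the action, then report whatever never matched. The snippet below is a simplified, stdlib-only stand-in for what `CTS.LogWatcher.setwatch()`/`lookforall()` do conceptually; the log path and patterns in the usage comment are assumptions, and the real class additionally supports remote log readers and per-pattern bookkeeping.

```python
import re
import time

def lookforall(logfile, patterns, timeout):
    '''Wait until every pattern has matched a new log line, or timeout expires.
    Returns the patterns that never matched (analogous to watch.unmatched).'''
    remaining = [re.compile(p) for p in patterns]
    deadline = time.time() + timeout
    with open(logfile) as log:
        log.seek(0, 2)              # start at end of file, like setwatch()
        while remaining and time.time() < deadline:
            line = log.readline()
            if not line:
                time.sleep(1)       # nothing new yet, poll again
                continue
            remaining = [r for r in remaining if not r.search(line)]
    return [r.pattern for r in remaining]

# Example usage (assumed log path and patterns):
# unmatched = lookforall("/var/log/messages",
#                        ["crmd.*We_stopped", "crmd.*They_stopped"], 120)
```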
time.sleep(self.CM["StableTime"]) elif self.CM.ShouldBeStatus[node] == "down": self.incr("started") ret = self.start(node) type="down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) ################################################################### class RestartTest(CTSTest): ################################################################### '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Restart" self.start = StartTest(cm) self.stop = StopTest(cm) self.benchmark = 1 def __call__(self, node): '''Perform the 'restart' test. ''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) ################################################################### class StonithdTest(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self, cm) self.name="Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.CM.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") + is_dc = self.CM.is_node_dc(node) + watchpats = [] - watchpats.append("Node %s will be fenced because termination was requested" % node) - watchpats.append("Scheduling Node %s for STONITH" % node) - watchpats.append("Executing .* fencing operation") watchpats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node) + watchpats.append("tengine_stonith_notify: Peer %s was terminated .*: OK" % node) - if not self.CM.is_node_dc(node): - # Won't be found if the DC is shot (and there's no equivalent message from stonithd) + if is_dc: + watchpats.append("tengine_stonith_notify: Target was our leader .*%s" % node) + else: watchpats.append("tengine_stonith_callback: .*: OK ") - # TODO else: look for the notification on a peer once implimented + + if self.CM.Env["LogWatcher"] != "remote" or not is_dc: + # Often remote logs aren't flushed to disk by the time the node is shot, + # so we wont be able to find them + # Remote syslog doesn't suffer this problem because they're already on + # the loghost when the node is shot + watchpats.append("Node %s will be fenced because termination was requested" % node) + watchpats.append("Scheduling Node %s for STONITH" % node) + watchpats.append("Executing .* fencing operation") if self.CM.Env["at-boot"] == 0: self.CM.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node]="down" else: self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"])) watchpats.append("%s crmd: .* S_STARTING -> S_PENDING" % node) watchpats.append("%s crmd: .* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, 30 + self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"]) watch.setwatch() self.CM.rsh(node, "crm_attribute --node %s --type status --attr-name terminate --attr-value true" % node) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") - if 
matched: - self.CM.debug("Found: "+ repr(matched)) - else: + if watch.unmatched: self.CM.log("Patterns not found: " + repr(watch.unmatched)) self.CM.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.CM.debug("Waiting STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600) self.CM.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.CM["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ "Executing .* fencing operation" ] def is_applicable(self): if not self.is_applicable_common(): return 0 if self.CM.Env.has_key("DoStonith"): return self.CM.Env["DoStonith"] return 1 AllTestClasses.append(StonithdTest) ################################################################### class StartOnebyOne(CTSTest): ################################################################### '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="StartOnebyOne" self.stopall = SimulStopLite(cm) self.start = StartTest(cm) self.ns=CTS.NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed=[] self.set_timer() for node in self.CM.Env["nodes"]: if not self.start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) ################################################################### class SimulStart(CTSTest): ################################################################### '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStart" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") self.CM.clear_all_caches() if not self.startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) ################################################################### class SimulStop(CTSTest): ################################################################### '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStop" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... 
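For clarity (illustration only, not part of the patch): the StonithdTest hunk above makes the expected fencing log patterns depend on whether the target node is the DC and on which log reader is in use, since local logs may not survive the node being shot. The same decision, pulled out as a pure function using the regex strings taken from the patch:

```python
def stonith_watch_patterns(node, is_dc, log_watcher):
    '''Build the fencing-related log patterns expected for "node".
    log_watcher is "remote" or "syslog"; regexes are those from the patch.'''
    pats = [
        "stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node,
        "tengine_stonith_notify: Peer %s was terminated .*: OK" % node,
    ]
    if is_dc:
        pats.append("tengine_stonith_notify: Target was our leader .*%s" % node)
    else:
        pats.append("tengine_stonith_callback: .*: OK ")
    if log_watcher != "remote" or not is_dc:
        # Local logs may not be flushed to disk before the node is shot,
        # so only expect the scheduling messages when we can still read them.
        pats.append("Node %s will be fenced because termination was requested" % node)
        pats.append("Scheduling Node %s for STONITH" % node)
        pats.append("Executing .* fencing operation")
    return pats
```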
ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) ################################################################### class StopOnebyOne(CTSTest): ################################################################### '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="StopOnebyOne" self.startall = SimulStartLite(cm) self.stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") failed=[] self.set_timer() for node in self.CM.Env["nodes"]: if not self.stop(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to stop: " + repr(failed)) self.CM.clear_all_caches() return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(StopOnebyOne) ################################################################### class RestartOnebyOne(CTSTest): ################################################################### '''Restart all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="RestartOnebyOne" self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'RestartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") did_fail=[] self.set_timer() self.restart = RestartTest(self.CM) for node in self.CM.Env["nodes"]: if not self.restart(node): did_fail.append(node) if did_fail: return self.failure("Could not restart %d nodes: %s" %(len(did_fail), repr(did_fail))) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(RestartOnebyOne) ################################################################### class PartialStart(CTSTest): ################################################################### '''Start a node - but tell it to stop before it finishes starting up''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="PartialStart" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) #self.is_unsafe = 1 def __call__(self, node): '''Perform the 'PartialStart' test. ''' self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Setup failed") # FIXME! 
This should use the CM class to get the pattern # then it would be applicable in general watchpats = [] watchpats.append("Starting crmd") watch = self.create_watch(watchpats, self.CM["DeadTime"]+10) watch.setwatch() self.CM.StartaCMnoBlock(node) ret = watch.lookforall() if not ret: self.CM.log("Patterns not found: " + repr(watch.unmatched)) return self.failure("Setup of %s failed" % node) ret = self.stopall(None) if not ret: return self.failure("%s did not stop in time" % node) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(PartialStart) ####################################################################### class StandbyTest(CTSTest): ####################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Standby" self.benchmark = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) # make sure the node is active # set the node to standby mode # check resources, none resource should be running on the node # set the node to active mode # check resouces, resources should have been migrated back (SHOULD THEY?) def __call__(self, node): self.incr("calls") ret=self.startall(None) if not ret: return self.failure("Start all nodes failed") self.CM.debug("Make sure node %s is active" % node) if self.CM.StandbyStatus(node) != "off": if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.CM.debug("Getting resources running on node %s" % node) rsc_on_node = self.CM.active_resources(node) self.CM.debug("Setting node %s to standby mode" % node) if not self.CM.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") time.sleep(1) # Allow time for the update to be applied and cause something self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.CM.debug("Checking resources") bad_run = self.CM.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.CM.debug("Setting node %s to active mode" % node) self.CM.SetStandbyMode(node, "off") return rc self.CM.debug("Setting node %s to active mode" % node) if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) ####################################################################### class ValgrindTest(CTSTest): ####################################################################### '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret=self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # Enable valgrind self.logPat = "/tmp/%s-*.valgrind" % self.name self.CM.Env["valgrind-prefix"] = self.name self.CM.rsh(node, "rm -f %s" 
% self.logPat, None) ret=self.startall(None) if not ret: return self.failure("Start all nodes failed") for node in self.CM.Env["nodes"]: (rc, output) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None) for line in output: self.CM.debug(line) return self.success() def teardown(self, node): # Disable valgrind self.CM.Env["valgrind-prefix"] = None # Return all nodes to normal ret=self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks leaked = [] self.stop = StopTest(self.CM) for node in self.CM.Env["nodes"]: (rc, ps_out) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None) rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.CM.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e ERROR.*SUMMARY:.*[1-9].*errors %s" % self.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) for line in ps_out: self.CM.log(line) (rc, output) = self.CM.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logPat, None) for line in output: self.CM.log(line) (rc, output) = self.CM.rsh(node, "cat %s" % self.logPat, None) for line in output: self.CM.debug(line) self.CM.rsh(node, "rm -f %s" % self.logPat, None) return leaked def __call__(self, node): leaked = self.find_leaks() if len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ] ####################################################################### class StandbyLoopTest(ValgrindTest): ####################################################################### '''Check for memory leaks by putting a node in and out of standby for an hour''' def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name="StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done=time.time() + self.CM.Env["loop-minutes"]*60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() AllTestClasses.append(StandbyLoopTest) ############################################################################## class BandwidthTest(CTSTest): ############################################################################## # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. 
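Editorial sketch, not patch content: the leak check in ValgrindTest shells out to `grep` over the per-node valgrind logs and treats any non-zero "definitely/indirectly lost" or error summary as a failure. A roughly equivalent self-contained check in Python is shown below; the log path pattern and the regex are simplified assumptions rather than the exact expressions the test uses.

```python
import glob
import re

# Simplified leak indicators; the test's actual grep uses similar expressions.
LEAK_RE = re.compile(r"(definitely|indirectly) lost: [1-9]"
                     r"|ERROR SUMMARY: [1-9]")

def node_leaked(log_glob="/tmp/Valgrind-*.valgrind"):   # assumed path pattern
    '''Return True if any valgrind log matching log_glob reports leaks or errors.'''
    for path in glob.glob(log_glob):
        with open(path) as log:
            if any(LEAK_RE.search(line) for line in log):
                return True
    return False
```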
'''Test the bandwidth which heartbeat uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) self.tempfile = tempfile.mktemp(".cts") self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount()<1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.CM.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.CM.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.CM.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.CM.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.CM.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" % rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count=count+1 linesplit = string.split(line," ") for j in range(len(linesplit)-1): if linesplit[j]=="udp": break if linesplit[j]=="length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.CM.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = string.split(T1,":") time2split = string.split(timesplit[2],".") time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = string.split(line," ") for j in range(len(linessplit)-1): if linessplit[j] =="udp": break if linesplit[j]=="length:": break try: sum=int(linessplit[j+1])+sum except ValueError: self.CM.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = string.split(T2,":") time2split = string.split(timesplit[2],".") time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return (sum*8)/time def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class ResourceRecover(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max=30 self.rid=None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. 
''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") resourcelist = self.CM.active_resources(node) # if there are no resourcelist, return directly if len(resourcelist)==0: self.CM.log("No active resources on %s" % node) return self.skipped() self.rid = self.CM.Env.RandomGen.choice(resourcelist) rsc = None (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.id == self.rid: rsc = tmp # Handle anonymous clones that get renamed self.rid = rsc.clone_id break if not rsc: return self.failure("Could not find %s in the resource list" % self.rid) self.CM.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id)) pats = [] pats.append("Updating failcount for %s on .* after .* %s" % (self.rid, self.action)) if rsc.managed(): pats.append("crmd:.* Performing .* op=%s_stop_0" % self.rid) if rsc.unique(): pats.append("crmd:.* Performing .* op=%s_start_0" % self.rid) pats.append("crmd:.* LRM operation %s_start_0.*confirmed.*ok" % self.rid) else: # Anonymous clones may get restarted with a different clone number pats.append("crmd:.* Performing .* op=.*_start_0") pats.append("crmd:.* LRM operation .*_start_0.*confirmed.*ok") watch = self.create_watch(pats, 60) watch.setwatch() self.CM.rsh(node, "crm_resource -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover") watch.lookforall() self.log_timer("recover") self.CM.cluster_stable() recovered=self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.CM.debug("%s is running on: %s" %(self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ """Updating failcount for %s""" % self.rid, """Unknown operation: fail""", """ERROR: sending stonithRA op to stonithd failed.""", """ERROR: process_lrm_event: LRM operation %s_%s_%d""" % (self.rid, self.action, self.interval), """ERROR: process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) ################################################################### class ComponentFail(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="ComponentFail" self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. 
''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.CM["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.CM.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.CM.Env.RandomGen.choice(self.complist) self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "aisexec": if self.CM["Name"] != "crm-lha" or chosen.name != "pengine": self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # In an ideal world, this next stuff should be in the "chosen" object as a member function if self.CM["Name"] == "crm-lha" and chosen.triggersreboot: # Make sure the node goes down and then comes back up if it should reboot... for other in self.CM.Env["nodes"]: if other != node: self.patterns.append(self.CM["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) self.patterns.append(self.CM["Pat:Slave_started"] % node) self.patterns.append(self.CM["Pat:Local_started"] % node) if chosen.dc_only: # Sometimes these will be in the log, and sometimes they won't... self.okerrpatterns.append("%s crmd:.*Process %s:.* exited" %(node, chosen.name)) self.okerrpatterns.append("%s crmd:.*I_ERROR.*crmdManagedChildDied" %node) self.okerrpatterns.append("%s crmd:.*The %s subsystem terminated unexpectedly" %(node, chosen.name)) self.okerrpatterns.append("ERROR: Client .* exited with return code") else: # Sometimes this won't be in the log... 
self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name)) self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name)) self.okerrpatterns.append(self.CM["Pat:ChildExit"]) # supply a copy so self.patterns doesnt end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node) stonith = self.create_watch(stonithPats, 0) stonith.setwatch() # set the watch for stable watch = self.create_watch( tmpPats, self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) # check to see Heartbeat noticed matched = watch.lookforall(allow_multiple_matches=1) - if matched: - self.CM.debug("Found: "+ repr(matched)) - else: + if watch.unmatched: self.CM.log("Patterns not found: " + repr(watch.unmatched)) if self.CM.Env["at-boot"] == 0: self.CM.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.CM.debug("Found: "+ repr(shot)) self.CM.ShouldBeStatus[node]="down" self.CM.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.CM.debug("Waiting for any STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600) self.CM.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.CM["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) #################################################################### class SplitBrainTest(CTSTest): #################################################################### '''It is used to test split-brain. 
when the path between the two nodes break check the two nodes both take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.is_experimental = 1 def isolate_partition(self, partition): other_nodes = [] other_nodes.extend(self.CM.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.CM.debug("Creating partition: " + repr(partition)) self.CM.debug("Everyone else: " + repr(other_nodes)) for node in partition: if not self.CM.isolate_node(node, other_nodes): self.CM.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] other_nodes.extend(self.CM.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"])) if len(other_nodes) == 0: return 1 self.CM.debug("Healing partition: " + repr(partition)) self.CM.debug("Everyone else: " + repr(other_nodes)) for node in partition: self.CM.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") self.passed = 1 partitions = {} ret = self.startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we get multiple partitions partitions = {} p_max = len(self.CM.Env["nodes"]) for node in self.CM.Env["nodes"]: p = self.CM.Env.RandomGen.randint(1, p_max) if not partitions.has_key(p): partitions[p]= [] partitions[p].append(node) p_max = len(partitions.keys()) if p_max > 1: break # else, try again self.CM.debug("Created %d partitions" % p_max) for key in partitions.keys(): self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now self.CM.rsh(node, "crm_attribute -n stonith-enabled -v false") for key in partitions.keys(): self.isolate_partition(partitions[key]) count = 30 while count > 0: if len(self.CM.find_partitions()) != p_max: time.sleep(10) else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability if not self.CM.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state self.CM.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") self.CM.partitions_expected = 1 # And heal them again for key in partitions.keys(): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: if len(self.CM.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] partitions = self.CM.find_partitions() if len(partitions) > 0: members = partitions[0].split() if len(members) != len(self.CM.Env["nodes"]): time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is more preferable than # trying to continue with in a messed up state if not self.CM.cluster_stable(1200): self.failure("Reformed cluster not stable") answer = raw_input('Continue? 
[nY]') if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.CM.Env["DoFencing"]: self.CM.rsh(node, "crm_attribute -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ "Another DC detected:", "ERROR: attrd_cib_callback: .*Application of an update diff failed", "crmd_ha_msg_callback:.*not in our membership list", "CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.CM.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) #################################################################### class Reattach(CTSTest): #################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def setup(self, node): attempt=0 if not self.startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started while not self.CM.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.CM.debug("Not stable yet, re-testing") else: self.CM.log("Cluster is not stable") return None return 1 def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.CM.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] managed = self.create_watch(["is-managed-default"], 60) managed.setwatch() self.CM.debug("Disable resource management") self.CM.rsh(node, "crm_attribute -n is-managed-default -v false") if not managed.lookforall(): self.CM.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append("crmd:.*Performing.*_stop_0") pats.append("crmd:.*Performing.*_start_0") pats.append("crmd:.*Performing.*_promote_0") pats.append("crmd:.*Performing.*_demote_0") pats.append("crmd:.*Performing.*_migrate_.*_0") watch = self.create_watch(pats, 60, "ShutdownActivity") watch.setwatch() self.CM.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") return self.failure("Couldn't shut down the cluster") self.CM.debug("Bringing the cluster back up") ret = self.startall(None) + time.sleep(5) # allow ping to update the CIB if not ret: self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") return self.failure("Couldn't restart the cluster") if self.local_badnews("ResourceActivity:", watch): self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") return self.failure("Resources stopped or started during cluster restart") watch = self.create_watch(pats, 60, "StartupActivity") watch.setwatch() managed = self.create_watch(["is-managed-default"], 60) managed.setwatch() self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") if not managed.lookforall(): self.CM.log("Patterns not found: " + repr(managed.unmatched)) return 
self.failure("Resource management not enabled") self.CM.cluster_stable() # Ignore actions for STONITH resources ignore = [] (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.CM.debug("Ignoring: crmd:.*Performing.*op=%s_.*_0" % r.id) ignore.append("crmd:.*Performing.*op=%s_.*_0" % r.id) if self.local_badnews("ResourceActivity:", watch, ignore): return self.failure("Resources stopped or started after resource management was re-enabled") return ret def errorstoignore(self): '''Return list of errors which should be ignored''' return [ "You may ignore this error if it is unmanaged.", "pingd: .*ERROR: send_ipc_message:", "pingd: .*ERROR: send_update:", "lrmd: .*ERROR: notify_client:", ] def is_applicable(self): if self.CM["Name"] == "crm-lha": return None return 1 AllTestClasses.append(Reattach) #################################################################### class SpecialTest1(CTSTest): #################################################################### '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SpecialTest1" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. ''' self.incr("calls") # Shut down all the nodes... ret = self.stopall(None) if not ret: return ret # Start the selected node ret = self.restart1(node) if not ret: return ret # Start all remaining nodes ret = self.startall(None) return ret AllTestClasses.append(SpecialTest1) #################################################################### class HAETest(CTSTest): #################################################################### '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="HAETest" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_loop = 1 def setup(self, node): # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active=0 (rc, lines) = self.CM.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.CM.debug("Resource %s is still inactive" % resource) elif rc == 234: self.CM.log("Unknown resource %s" % resource) return 0 elif rc == 246: self.CM.log("Cluster is inactive") return 0 elif rc != 0: self.CM.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.CM.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "controld" and r.parent != "NA": self.CM.debug("Found dlm: %s" % self.r_dlm) self.r_dlm = r.parent return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None self.r_o2cb = None self.r_ocfs2 = [] if self.find_dlm(node): 
self.find_ocfs2_resources(node) def is_applicable(self): if not self.is_applicable_common(): return 0 if self.CM.Env["Schema"] == "hae": return 1 return None #################################################################### class HAERoleTest(HAETest): #################################################################### def __init__(self, cm): '''Lars' mount/unmount test for the HA extension. ''' HAETest.__init__(self,cm) self.name="HAERoleTest" def change_state(self, node, resource, target): rc = self.CM.rsh(node, "crm_resource -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 done=time.time() + self.CM.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.CM.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) #################################################################### class HAEStandbyTest(HAETest): #################################################################### '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): HAETest.__init__(self,cm) self.name="HAEStandbyTest" def change_state(self, node, resource, target): rc = self.CM.rsh(node, "crm_standby -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 done=time.time() + self.CM.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.CM.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) ################################################################### class NearQuorumPointTest(CTSTest): ################################################################### ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down Use a 50% probability for each of up/down. 
- figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test. ''' self.incr("calls") startset = [] stopset = [] #decide what to do with each node for node in self.CM.Env["nodes"]: action = self.CM.Env.RandomGen.choice(["start","stop"]) #action = self.CM.Env.RandomGen.choice(["start","stop","no change"]) if action == "start" : startset.append(node) elif action == "stop" : stopset.append(node) self.CM.debug("start nodes:" + repr(startset)) self.CM.debug("stop nodes:" + repr(stopset)) #add search patterns watchpats = [ ] for node in stopset: if self.CM.ShouldBeStatus[node] == "up": watchpats.append(self.CM["Pat:We_stopped"] % node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": #watchpats.append(self.CM["Pat:Slave_started"] % node) watchpats.append(self.CM["Pat:Local_started"] % node) else: for stopping in stopset: if self.CM.ShouldBeStatus[stopping] == "up": watchpats.append(self.CM["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping))) if len(watchpats) == 0: return self.skipped() if len(startset) != 0: watchpats.append(self.CM["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, self.CM["DeadTime"]+10) watch.setwatch() #begin actions for node in stopset: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) #get the result if watch.lookforall(): self.CM.cluster_stable() return self.success() self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched)) #get the "bad" nodes upnodes = [] for node in stopset: if self.CM.StataCM(node) == 1: upnodes.append(node) downnodes = [] for node in startset: if self.CM.StataCM(node) == 0: downnodes.append(node) if upnodes == [] and downnodes == []: self.CM.cluster_stable() # Make sure they're completely down with no residule for node in stopset: self.CM.rsh(node, self.CM["StopCmd"]) return self.success() if len(upnodes) > 0: self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes)) if len(downnodes) > 0: self.CM.log("Warn: Unstartable nodes: " + repr(downnodes)) return self.failure() AllTestClasses.append(NearQuorumPointTest) ################################################################### class RollingUpgradeTest(CTSTest): ################################################################### '''Perform a rolling upgrade of the cluster''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="RollingUpgrade" self.start = StartTest(cm) self.stop = StopTest(cm) self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def setup(self, node): # Start all remaining nodes ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.CM.Env["nodes"]: if not self.downgrade(node, None): return self.failure("Couldn't downgrade %s" % node) ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.CM.Env["nodes"]: if not self.upgrade(node, None): return self.failure("Couldn't upgrade %s" % node) return self.success() def install(self, node, version, start=1, flags="--force"): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version) 
self.CM.log("Installing %s on %s with %s" % (version, node, flags)) if not self.stop(node): return self.failure("stop failure: "+node) rc = self.CM.rsh(node, "mkdir -p %s" % target_dir) rc = self.CM.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.CM.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.CM.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.CM.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.CM.Env["current-version"], start) def downgrade(self, node, start=1): return self.install(node, self.CM.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.CM.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not self.CM.Env.has_key("rpm-dir"): return None if not self.CM.Env.has_key("current-version"): return None if not self.CM.Env.has_key("previous-version"): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) ################################################################### class BSC_AddResource(CTSTest): ################################################################### '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name="AddResource" self.resource_offset = 0 self.cib_cmd="""cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "crmd.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = self.create_watch(patterns, self.CM["DeadTime"]) watch.setwatch() fields = string.split(self.CM.Env["IPBase"], '.') fields[3] = str(int(fields[3])+1) ip = string.join(fields, '.') self.CM.Env["IPBase"] = ip if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.CM.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.CM["DeadTime"]): return self.failure("Unstable cluster") return self.success() def make_ip_resource(self, node, id, rclass, type, ip): self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint=""" """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.CM.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.CM.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.CM.Env["DoBSC"]: return 1 return None AllTestClasses.append(BSC_AddResource) class SimulStopLite(CTSTest): ################################################################### '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStopLite" def __call__(self, dummy): 
'''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.CM.debug("Setup: " + self.name) # We ignore the "node" parameter... watchpats = [ ] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.incr("WasStarted") watchpats.append(self.CM["Pat:We_stopped"] % node) #if self.CM.Env["use_logd"]: # watchpats.append(self.CM["Pat:Logd_stopped"] % node) if len(watchpats) == 0: self.CM.clear_all_caches() return self.success() # Stop all the nodes - at about the same time... watch = self.create_watch(watchpats, self.CM["DeadTime"]+10) watch.setwatch() self.set_timer() for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) if watch.lookforall(): self.CM.clear_all_caches() # Make sure they're completely down with no residule for node in self.CM.Env["nodes"]: self.CM.rsh(node, self.CM["StopCmd"]) return self.success() did_fail=0 up_nodes = [] for node in self.CM.Env["nodes"]: if self.CM.StataCM(node) == 1: did_fail=1 up_nodes.append(node) if did_fail: return self.failure("Active nodes exist: " + repr(up_nodes)) self.CM.log("Warn: All nodes stopped but CTS didnt detect: " + repr(watch.unmatched)) self.CM.clear_all_caches() return self.failure("Missing log message: "+repr(watch.unmatched)) def is_applicable(self): '''SimulStopLite is a setup test and never applicable''' return 0 ################################################################### class SimulStartLite(CTSTest): ################################################################### '''Start any stopped nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStartLite" def __call__(self, dummy): '''Perform the 'SimulStartList' setup work. ''' self.incr("calls") self.CM.debug("Setup: " + self.name) # We ignore the "node" parameter... watchpats = [ ] uppat = self.CM["Pat:Slave_started"] if self.CM.upcount() == 0: uppat = self.CM["Pat:Local_started"] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.incr("WasStopped") watchpats.append(uppat % node) if len(watchpats) == 0: return self.success() watchpats.append(self.CM["Pat:DC_IDLE"]) # Start all the nodes - at about the same time... 
watch = self.create_watch(watchpats, self.CM["DeadTime"]+10) watch.setwatch() self.set_timer() for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) if watch.lookforall(): for attempt in (1, 2, 3, 4, 5): if self.CM.cluster_stable(): return self.success() return self.failure("Cluster did not stabilize") did_fail=0 unstable = [] for node in self.CM.Env["nodes"]: if self.CM.StataCM(node) == 0: did_fail=1 unstable.append(node) if did_fail: return self.failure("Unstarted nodes exist: " + repr(unstable)) unstable = [] for node in self.CM.Env["nodes"]: if not self.CM.node_stable(node): did_fail=1 unstable.append(node) if did_fail: return self.failure("Unstable cluster nodes exist: " + repr(unstable)) self.CM.log("ERROR: All nodes started but CTS didnt detect: " + repr(watch.unmatched)) return self.failure() def is_applicable(self): '''SimulStartLite is a setup test and never applicable''' return 0 def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.Audits = audits result.append(bound_test) return result # vim:ts=4:sw=4:et: diff --git a/cts/cluster_test b/cts/cluster_test index a922515bc2..9d8f8c0c5d 100755 --- a/cts/cluster_test +++ b/cts/cluster_test @@ -1,328 +1,328 @@ #!/bin/bash anyAsked=0 if [ -e ~/.cts ]; then . ~/.cts fi CTS_master=`uname -n` CTS_numtests=$1 if [ "x$CTS_master" = "x" ]; then anyAsked=1 printf "This script should only be executed on the test master.\n" printf "The test master will remotely execute the actions required by the tests and should not be part of the cluster itself.\n" read -p "Is this host intended to be the test master? (yN)" CTS_master if [ "x$CTS_master" != "xy" ]; then printf "This script must be executed on the test master\n" exit 1 fi fi if [ "x$CTS_node_list" = "x" ]; then anyAsked=1 read -p "Please list your cluster nodes (eg. node1 node2 node3): " CTS_node_list else printf "Beginning test of cluster: $CTS_node_list\n" fi if [ "x$CTS_stack" = "x" ]; then anyAsked=1 - read -p "Which cluster stack are you using? ([openais] or heartbeat): " CTS_stack + read -p "Which cluster stack are you using? ([corosync], openais, or heartbeat): " CTS_stack if [ -z $CTS_stack ]; then - CTS_stack=openais + CTS_stack=corosync fi else printf "Using the $CTS_stack cluster stack\n" fi tmp=`echo ${CTS_node_list} | sed s/$HOSTNAME//` if [ "x${CTS_node_list}" != "x${tmp}" ]; then printf "This script must be executed on the test master and the test master cannot be part of the cluster\n" exit 1 fi printf "+ Bootstraping ssh... " if [ -z $SSH_AUTH_SOCK ]; then printf "\n + Initializing SSH " agent_tmp=/tmp/.$$.ssh ssh-agent > $agent_tmp . $agent_tmp rm $agent_tmp printf " + Adding identities...\n" ssh-add rc=$? if [ $rc != 0 ]; then printf " -- No identities added\n" printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n" read -p " - Do you want this program to help you create one? (yN)" auto_fix if [ "x$auto_fix" = "xy" ]; then ssh-keygen -t dsa ssh-add else printf "Please run 'ssh-keygen -t dsa' to create a new key\n" exit 1 fi fi else printf "OK\n" fi test_ok=1 printf "+ Testing ssh configuration... " for n in $CTS_node_list; do ssh -l root -o PasswordAuthentication=no -o ConnectTimeout=5 $n /bin/true rc=$? 
if [ $rc != 0 ]; then printf "\n - connection to $n failed" test_ok=0 fi done if [ $test_ok = 0 ]; then printf "\n\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n" printf " Please install one of your SSH public keys to root's account on all cluster nodes\n" # todo - look for identities and guide the installation of one exit 1 fi printf "OK\n" logging_ok=1 syslog_conf=/etc/syslog-ng/syslog-ng.conf printf "+ Checking syslog-ng is installed... " if [ ! -e $syslog_conf ]; then printf "\n - Syslog-ng not installed on the test master (this machine)" logging_ok=0 fi for n in $CTS_node_list; do ssh -l root $n ls $syslog_conf 2>&1 > /dev/null rc=$? if [ $rc != 0 ]; then printf "\n - Syslog-ng not installed on $n" logging_ok=0 fi done if [ $logging_ok = 0 ]; then printf "\n\nSyslog-ng is required on all nodes to use CTS and the test master.\n" printf "Please install and start it before restarting CTS\n" exit 1 fi printf "OK\n" if [ -z $CTS_logfile ]; then anyAsked=1 read -p " + Where does/should syslog store logs from remote hosts? (/var/log/messages) " CTS_logfile if [ "x$CTS_logfile" = "x" ]; then CTS_logfile=/var/log/messages fi fi if [ ! -e $CTS_logfile ]; then printf "$CTS_logfile doesn't exist\n" exit 1 fi if [ -z $CTS_logfacility ]; then anyAsked=1 read -p " + Which log facility does the cluster use? (daemon) " CTS_logfacility if [ "x$CTS_logfacility" = "x" ]; then CTS_logfacility=daemon fi fi printf "+ Testing centralized logging configuration... " unique="`date` $$ CTS-SYSLOG-TEST" bad_nodes="" some_ok=0 for n in $CTS_node_list; do ssh -l root $n logger -p $CTS_logfacility.info "'$unique'" rc=$? if [ $rc != 0 ]; then printf "\n - logger command failed on $n" logging_ok=0 else grep "$n logger: $unique" $CTS_logfile 2>&1 > /dev/null rc=$? if [ $rc != 0 ]; then logging_ok=0 bad_nodes="$bad_nodes $n" node_ip=`host $n | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | head -n 1` grep "$node_ip logger: $unique" $CTS_logfile 2>&1 > /dev/null rc=$? if [ $rc = 0 ]; then printf "\n -- Logs from $n are being saved with its IP address instead of uname.\n" printf " Add the line 'use_dns(yes);' to $syslog_conf and restart syslog before trying again\n" exit 1 fi # look for CTS-AUTO-SETUP-WAS-HERE and abort if found ssh -l root $n grep CTS-AUTO-SETUP-WAS-HERE $syslog_conf 2>&1 > /dev/null rc=$? if [ $rc = 0 ]; then printf "\n -- Previous attempt to auto-configure syslog-ng on $n failed - aborting!" exit 1 else printf "\n - Syslog-ng is not configured correctly on $n" fi else some_ok=1 fi fi done if [ $logging_ok = 0 ]; then if [ $some_ok = 1 ]; then printf "\n\nSome nodes ($bad_nodes) do not have functional logging configurations\n" else printf "\n\nNo nodes have correctly configured logging.\n" fi read -p " - Do you want this program to attempt to fix this? (yN)" auto_fix if [ "x$auto_fix" = "xy" ]; then if [ $some_ok = 0 ]; then grep CTS-AUTO-SETUP-WAS-HERE $syslog_conf 2>&1 > /dev/null rc=$? if [ $rc = 0 ]; then printf "\n -- Previous attempt to auto-configure syslog-ng on the test master failed - aborting!" exit 1 fi anyAsked=1 read -p " + Please chose a port for the test master to listen on. 
(9191) " CTS_logport if [ -z $CTS_logport ]; then CTS_logport=9191 fi printf "Make sure port $CTS_logport is not blocked by a firewalls on any of the hosts" printf "# CTS-AUTO-SETUP-WAS-HERE\n" >> $syslog_conf printf "source s_tcp { tcp(port($CTS_logport) max-connections(99999)); };\n" >> $syslog_conf printf "filter f_ha { facility($CTS_logfacility); };\n" >> $syslog_conf printf "destination ha_local { file($CTS_logfile perm(0644)); };\n" >> $syslog_conf printf "log { source(src); source(s_tcp); filter(f_ha); destination(ha_local); };\n" >> $syslog_conf printf " + Please restart logging on the current machine before restarting CTS\n" else if [ -z $CTS_logport ]; then anyAsked=1 read -p " + Please indicate the port that syslog (on this host) listens on for remote messages. (9191) " CTS_logport if [ -z $CTS_logport ]; then CTS_logport=9191 fi fi fi for n in $bad_nodes; do ssh -l root $n -- "printf '# CTS-AUTO-SETUP-WAS-HERE\n' >> $syslog_conf" ssh -l root $n -- "printf 'destination ha_tcp { tcp($CTS_master port($CTS_logport));};\n' >> $syslog_conf" ssh -l root $n -- "printf 'filter f_ha_tcp { facility($CTS_logfacility); };\n' >> $syslog_conf" ssh -l root $n -- "printf 'log { source(src); filter(f_ha_tcp); destination(ha_tcp); };\n' >> $syslog_conf" printf " + Please restart logging on $n before restarting CTS\n" done else if [ -z $CTS_logport ]; then CTS_logport=9191 fi if [ $some_ok = 0 ]; then printf "Check that $syslog_conf on this machine has something like:\n" printf " source s_tcp { tcp(port($CTS_logport) max-connections(99999)); };\n" printf " filter f_ha { facility($CTS_logfacility); };\n" printf " destination ha_local { file($CTS_logfile perm(0644)); };\n" printf " log { source(src); source(s_tcp); filter(f_ha); destination(ha_local); };\n" printf "\n and, \n" fi printf "Check that $syslog_conf on ($bad_nodes) have something like:\n" printf " destination ha_tcp { tcp($CTS_master port($CTS_logport));};\n" printf " filter f_ha_tcp { facility($CTS_logfacility); };\n" printf " log { source(src); filter(f_ha_tcp); destination(ha_tcp); };\n" printf "\nRemember to restart syslog-ng before restarting cts\n" fi exit 1 fi printf "OK\n" if [ 0 = 1 ]; then echo SYSLOG_NG_CREATE_CONFIG=\"no\" >> /etc/sysconfig/syslog fi if [ -z $CTS_boot ]; then read -p "+ Is the cluster software started automatically when a node boots? [yN] " CTS_boot if [ -z $CTS_boot ]; then CTS_boot=0 else case $CTS_boot in 1|y|Y) CTS_boot=1;; *) CTS_boot=0;; esac fi fi if [ -z $CTS_numtests ]; then read -p "+ How many test iterations should be performed? (500) " CTS_numtests if [ -z $CTS_numtests ]; then CTS_numtests=500 fi fi if [ -z $CTS_asked_once ]; then anyAsked=1 read -p "+ What type of STONITH agent do you use? (none) " CTS_stonith if [ "x$CTS_stonith" != "x" ]; then read -p "+ List any STONITH agent parameters (eq. device_host=switch.power.com): " CTS_stonith_args fi if [ -z $CTS_adv ]; then read -p "+ (Advanced) Any extra CTS parameters? (none) " CTS_adv fi fi if [ $anyAsked = 1 ]; then read -p "+ Save values to ~/.cts for next time? 
(yN) " doSave fi if [ "x$doSave" = "xy" ]; then echo "# CTS Test data" > ~/.cts echo CTS_master=\"$CTS_master\" >> ~/.cts echo CTS_stack=\"$CTS_stack\" >> ~/.cts echo CTS_node_list=\"$CTS_node_list\" >> ~/.cts echo CTS_logfile=\"$CTS_logfile\" >> ~/.cts echo CTS_logport=$CTS_logport >> ~/.cts echo CTS_logfacility=$CTS_logfacility >> ~/.cts echo CTS_asked_once=1 >> ~/.cts echo CTS_adv=\"$CTS_adv\" >> ~/.cts echo CTS_stonith=$CTS_stonith >> ~/.cts echo CTS_stonith_args=\"$CTS_stonith_args\" >> ~/.cts echo CTS_boot=\"$CTS_boot\" >> ~/.cts fi cts_extra="" if [ "x$CTS_stonith" != "x" ]; then cts_extra="$cts_extra --stonith-type $CTS_stonith" if [ "x$CTS_stonith_args" != "x" ]; then cts_extra="$cts_extra --stonitha-params \"$CTS_stonith_args\"" fi else cts_extra="$cts_extra --stonith 0" printf " - Testing a cluster without STONITH is like a blunt pencil... pointless\n" fi printf "\nAll set to go for $CTS_numtests iterations!\n" if [ $anyAsked = 0 ]; then printf "+ To use a different configuration, remove ~/.cts and re-run cts (or edit it manually).\n" fi echo Now paste the following command into this shell: echo python "`dirname "$0"`"/CTSlab.py -L $CTS_logfile --syslog-facility $CTS_logfacility --no-unsafe-tests --stack $CTS_stack $CTS_adv --at-boot $CTS_boot $cts_extra $CTS_numtests --nodes \"$CTS_node_list\" diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Installation.xml b/doc/Clusters_from_Scratch/en-US/Ch-Installation.xml index ba0d75ea0e..e113b3236e 100644 --- a/doc/Clusters_from_Scratch/en-US/Ch-Installation.xml +++ b/doc/Clusters_from_Scratch/en-US/Ch-Installation.xml @@ -1,674 +1,874 @@ %BOOK_ENTITIES; ]> - Installation -
- OS Installation - - Detailed instructions for installing Fedora are available at http://docs.fedoraproject.org/install-guide/f12/ in a number of languages. The abbreviated version is as follows... - - - Point your browser to http://fedoraproject.org/en/get-fedora-all, locate the Install Media section and download the install DVD that matches your hardware. - - - Burn the disk image to a DVD - - http://docs.fedoraproject.org/readme-burning-isos/en-US.html - - and boot from it. Or use the image to boot a virtual machine as I have done here. After clicking through the welcome screen, select your language and keyboard layout - - http://docs.fedoraproject.org/install-guide/f12/en-US/html/s1-langselection-x86.html - - - - - - - Insert graphic here - - - - - Assign your machine a host name. - - http://docs.fedoraproject.org/install-guide/f12/en-US/html/sn-networkconfig-fedora.html - - I happen to control the clusterlabs.org domain name, so I will use that here. - - - - - Insert graphic here - - - - - You will then be prompted to indicate the machine’s physical location and to supply a root password. - - http://docs.fedoraproject.org/install-guide/f12/en-US/html/sn-account_configuration.html - - - - - Now select where you want Fedora installed. - - http://docs.fedoraproject.org/install-guide/f12/en-US/html/s1-diskpartsetup-x86.html - - As I don’t care about any existing data, I will accept the default and allow Fedora to use the complete drive. - - - IMPORTANT: If you plan on following the DRBD or GFS2 portions of this guide, you should reserve at least 1Gb of space on each machine from which to create a shared volume. - - - - - Insert graphic here - - - - - The next step is to configure networking. Do not accept the default. Cluster machines should never obtain an ip address via DHCP. Here I will use the internal addresses for the clusterlab.org network. - - - - - Insert graphic here - - - - - Next choose which software should be installed. Deselect the default “Office and Productivity” as its not appropriate for a cluster node. You will want to select the “Updates” channel though. We’ll install any needed software later. After you click next, Fedora will begin installing. - - - - - Insert graphic here - - - - - Insert graphic here - - - - - Once the node reboots, follow the on screen instructions - - http://docs.fedoraproject.org/install-guide/f12/en-US/html/ch-firstboot.html - - to create a system user and configure the time. It is highly recommended to enable NTP on your cluster nodes. Doing so ensures all nodes agree on the current time and makes reading log files significantly easier. - - - - - Insert graphic here - - - Click through the next screens until you reach the login window. Click on the user you created and supply the password you indicated earlier. - -
- -
- Cluster Software Installation - - - Installing the cluster in future versions of Fedora will be significantly easier now that the entire stack has been accepted into the distribution. However for now there are still some hoops to jump through. - - - - Start a terminal by going to Applications -> System Tools -> Terminal - - - - - Insert graphic here - - - That was the last screenshot by the way, from here on in we’re going to be working from the terminal. - - - Switch to the super user (aka. "root") account. You will need to supply the password you entered earlier during the installation process. - - - su - - - - [beekhof@pcmk-1 ~]$ su - - - - Password: - - - [root@pcmk-1 ~]# - - - Note that the username (the text before the @ symbol) now indicates we’re running as the super user “root”. - -
- Install the Cluster Software - - Since version 12, Fedora comes with recent versions of everything you need, so simply fire up the shell and run: - - - -[root@pcmk-1 ~]# yum install -y pacemaker + Installation +
+ OS Installation + + Detailed instructions for installing Fedora are available at + http://docs.fedoraproject.org/install-guide/f12/ + in a number of languages. + The abbreviated version is as follows... + + + Point your browser to + http://fedoraproject.org/en/get-fedora-all, + locate the Install Media section and download the install DVD that matches your hardware. + + + Burn the disk image to a DVD + + + + http://docs.fedoraproject.org/readme-burning-isos/en-US.html + + + + and boot from it. Or use the image to boot a virtual machine as I have done here. + After clicking through the welcome screen, select your language and keyboard layout + + + + http://docs.fedoraproject.org/install-guide/f12/en-US/html/s1-langselection-x86.html + + + + + +
+ Fedora Installation - Welcome + + + + + Fedora Installation: Good choice + +
+
+ +
+ Fedora Installation - Storage Devices + + + + + Fedora Installation: Storage Devices + +
+
+ + Assign your machine a host name. + + + + http://docs.fedoraproject.org/install-guide/f12/en-US/html/sn-networkconfig-fedora.html + + + + I happen to control the clusterlabs.org domain name, so I will use that here. +
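For readers working ahead from a shell, a quick way to confirm the name the node will present to the cluster is sketched below. This is only a sketch: pcmk-1.clusterlabs.org is simply the example name used in this guide, and the /etc/sysconfig/network convention assumed here is the one used by Fedora releases of this era.

# Check the name the cluster tools will see (uname -n)
uname -n
# Set it for the running system and make it persistent
hostname pcmk-1.clusterlabs.org
sed -i.bak "s/^HOSTNAME=.*/HOSTNAME=pcmk-1.clusterlabs.org/" /etc/sysconfig/network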
+ Fedora Installation - Hostname + + + + + Fedora Installation: Choose a hostname + +
+
+ + You will then be prompted to indicate the machine’s physical location and to supply a root password. + + + + http://docs.fedoraproject.org/install-guide/f12/en-US/html/sn-account_configuration.html + + + + + + Now select where you want Fedora installed. + + + http://docs.fedoraproject.org/install-guide/f12/en-US/html/s1-diskpartsetup-x86.html + + + As I don’t care about any existing data, I will accept the default and allow Fedora to use the complete drive. + However I want to reserve some space for DRBD, so I'll check the Review and modify partitioning layout box. +
+ Fedora Installation - Installation Type + + + + + Fedora Installation: Choose an installation type + +
+
+ + By default, Fedora will give all the space to the / (aka. root) partition. + We'll take some back so we can use DRBD. +
+ Fedora Installation - Default Partitioning + + + + + +
+
+ + The finalized partition layout should look something like the diagram below. + + If you plan on following the DRBD or GFS2 portions of this guide, you should reserve at least 1GB of space on each machine, to be used later for a shared volume. + +
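Once the installer finishes, you can double-check that the space you held back really is unallocated. A minimal sketch, assuming the default LVM-based layout (the volume group name will differ on your machines):

# Show each volume group and how much free space remains for the shared volume
vgs -o vg_name,vg_size,vg_free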
+ Fedora Installation - Customize Partitioning + + + + + Fedora Installation: Create a partition to use (later) for website data + +
+
+ +
+ Fedora Installation - Bootloader + + + + + Fedora Installation: Unless you have a strong reason not to, accept the default bootloader location + +
+
+ + Next, choose which software should be installed. + Change the selection to Web Server since we plan on using Apache. + Don't enable updates yet; we'll do that (and install any extra software we need) later. + After you click next, Fedora will begin installing. +
+ Fedora Installation - Software + + + + + Fedora Installation: Software selection + +
+
+ +
+ Fedora Installation - Installing + + + + + Fedora Installation: Go grab something to drink, this may take a while + +
+
+ +
+ Fedora Installation - Installation Complete + + + + + Fedora Installation: Stage 1, completed + +
+
+ + Once the node reboots, follow the on-screen instructions + + + + http://docs.fedoraproject.org/install-guide/f12/en-US/html/ch-firstboot.html + + + + to create a system user and configure the time. +
+ Fedora Installation - First Boot + + + + + +
+
+ +
+ Fedora Installation - Create Non-privileged User + + + + + Fedora Installation: Creating a non-privileged user, take note of the password, you'll need it soon + +
+
+ + + + It is highly recommended to enable NTP on your cluster nodes. + Doing so ensures all nodes agree on the current time and makes reading log files significantly easier. + + +
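If you skipped the NTP option during firstboot, a rough command-line equivalent is sketched below; it assumes the stock ntp package and the default Fedora pool servers in /etc/ntp.conf.

# Install, enable and start the NTP daemon so all nodes agree on the time
yum install -y ntp
chkconfig ntpd on
service ntpd start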
+ Fedora Installation - Date and Time + + + + + Fedora Installation: Enable NTP to keep the times on all your nodes consistent + +
+ Click through the next screens until you reach the login window. + Click on the user you created and supply the password you indicated earlier. +
+ +
+ Fedora Installation - Customize Networking + + + + + Fedora Installation: Click here to configure networking + +
+
+ + + + Do not accept the default network settings. + Cluster machines should never obtain an IP address via DHCP. + Here I will use the internal addresses for the clusterlabs.org network. + +
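As a reference for doing the same thing without the GUI, a static address can be written straight into the interface configuration file. This is only a sketch: the device name eth0 and the 192.168.122.x addresses are placeholders, not the addresses used elsewhere in this guide.

# /etc/sysconfig/network-scripts/ifcfg-eth0 -- static addressing, never DHCP
DEVICE=eth0
ONBOOT=yes
BOOTPROTO=none
IPADDR=192.168.122.101
NETMASK=255.255.255.0
GATEWAY=192.168.122.1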
+ Fedora Installation - Specify Network Preferences + + + + + Fedora Installation: Specify network settings for your machine, never choose DHCP + +
+
+ +
+ Fedora Installation - Activate Networking + + + + + Fedora Installation: Click the big green button to activate your changes + +
+
+ +
+ Fedora Installation - Bring up the Terminal + + + + + Fedora Installation: Down to business, fire up the command line + +
+
+ + + That was the last screenshot; from here on, we're going to be working from the terminal. + +
+ +
+ Cluster Software Installation + + Go to the terminal window you just opened and switch to the super user (aka. "root") account with the su command. + You will need to supply the password you entered earlier during the installation process. + + +[beekhof@pcmk-1 ~]$ su - +Password: +[root@pcmk-1 ~]# + + + + Note that the username (the text before the @ symbol) now indicates we’re running as the super user “root”. + + + +[root@pcmk-1 ~]# ip addr +1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000 + link/ether 00:0c:29:6f:e1:58 brd ff:ff:ff:ff:ff:ff + inet 192.168.9.41/24 brd 192.168.9.255 scope global eth0 + inet6 ::20c:29ff:fe6f:e158/64 scope global dynamic + valid_lft 2591667sec preferred_lft 604467sec + inet6 2002:57ae:43fc:0:20c:29ff:fe6f:e158/64 scope global dynamic + valid_lft 2591990sec preferred_lft 604790sec + inet6 fe80::20c:29ff:fe6f:e158/64 scope link + valid_lft forever preferred_lft forever +[root@pcmk-1 ~]# ping -c 1 www.google.com +PING www.l.google.com (74.125.39.99) 56(84) bytes of data. +64 bytes from fx-in-f99.1e100.net (74.125.39.99): icmp_seq=1 ttl=56 time=16.7 ms + +--- www.l.google.com ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 20ms +rtt min/avg/max/mdev = 16.713/16.713/16.713/0.000 ms +[root@pcmk-1 ~]# /sbin/chkconfig network on +[root@pcmk-1 ~]# + +
+ Security Shortcuts + + To simplify this guide and focus on the aspects directly connected to clustering, we will now disable the machine’s firewall and SELinux installation. + Both of these actions create significant security issues and should not be performed on machines that will be exposed to the outside world. + + + TODO: Create an Appendix that deals with (at least) re-enabling the firewall. + + + + +[root@pcmk-1 ~]# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config +[root@pcmk-1 ~]# /sbin/chkconfig --del iptables +[root@pcmk-1 ~]# service iptables stop +iptables: Flushing firewall rules: [ OK ] +iptables: Setting chains to policy ACCEPT: filter [ OK ] +iptables: Unloading modules: [ OK ] + + + + You will need to reboot for the SELinux changes to take effect. + Otherwise you will see something like this when you start corosync: + + +May 4 19:30:54 pcmk-1 setroubleshoot: SELinux is preventing /usr/sbin/corosync "getattr" access on /. For complete SELinux messages. run sealert -l 6e0d4384-638e-4d55-9aaf-7dac011f29c1 +May 4 19:30:54 pcmk-1 setroubleshoot: SELinux is preventing /usr/sbin/corosync "getattr" access on /. For complete SELinux messages. run sealert -l 6e0d4384-638e-4d55-9aaf-7dac011f29c1 + + +
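If you would rather not reboot straight away, the running system can usually be switched to permissive mode immediately; the change to /etc/selinux/config above still makes it persistent across reboots. A minimal sketch:

# Put SELinux into permissive mode for the running system and confirm it
setenforce 0
getenforce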
+ +
+ Install the Cluster Software + + Since version 12, Fedora comes with recent versions of everything you need, so simply fire up the shell and run: + + +[root@pcmk-1 ~]# sed -i.bak "s/enabled=0/enabled=1/g" /etc/yum.repos.d/fedora.repo +[root@pcmk-1 ~]# sed -i.bak "s/enabled=0/enabled=1/g" /etc/yum.repos.d/fedora-updates.repo +[root@pcmk-1 ~]# yum install -y pacemaker corosync +[root@pcmk-1 ~]# yum install -y pacemaker corosync +Loaded plugins: presto, refresh-packagekit +fedora/metalink | 22 kB 00:00 +fedora-debuginfo/metalink | 16 kB 00:00 +fedora-debuginfo | 3.2 kB 00:00 +fedora-debuginfo/primary_db | 1.4 MB 00:04 +fedora-source/metalink | 22 kB 00:00 +fedora-source | 3.2 kB 00:00 +fedora-source/primary_db | 3.0 MB 00:05 +updates/metalink | 26 kB 00:00 +updates | 2.6 kB 00:00 +updates/primary_db | 1.1 kB 00:00 +updates-debuginfo/metalink | 18 kB 00:00 +updates-debuginfo | 2.6 kB 00:00 +updates-debuginfo/primary_db | 1.1 kB 00:00 +updates-source/metalink | 25 kB 00:00 +updates-source | 2.6 kB 00:00 +updates-source/primary_db | 1.1 kB 00:00 Setting up Install Process Resolving Dependencies --> Running transaction check ----> Package pacemaker.x86_64 0:1.1.0-1.fc12 set to be updated ---> Processing Dependency: heartbeat >= 3.0.0 for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: resource-agents for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: corosync for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libgnutls.so.26(GNUTLS_1_4)(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: cluster-glue for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libltdl.so.7()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libccmclient.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libesmtp.so.5()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libxslt.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libpils.so.2()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libnetsnmp.so.15()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libnetsnmpagent.so.15()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libnetsnmpmibs.so.15()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libpe_status.so.2()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libsensors.so.4()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libnetsnmphelpers.so.15()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libstonith.so.1()(64bit) for package: 
pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libcoroipcc.so.4()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libgnutls.so.26()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 ---> Processing Dependency: libhbclient.so.1()(64bit) for package: pacemaker-1.1.0-1.fc12.x86_64 +---> Package corosync.x86_64 0:1.2.1-1.fc13 set to be updated +--> Processing Dependency: corosynclib = 1.2.1-1.fc13 for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libquorum.so.4(COROSYNC_QUORUM_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libvotequorum.so.4(COROSYNC_VOTEQUORUM_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libcpg.so.4(COROSYNC_CPG_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libconfdb.so.4(COROSYNC_CONFDB_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libcfg.so.4(COROSYNC_CFG_0.82)(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libpload.so.4(COROSYNC_PLOAD_1.0)(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: liblogsys.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libconfdb.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libcoroipcc.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libcpg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libquorum.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libcoroipcs.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libvotequorum.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libcfg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libtotem_pg.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libpload.so.4()(64bit) for package: corosync-1.2.1-1.fc13.x86_64 +---> Package pacemaker.x86_64 0:1.1.1-1.fc13 set to be updated +--> Processing Dependency: heartbeat >= 3.0.0 for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: net-snmp >= 5.4 for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: resource-agents for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: cluster-glue for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libnetsnmp.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libnetsnmpagent.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libesmtp.so.5()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libhbclient.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libpils.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libpe_status.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libnetsnmpmibs.so.20()(64bit) for package: 
pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libnetsnmphelpers.so.20()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libccmclient.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libstonith.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 +--> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.1-1.fc13.x86_64 --> Running transaction check ----> Package cluster-glue.x86_64 0:1.0-0.11.b79635605337.hg.fc12 set to be updated ---> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64 ---> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64 ---> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64 ---> Processing Dependency: libopenhpi.so.2()(64bit) for package: cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64 ---> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64 ----> Package cluster-glue-libs.x86_64 0:1.0-0.11.b79635605337.hg.fc12 set to be updated ----> Package corosync.x86_64 0:1.1.2-1.fc12 set to be updated ----> Package corosynclib.x86_64 0:1.1.2-1.fc12 set to be updated ---> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.1.2-1.fc12.x86_64 ---> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.1.2-1.fc12.x86_64 ---> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.1.2-1.fc12.x86_64 ---> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.1.2-1.fc12.x86_64 ---> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.1.2-1.fc12.x86_64 ----> Package gnutls.x86_64 0:2.8.5-1.fc12 set to be updated ---> Processing Dependency: libtasn1.so.3(LIBTASN1_0_3)(64bit) for package: gnutls-2.8.5-1.fc12.x86_64 ---> Processing Dependency: libtasn1.so.3()(64bit) for package: gnutls-2.8.5-1.fc12.x86_64 ----> Package heartbeat.x86_64 0:3.0.0-0.5.0daab7da36a8.hg.fc12 set to be updated ---> Processing Dependency: PyXML for package: heartbeat-3.0.0-0.5.0daab7da36a8.hg.fc12.x86_64 ----> Package heartbeat-libs.x86_64 0:3.0.0-0.5.0daab7da36a8.hg.fc12 set to be updated +---> Package cluster-glue.x86_64 0:1.0.2-1.fc13 set to be updated +--> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0.2-1.fc13.x86_64 +--> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64 +--> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64 +--> Processing Dependency: libopenhpi.so.2()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64 +--> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0.2-1.fc13.x86_64 +---> Package cluster-glue-libs.x86_64 0:1.0.2-1.fc13 set to be updated +---> Package corosynclib.x86_64 0:1.2.1-1.fc13 set to be 
updated +--> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64 +--> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64 +--> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.2.1-1.fc13.x86_64 +---> Package heartbeat.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 set to be updated +--> Processing Dependency: PyXML for package: heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 +---> Package heartbeat-libs.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 set to be updated ---> Package libesmtp.x86_64 0:1.0.4-12.fc12 set to be updated ----> Package libtool-ltdl.x86_64 0:2.2.6-15.fc12 set to be updated ----> Package libxslt.x86_64 0:1.1.26-1.fc12 set to be updated ----> Package lm_sensors-libs.x86_64 0:3.1.1-4.fc12 set to be updated ----> Package net-snmp-libs.x86_64 1:5.4.2.1-18.fc12 set to be updated ----> Package pacemaker-libs.x86_64 0:1.1.0-1.fc12 set to be updated ----> Package resource-agents.x86_64 0:3.0.5-1.fc12 set to be updated +---> Package net-snmp.x86_64 1:5.5-12.fc13 set to be updated +--> Processing Dependency: libsensors.so.4()(64bit) for package: 1:net-snmp-5.5-12.fc13.x86_64 +---> Package net-snmp-libs.x86_64 1:5.5-12.fc13 set to be updated +---> Package pacemaker-libs.x86_64 0:1.1.1-1.fc13 set to be updated +---> Package resource-agents.x86_64 0:3.0.10-1.fc13 set to be updated +--> Processing Dependency: libnet.so.1()(64bit) for package: resource-agents-3.0.10-1.fc13.x86_64 --> Running transaction check ----> Package OpenIPMI-libs.x86_64 0:2.0.16-4.fc12 set to be updated ----> Package PyXML.x86_64 0:0.8.4-15 set to be updated ----> Package libibverbs.x86_64 0:1.1.3-3.fc12 set to be updated ---> Processing Dependency: libibverbs-driver for package: libibverbs-1.1.3-3.fc12.x86_64 ----> Package librdmacm.x86_64 0:1.0.10-1.fc12 set to be updated ----> Package libtasn1.x86_64 0:2.3-1.fc12 set to be updated ----> Package openhpi-libs.x86_64 0:2.14.0-5.fc12 set to be updated ----> Package perl-TimeDate.noarch 1:1.16-11.fc12 set to be updated +---> Package OpenIPMI-libs.x86_64 0:2.0.16-8.fc13 set to be updated +---> Package PyXML.x86_64 0:0.8.4-17.fc13 set to be updated +---> Package libibverbs.x86_64 0:1.1.3-4.fc13 set to be updated +--> Processing Dependency: libibverbs-driver for package: libibverbs-1.1.3-4.fc13.x86_64 +---> Package libnet.x86_64 0:1.1.4-3.fc12 set to be updated +---> Package librdmacm.x86_64 0:1.0.10-2.fc13 set to be updated +---> Package lm_sensors-libs.x86_64 0:3.1.2-2.fc13 set to be updated +---> Package openhpi-libs.x86_64 0:2.14.1-3.fc13 set to be updated +---> Package perl-TimeDate.noarch 1:1.20-1.fc13 set to be updated --> Running transaction check ----> Package libmlx4.x86_64 0:1.0.1-3.fc12 set to be updated +---> Package libmlx4.x86_64 0:1.0.1-5.fc13 set to be updated --> Finished Dependency Resolution Dependencies Resolved -======================================================================================= - Package Arch Version Repository Size -======================================================================================= + + +========================================================================================== + Package Arch Version Repository Size 
+========================================================================================== Installing: - pacemaker x86_64 1.1.0-1.fc12 custom 545 k + corosync x86_64 1.2.1-1.fc13 fedora 136 k + pacemaker x86_64 1.1.1-1.fc13 fedora 543 k Installing for dependencies: - OpenIPMI-libs x86_64 2.0.16-4.fc12 fedora 434 k - PyXML x86_64 0.8.4-15 fedora 808 k - cluster-glue x86_64 1.0-0.11.b79635605337.hg.fc12 fedora 222 k - cluster-glue-libs x86_64 1.0-0.11.b79635605337.hg.fc12 fedora 114 k - corosync x86_64 1.1.2-1.fc12 fedora 144 k - corosynclib x86_64 1.1.2-1.fc12 fedora 138 k - gnutls x86_64 2.8.5-1.fc12 fedora 350 k - heartbeat x86_64 3.0.0-0.5.0daab7da36a8.hg.fc12 fedora 172 k - heartbeat-libs x86_64 3.0.0-0.5.0daab7da36a8.hg.fc12 fedora 265 k - libesmtp x86_64 1.0.4-12.fc12 fedora 54 k - libibverbs x86_64 1.1.3-3.fc12 updates 42 k - libmlx4 x86_64 1.0.1-3.fc12 updates 26 k - librdmacm x86_64 1.0.10-1.fc12 updates 22 k - libtasn1 x86_64 2.3-1.fc12 fedora 232 k - libtool-ltdl x86_64 2.2.6-15.fc12 fedora 44 k - libxslt x86_64 1.1.26-1.fc12 fedora 397 k - lm_sensors-libs x86_64 3.1.1-4.fc12 updates 36 k - net-snmp-libs x86_64 1:5.4.2.1-18.fc12 fedora 1.3 M - openhpi-libs x86_64 2.14.0-5.fc12 fedora 128 k - pacemaker-libs x86_64 1.1.0-1.fc12 custom 250 k - perl-TimeDate noarch 1:1.16-11.fc12 fedora 33 k - resource-agents x86_64 3.0.5-1.fc12 updates 251 k + OpenIPMI-libs x86_64 2.0.16-8.fc13 fedora 474 k + PyXML x86_64 0.8.4-17.fc13 fedora 906 k + cluster-glue x86_64 1.0.2-1.fc13 fedora 230 k + cluster-glue-libs x86_64 1.0.2-1.fc13 fedora 116 k + corosynclib x86_64 1.2.1-1.fc13 fedora 145 k + heartbeat x86_64 3.0.0-0.7.0daab7da36a8.hg.fc13 updates 172 k + heartbeat-libs x86_64 3.0.0-0.7.0daab7da36a8.hg.fc13 updates 265 k + libesmtp x86_64 1.0.4-12.fc12 fedora 54 k + libibverbs x86_64 1.1.3-4.fc13 fedora 42 k + libmlx4 x86_64 1.0.1-5.fc13 fedora 27 k + libnet x86_64 1.1.4-3.fc12 fedora 49 k + librdmacm x86_64 1.0.10-2.fc13 fedora 22 k + lm_sensors-libs x86_64 3.1.2-2.fc13 fedora 37 k + net-snmp x86_64 1:5.5-12.fc13 fedora 295 k + net-snmp-libs x86_64 1:5.5-12.fc13 fedora 1.5 M + openhpi-libs x86_64 2.14.1-3.fc13 fedora 135 k + pacemaker-libs x86_64 1.1.1-1.fc13 fedora 264 k + perl-TimeDate noarch 1:1.20-1.fc13 fedora 42 k + resource-agents x86_64 3.0.10-1.fc13 fedora 357 k Transaction Summary -======================================================================================= -Install 23 Package(s) +========================================================================================= +Install 21 Package(s) Upgrade 0 Package(s) -Total download size: 5.9 M +Total download size: 5.7 M +Installed size: 20 M Downloading Packages: -(1/23): OpenIPMI-libs-2.0.16-4.fc12.x86_64.rpm | 434 kB 00:00 -(2/23): PyXML-0.8.4-15.x86_64.rpm | 808 kB 00:00 -(3/23): cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64.rpm | 222 kB 00:00 -(4/23): cluster-glue-libs-1.0-0.11.b79635605337.hg.fc12.x86_64. 
| 114 kB 00:00 -(5/23): corosync-1.1.2-1.fc12.x86_64.rpm | 144 kB 00:00 -(6/23): corosynclib-1.1.2-1.fc12.x86_64.rpm | 138 kB 00:00 -(7/23): gnutls-2.8.5-1.fc12.x86_64.rpm | 350 kB 00:00 -(8/23): heartbeat-3.0.0-0.5.0daab7da36a8.hg.fc12.x86_64.rpm | 172 kB 00:00 -(9/23): heartbeat-libs-3.0.0-0.5.0daab7da36a8.hg.fc12.x86_64.rp | 265 kB 00:00 -(10/23): libesmtp-1.0.4-12.fc12.x86_64.rpm | 54 kB 00:00 -(11/23): libibverbs-1.1.3-3.fc12.x86_64.rpm | 42 kB 00:00 -(12/23): libmlx4-1.0.1-3.fc12.x86_64.rpm | 26 kB 00:00 -(13/23): librdmacm-1.0.10-1.fc12.x86_64.rpm | 22 kB 00:00 -(14/23): libtasn1-2.3-1.fc12.x86_64.rpm | 232 kB 00:00 -(15/23): libtool-ltdl-2.2.6-15.fc12.x86_64.rpm | 44 kB 00:00 -(16/23): libxslt-1.1.26-1.fc12.x86_64.rpm | 397 kB 00:00 -(17/23): lm_sensors-libs-3.1.1-4.fc12.x86_64.rpm | 36 kB 00:00 -(18/23): net-snmp-libs-5.4.2.1-18.fc12.x86_64.rpm | 1.3 MB 00:00 -(19/23): openhpi-libs-2.14.0-5.fc12.x86_64.rpm | 128 kB 00:00 -(20/23): pacemaker-1.1.0-1.fc12.x86_64.rpm | 545 kB 00:00 -(21/23): pacemaker-libs-1.1.0-1.fc12.x86_64.rpm | 250 kB 00:00 -(22/23): perl-TimeDate-1.16-11.fc12.noarch.rpm | 33 kB 00:00 -(23/23): resource-agents-3.0.5-1.fc12.x86_64.rpm | 251 kB 00:00 ---------------------------------------------------------------------------------------- -Total 894 kB/s | 5.9 MB 00:06 - - - +Setting up and reading Presto delta metadata +updates-testing/prestodelta | 164 kB 00:00 +fedora/prestodelta | 150 B 00:00 +Processing delta metadata +Package(s) data still to download: 5.7 M +(1/21): OpenIPMI-libs-2.0.16-8.fc13.x86_64.rpm | 474 kB 00:00 +(2/21): PyXML-0.8.4-17.fc13.x86_64.rpm | 906 kB 00:01 +(3/21): cluster-glue-1.0.2-1.fc13.x86_64.rpm | 230 kB 00:00 +(4/21): cluster-glue-libs-1.0.2-1.fc13.x86_64.rpm | 116 kB 00:00 +(5/21): corosync-1.2.1-1.fc13.x86_64.rpm | 136 kB 00:00 +(6/21): corosynclib-1.2.1-1.fc13.x86_64.rpm | 145 kB 00:00 +(7/21): heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64.rpm | 172 kB 00:00 +(8/21): heartbeat-libs-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64.rpm | 265 kB 00:00 +(9/21): libesmtp-1.0.4-12.fc12.x86_64.rpm | 54 kB 00:00 +(10/21): libibverbs-1.1.3-4.fc13.x86_64.rpm | 42 kB 00:00 +(11/21): libmlx4-1.0.1-5.fc13.x86_64.rpm | 27 kB 00:00 +(12/21): libnet-1.1.4-3.fc12.x86_64.rpm | 49 kB 00:00 +(13/21): librdmacm-1.0.10-2.fc13.x86_64.rpm | 22 kB 00:00 +(14/21): lm_sensors-libs-3.1.2-2.fc13.x86_64.rpm | 37 kB 00:00 +(15/21): net-snmp-5.5-12.fc13.x86_64.rpm | 295 kB 00:00 +(16/21): net-snmp-libs-5.5-12.fc13.x86_64.rpm | 1.5 MB 00:01 +(17/21): openhpi-libs-2.14.1-3.fc13.x86_64.rpm | 135 kB 00:00 +(18/21): pacemaker-1.1.1-1.fc13.x86_64.rpm | 543 kB 00:00 +(19/21): pacemaker-libs-1.1.1-1.fc13.x86_64.rpm | 264 kB 00:00 +(20/21): perl-TimeDate-1.20-1.fc13.noarch.rpm | 42 kB 00:00 +(21/21): resource-agents-3.0.10-1.fc13.x86_64.rpm | 357 kB 00:00 +---------------------------------------------------------------------------------------- +Total 539 kB/s | 5.7 MB 00:10 +warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID e8e40fde: NOKEY +fedora/gpgkey | 3.2 kB 00:00 ... 
+Importing GPG key 0xE8E40FDE "Fedora (13) <fedora@fedoraproject.org%gt;" from /etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64 + + Running rpm_check_debug Running Transaction Test -Finished Transaction Test Transaction Test Succeeded Running Transaction - Installing : libtool-ltdl-2.2.6-15.fc12.x86_64 1/23 - Installing : lm_sensors-libs-3.1.1-4.fc12.x86_64 2/23 - Installing : 1:net-snmp-libs-5.4.2.1-18.fc12.x86_64 3/23 - Installing : libxslt-1.1.26-1.fc12.x86_64 4/23 - Installing : openhpi-libs-2.14.0-5.fc12.x86_64 5/23 - Installing : PyXML-0.8.4-15.x86_64 6/23 - Installing : libesmtp-1.0.4-12.fc12.x86_64 7/23 - Installing : OpenIPMI-libs-2.0.16-4.fc12.x86_64 8/23 - Installing : libtasn1-2.3-1.fc12.x86_64 9/23 - Installing : gnutls-2.8.5-1.fc12.x86_64 10/23 - Installing : 1:perl-TimeDate-1.16-11.fc12.noarch 11/23 - Installing : cluster-glue-libs-1.0-0.11.b79635605337.hg.fc12.x86_64 12/23 - Installing : cluster-glue-1.0-0.11.b79635605337.hg.fc12.x86_64 13/23 - Installing : libibverbs-1.1.3-3.fc12.x86_64 14/23 - Installing : resource-agents-3.0.5-1.fc12.x86_64 15/23 - Installing : librdmacm-1.0.10-1.fc12.x86_64 16/23 - Installing : corosynclib-1.1.2-1.fc12.x86_64 17/23 - Installing : corosync-1.1.2-1.fc12.x86_64 18/23 - Installing : libmlx4-1.0.1-3.fc12.x86_64 19/23 - Installing : heartbeat-libs-3.0.0-0.5.0daab7da36a8.hg.fc12.x86_64 20/23 - Installing : heartbeat-3.0.0-0.5.0daab7da36a8.hg.fc12.x86_64 21/23 - Installing : pacemaker-1.1.0-1.fc12.x86_64 22/23 - Installing : pacemaker-libs-1.1.0-1.fc12.x86_64 23/23 + Installing : lm_sensors-libs-3.1.2-2.fc13.x86_64 1/21 + Installing : 1:net-snmp-libs-5.5-12.fc13.x86_64 2/21 + Installing : 1:net-snmp-5.5-12.fc13.x86_64 3/21 + Installing : openhpi-libs-2.14.1-3.fc13.x86_64 4/21 + Installing : libibverbs-1.1.3-4.fc13.x86_64 5/21 + Installing : libmlx4-1.0.1-5.fc13.x86_64 6/21 + Installing : librdmacm-1.0.10-2.fc13.x86_64 7/21 + Installing : corosync-1.2.1-1.fc13.x86_64 8/21 + Installing : corosynclib-1.2.1-1.fc13.x86_64 9/21 + Installing : libesmtp-1.0.4-12.fc12.x86_64 10/21 + Installing : OpenIPMI-libs-2.0.16-8.fc13.x86_64 11/21 + Installing : PyXML-0.8.4-17.fc13.x86_64 12/21 + Installing : libnet-1.1.4-3.fc12.x86_64 13/21 + Installing : 1:perl-TimeDate-1.20-1.fc13.noarch 14/21 + Installing : cluster-glue-1.0.2-1.fc13.x86_64 15/21 + Installing : cluster-glue-libs-1.0.2-1.fc13.x86_64 16/21 + Installing : resource-agents-3.0.10-1.fc13.x86_64 17/21 + Installing : heartbeat-libs-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 18/21 + Installing : heartbeat-3.0.0-0.7.0daab7da36a8.hg.fc13.x86_64 19/21 + Installing : pacemaker-1.1.1-1.fc13.x86_64 20/21 + Installing : pacemaker-libs-1.1.1-1.fc13.x86_64 21/21 Installed: - pacemaker.x86_64 0:1.1.0-1.fc12 + corosync.x86_64 0:1.2.1-1.fc13 pacemaker.x86_64 0:1.1.1-1.fc13 Dependency Installed: - OpenIPMI-libs.x86_64 0:2.0.16-4.fc12 - PyXML.x86_64 0:0.8.4-15 - cluster-glue.x86_64 0:1.0-0.11.b79635605337.hg.fc12 - cluster-glue-libs.x86_64 0:1.0-0.11.b79635605337.hg.fc12 - corosync.x86_64 0:1.1.2-1.fc12 - corosynclib.x86_64 0:1.1.2-1.fc12 - gnutls.x86_64 0:2.8.5-1.fc12 - heartbeat.x86_64 0:3.0.0-0.5.0daab7da36a8.hg.fc12 - heartbeat-libs.x86_64 0:3.0.0-0.5.0daab7da36a8.hg.fc12 - libesmtp.x86_64 0:1.0.4-12.fc12 - libibverbs.x86_64 0:1.1.3-3.fc12 - libmlx4.x86_64 0:1.0.1-3.fc12 - librdmacm.x86_64 0:1.0.10-1.fc12 - libtasn1.x86_64 0:2.3-1.fc12 - libtool-ltdl.x86_64 0:2.2.6-15.fc12 - libxslt.x86_64 0:1.1.26-1.fc12 - lm_sensors-libs.x86_64 0:3.1.1-4.fc12 - net-snmp-libs.x86_64 1:5.4.2.1-18.fc12 - openhpi-libs.x86_64 0:2.14.0-5.fc12 - 
pacemaker-libs.x86_64 0:1.1.0-1.fc12 - perl-TimeDate.noarch 1:1.16-11.fc12 - resource-agents.x86_64 0:3.0.5-1.fc12 + OpenIPMI-libs.x86_64 0:2.0.16-8.fc13 + PyXML.x86_64 0:0.8.4-17.fc13 + cluster-glue.x86_64 0:1.0.2-1.fc13 + cluster-glue-libs.x86_64 0:1.0.2-1.fc13 + corosynclib.x86_64 0:1.2.1-1.fc13 + heartbeat.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 + heartbeat-libs.x86_64 0:3.0.0-0.7.0daab7da36a8.hg.fc13 + libesmtp.x86_64 0:1.0.4-12.fc12 + libibverbs.x86_64 0:1.1.3-4.fc13 + libmlx4.x86_64 0:1.0.1-5.fc13 + libnet.x86_64 0:1.1.4-3.fc12 + librdmacm.x86_64 0:1.0.10-2.fc13 + lm_sensors-libs.x86_64 0:3.1.2-2.fc13 + net-snmp.x86_64 1:5.5-12.fc13 + net-snmp-libs.x86_64 1:5.5-12.fc13 + openhpi-libs.x86_64 0:2.14.1-3.fc13 + pacemaker-libs.x86_64 0:1.1.1-1.fc13 + perl-TimeDate.noarch 1:1.20-1.fc13 + resource-agents.x86_64 0:3.0.10-1.fc13 Complete! -[root@pcmk-1 ~]# - -
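Before moving on, it is worth confirming what actually landed on the system. A quick query (purely illustrative; the versions shown are simply the ones from the transcript above and will differ on your machines) looks like this:

[root@pcmk-1 ~]# rpm -q pacemaker corosync
pacemaker-1.1.1-1.fc13.x86_64
corosync-1.2.1-1.fc13.x86_64

Remember that the same packages need to be present on every node that will join the cluster.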
- -
- Security Shortcuts - - To simplify this guide and focus on the aspects directly connected to clustering, we will now disable the machine’s firewall and SELinux installation. Both of these actions create significant security issues and should not be performed on machines that will be exposed to the outside world. - - - Create an Appendix that deals with (at least) re-enabling the firewall. - - - - - -sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config -/sbin/chkconfig --del iptables - - - - - Now reboot all nodes so the new security settings take effect. - - - -
- -
- -
- Before You Continue - - Repeat the Installation steps so that you have 2 Fedora nodes with the cluster software installed. - - - For the purposes of this document, the additional node is called pcmk-2 with address 192.168.122.42. - -
- -
- Setup -
- Finalize Networking - - Confirm that you can communicate with the two new nodes: - -
- Verify Connectivity by IP address - - +[root@pcmk-1 ~]# + +
+
+ +
+ Before You Continue + + Repeat the Installation steps so that you have 2 Fedora nodes with the cluster software installed. + + + For the purposes of this document, the additional node is called pcmk-2 with address 192.168.122.102. + +
+ +
+ Setup +
+ Finalize Networking + + Confirm that the two nodes can communicate with each other: + +
+ Verify Connectivity by IP address + ping -c 3 192.168.122.102 [root@pcmk-1 ~]# ping -c 3 192.168.122.102 PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data. 64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms 64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms 64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms --- 192.168.122.102 ping statistics --- 3 packets transmitted, 3 received, 0% packet loss, time 2000ms rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms - -
- - Now we need to make sure we can communicate with the machines by their name. If you have a DNS server, add additional entries for the three machines. Otherwise, you’ll need to add the machines to /etc/hosts . Below are the entries for my cluster nodes: - -
- Set up /etc/hosts entries - - + +
+ + Now we need to make sure we can communicate with the machines by name. + If you have a DNS server, add additional entries for the two machines. + Otherwise, you’ll need to add the machines to /etc/hosts. + Below are the entries for my cluster nodes: + +
+ Set up /etc/hosts entries + grep pcmk /etc/hosts [root@pcmk-1 ~]# grep pcmk /etc/hosts 192.168.122.101 pcmk-1.clusterlabs.org pcmk-1 192.168.122.102 pcmk-2.clusterlabs.org pcmk-2 - -
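If your /etc/hosts does not contain these entries yet, a small here-document appends them; this is only a sketch using the example addresses from this guide, so substitute the addresses and names of your own machines and repeat it on the other node:

[root@pcmk-1 ~]# cat <<-END >>/etc/hosts
192.168.122.101 pcmk-1.clusterlabs.org pcmk-1
192.168.122.102 pcmk-2.clusterlabs.org pcmk-2
END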
- - We can now verify the setup by again using ping: - -
- Verify Connectivity by Hostname - - + +
+ + We can now verify the setup by again using ping: + +
+ Verify Connectivity by Hostname + ping -c 3 pcmk-2 [root@pcmk-1 ~]# ping -c 3 pcmk-2 PING pcmk-2.clusterlabs.org (192.168.122.101) 56(84) bytes of data. 64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=1 ttl=64 time=0.164 ms 64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=2 ttl=64 time=0.475 ms 64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=3 ttl=64 time=0.186 ms --- pcmk-2.clusterlabs.org ping statistics --- 3 packets transmitted, 3 received, 0% packet loss, time 2001ms rtt min/avg/max/mdev = 0.164/0.275/0.475/0.141 ms - -
-
- -
- Configure SSH - - SSH is a convenient and secure way to copy files and perform commands remotely. For the purposes of this guide, we will create a key without a password (using the -N “” option) so that we can perform remote actions without being prompted. - - - - NOTE: Unprotected SSH keys, those without a password, are not recommended for servers exposed to the outside world. - - - - Create a new key and allow anyone with that key to log in: - -
- Creating and Activating a new SSH Key - - + +
+
+ +
+ Configure SSH + + SSH is a convenient and secure way to copy files and perform commands remotely. For the purposes of this guide, we will create a key without a password (using the -N “” option) so that we can perform remote actions without being prompted. + + + + Unprotected SSH keys, those without a password, are not recommended for servers exposed to the outside world. + + + + Create a new key and allow anyone with that key to log in: + +
+ Creating and Activating a new SSH Key + [root@pcmk-1 ~]# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N "" Generating public/private dsa key pair. Your identification has been saved in /root/.ssh/id_dsa. Your public key has been saved in /root/.ssh/id_dsa.pub. The key fingerprint is: 91:09:5c:82:5a:6a:50:08:4e:b2:0c:62:de:cc:74:44 root@pcmk-1.clusterlabs.org The key's randomart image is: +--[ DSA 1024]----+ |==.ooEo.. | |X O + .o o | | * A + | | + . | | . S | | | | | | | | | +-----------------+ [root@pcmk-1 ~]# cp .ssh/id_dsa.pub .ssh/authorized_keys [root@pcmk-1 ~]# - -
- - Install the key on the other nodes and test that you can now run commands remotely, without being prompted - -
- Installing the SSH Key on Another Host - - + +
+ + Install the key on the other node and test that you can now run commands remotely, without being prompted. + +
+ Installing the SSH Key on Another Host + [root@pcmk-1 ~]# scp -r .ssh pcmk-2: The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established. RSA key fingerprint is b1:2b:55:93:f1:d9:52:2b:0f:f2:8a:4e:ae:c6:7c:9a. Are you sure you want to continue connecting (yes/no)? yes Warning: Permanently added 'pcmk-2,192.168.122.102' (RSA) to the list of known hosts. root@pcmk-2's password: id_dsa.pub 100% 616 0.6KB/s 00:00 id_dsa 100% 672 0.7KB/s 00:00 known_hosts 100% 400 0.4KB/s 00:00 authorized_keys 100% 616 0.6KB/s 00:00 [root@pcmk-1 ~]# ssh pcmk-2 -- uname -n pcmk-2 [root@pcmk-1 ~]# - -
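As an alternative to copying the whole .ssh directory, most OpenSSH installations also provide the ssh-copy-id helper. Note that, unlike the scp step above, it only installs the public key on the remote machine, so the key would still have to be generated or copied separately for the reverse direction; the command below is offered only as an aside:

[root@pcmk-1 ~]# ssh-copy-id -i ~/.ssh/id_dsa.pub root@pcmk-2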
-
- -
- Short Node Names - - During installation, we filled in the machine’s fully qualifier domain name (FQDN) which can be rather long when it appears in cluster logs and status output. See for yourself how the machine identifies itself: - - - + + +
+ +
+ Short Node Names + + During installation, we filled in the machine’s fully qualified domain name (FQDN) which can be rather long when it appears in cluster logs and status output. + See for yourself how the machine identifies itself: + + [root@pcmk-1 ~]# uname -n pcmk-1.clusterlabs.org [root@pcmk-1 ~]# dnsdomainname clusterlabs.org - - - The output from the second command is fine, but we really don’t need the domain name included in the basic host details. To address this, we need to update /etc/sysconfig/network. This is what it should look like before we start. - - - + + + The output from the second command is fine, but we really don’t need the domain name included in the basic host details. + To address this, we need to update /etc/sysconfig/network. This is what it should look like before we start. + + cat /etc/sysconfig/network NETWORKING=yes HOSTNAME=pcmk-1.clusterlabs.org GATEWAY=192.168.122.1 - - - All we need to do now is strip off the domain name portion, which is stored elsewhere anyway. - - -sed -i.bak 's/\.[a-z].*//g' /etc/sysconfig/network - - - Now confirm the change was successful. The revised file contents should look something like this. - - - + + + All we need to do now is strip off the domain name portion, which is stored elsewhere anyway. + + + sed -i.bak 's/\.[a-z].*//g' /etc/sysconfig/network + + Now confirm the change was successful. + The revised file contents should look something like this. + + [root@pcmk-1 ~]# cat /etc/sysconfig/network NETWORKING=yes HOSTNAME=pcmk-1 GATEWAY=192.168.122.1 - - - However we’re not finished. The machine wont normally see the shortened host name until about it reboots, but we can force it to update - - - + + + However we’re not finished. + The machine won’t normally see the shortened host name until it reboots, but we can force it to update. + + [root@pcmk-1 ~]# source /etc/sysconfig/network [root@pcmk-1 ~]# hostname $HOSTNAME - - - Now check the machine is using the correct names - - - + + + Now check that the machine is using the correct names. + + [root@pcmk-1 ~]# uname -n pcmk-1 [root@pcmk-1 ~]# dnsdomainname clusterlabs.org - - - Now repeat on pcmk-2. -
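If the sed expression above looks opaque, it simply deletes everything from the first dot that is followed by a lowercase letter through to the end of the line. That trims the domain from the HOSTNAME= entry while leaving purely numeric values, such as the gateway address, untouched. You can see the effect without modifying any files (purely illustrative):

[root@pcmk-1 ~]# echo "HOSTNAME=pcmk-1.clusterlabs.org" | sed 's/\.[a-z].*//g'
HOSTNAME=pcmk-1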
- -
- Configuring Corosync - - Choose a port number and multi-cast - - http://en.wikipedia.org/wiki/Multicast - - address. - - http://en.wikipedia.org/wiki/Multicast_address - - - - - Be sure that the values you chose do not conflict with any existing clusters you might have. For advice on choosing a multi-cast address, see http://www.29west.com/docs/THPM/multicast-address-assignment.html - - - For this document, I have chosen port 4000 and used 226.94.1.1 as the multi-cast address. - - - + + + Now repeat on pcmk-2. + +
+ +
+ Configuring Corosync + + Choose a port number and multi-cast + + http://en.wikipedia.org/wiki/Multicast + + address. + + http://en.wikipedia.org/wiki/Multicast_address + + + + + Be sure that the values you chose do not conflict with any existing clusters you might have. + For advice on choosing a multi-cast address, see + + http://www.29west.com/docs/THPM/multicast-address-assignment.html + + + + For this document, I have chosen port 4000 and used 226.94.1.1 as the multi-cast address. + + export ais_port=4000 export ais_mcast=226.94.1.1 - - - Next we automatically determine the hosts address. By not using the full address, we make the configuration suitable to be copied to other nodes. - - -export ais_addr=`ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/` - - - Display and verify the configuration options - - - + + + Next we automatically determine the hosts address. + By not using the full address, we make the configuration suitable to be copied to other nodes. + + export ais_addr=`ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/` + + Display and verify the configuration options + + [root@pcmk-1 ~]# env | grep ais_ ais_mcast=226.94.1.1 ais_port=4000 ais_addr=192.168.122.0 - - - Once you’re happy with the chosen values, update the Corosync configuration - - - + + + Once you’re happy with the chosen values, update the Corosync configuration + + cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf sed -i.bak "s/.*mcastaddr:.*/mcastaddr:\ $ais_mcast/g" /etc/corosync/corosync.conf sed -i.bak "s/.*mcastport:.*/mcastport:\ $ais_port/g" /etc/corosync/corosync.conf sed -i.bak "s/.*bindnetaddr:.*/bindnetaddr:\ $ais_addr/g" /etc/corosync/corosync.conf - - - Tell Corosync to run as the root user - - - -cat <<-END >>/etc/corosync/corosync.conf -aisexec { - user: root - group: root -} -END - - - Finally, tell Corosync to start Pacemaker - - - -cat <<-END >>/etc/corosync/corosync.conf + + + Finally, tell Corosync to start Pacemaker + + +cat <<-END >>/etc/corosync/service.d/pcmk service { # Load the Pacemaker Cluster Resource Manager name: pacemaker ver: 0 } END - - - The final configuration should look something like the sample in the appendix. - -
- -
- Propagate the Configuration - - Now we need to copy the changes so far to the other node: - - - -[root@pcmk-1 ~]# for f in /etc/corosync/corosync.conf /etc/hosts; do scp $f pcmk-2:$f ; done + + + The final configuration should look something like the sample in the appendix. + +
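For reference, after the substitutions above the interface section of /etc/corosync/corosync.conf should end up looking roughly like the sketch below. This is only an illustration built from the values chosen earlier (port 4000, multicast address 226.94.1.1, network 192.168.122.0); the remaining options come from the packaged example file and are omitted here, and the complete file is reproduced in the appendix.

totem {
        version: 2
        ...
        interface {
                ringnumber: 0
                bindnetaddr: 192.168.122.0
                mcastaddr: 226.94.1.1
                mcastport: 4000
        }
}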
+
+ Propagate the Configuration + + Now we need to copy the changes so far to the other node: + + +[root@pcmk-1 ~]# for f in /etc/corosync/corosync.conf /etc/corosync/service.d/pcmk /etc/hosts; do scp $f pcmk-2:$f ; done corosync.conf 100% 1528 1.5KB/s 00:00 hosts 100% 281 0.3KB/s 00:00 [root@pcmk-1 ~]# - -
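Because passwordless SSH is already set up, verifying that the copy succeeded is easy. One way (illustrative only) is to compare checksums of the configuration from both nodes and confirm they match:

[root@pcmk-1 ~]# md5sum /etc/corosync/corosync.conf
[root@pcmk-1 ~]# ssh pcmk-2 -- md5sum /etc/corosync/corosync.conf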
- -
- +
+
+
diff --git a/include/crm/cib_ops.h b/include/crm/cib_ops.h index d97c4652b0..401bcf0503 100644 --- a/include/crm/cib_ops.h +++ b/include/crm/cib_ops.h @@ -1,95 +1,95 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef CIB_OPS__H #define CIB_OPS__H #include #include #include #include #include #include #include #include #include #include #include #include #include enum cib_errors cib_process_query( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_erase( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_bump( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_replace( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_create( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_modify( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_delete( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_diff( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_upgrade( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_process_xpath( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer); enum cib_errors cib_update_counter(xmlNode *xml_obj, const char *field, gboolean reset); -xmlNode *diff_cib_object(xmlNode *old_cib, xmlNode *new_cib, gboolean suppress); -gboolean apply_cib_diff(xmlNode *old, xmlNode *diff, xmlNode **new); -gboolean cib_config_changed(xmlNode *diff); -gboolean update_results( +extern xmlNode *diff_cib_object(xmlNode *old_cib, xmlNode *new_cib, gboolean suppress); +extern gboolean apply_cib_diff(xmlNode *old, xmlNode *diff, xmlNode **new); +extern gboolean cib_config_changed(xmlNode *last, xmlNode *next, xmlNode **diff); +extern gboolean update_results( xmlNode *failed, xmlNode *target, const char* operation, int return_code); #endif diff --git 
a/include/crm/common/util.h b/include/crm/common/util.h index 173d3d065e..93b0009025 100644 --- a/include/crm/common/util.h +++ b/include/crm/common/util.h @@ -1,275 +1,275 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef CRM_COMMON_UTIL__H #define CRM_COMMON_UTIL__H #include #include #include #include #include #include #include #if SUPPORT_HEARTBEAT # include #else # define NORMALNODE "normal" # define ACTIVESTATUS "active" /* fully functional, and all links are up */ # define DEADSTATUS "dead" /* Status of non-working link or machine */ # define PINGSTATUS "ping" /* Status of a working ping node */ # define JOINSTATUS "join" /* Status when an api client joins */ # define LEAVESTATUS "leave" /* Status when an api client leaves */ # define ONLINESTATUS "online" /* Status of an online client */ # define OFFLINESTATUS "offline" /* Status of an offline client */ #endif #define DEBUG_INC SIGUSR1 #define DEBUG_DEC SIGUSR2 extern unsigned int crm_log_level; extern gboolean crm_config_error; extern gboolean crm_config_warning; #ifdef HAVE_GETOPT_H # include #else #define no_argument 0 #define required_argument 1 #endif #define pcmk_option_default 0x00000 #define pcmk_option_hidden 0x00001 #define pcmk_option_paragraph 0x00002 #define pcmk_option_example 0x00004 struct crm_option { /* Fields from 'struct option' in getopt.h */ /* name of long option */ const char *name; /* * one of no_argument, required_argument, and optional_argument: * whether option takes an argument */ int has_arg; /* if not NULL, set *flag to val when option found */ int *flag; /* if flag not NULL, value to set *flag to; else return value */ int val; /* Custom fields */ const char *desc; long flags; }; #define crm_config_err(fmt...) { crm_config_error = TRUE; crm_err(fmt); } #define crm_config_warn(fmt...) 
{ crm_config_warning = TRUE; crm_warn(fmt); } extern void crm_log_deinit(void); extern gboolean crm_log_init( const char *entity, int level, gboolean coredir, gboolean to_stderr, int argc, char **argv); /* returns the old value */ extern unsigned int set_crm_log_level(unsigned int level); extern unsigned int get_crm_log_level(void); extern char *crm_itoa(int an_int); extern char *crm_strdup_fn(const char *a, const char *file, const char *fn, int line); extern char *generate_hash_key(const char *crm_msg_reference, const char *sys); extern char *generate_hash_value(const char *src_node, const char *src_subsys); extern gboolean decodeNVpair(const char *srcstring, char separator, char **name, char **value); extern int compare_version(const char *version1, const char *version2); extern char *generateReference(const char *custom1, const char *custom2); extern void alter_debug(int nsig); extern void g_hash_destroy_str(gpointer data); extern gboolean crm_is_true(const char * s); extern int crm_str_to_boolean(const char * s, int * ret); extern long long crm_get_msec(const char * input); extern unsigned long long crm_get_interval(const char * input); extern const char *op_status2text(op_status_t status); extern char *generate_op_key( const char *rsc_id, const char *op_type, int interval); extern gboolean parse_op_key( const char *key, char **rsc_id, char **op_type, int *interval); extern char *generate_notify_key( const char *rsc_id, const char *notify_type, const char *op_type); extern char *generate_transition_magic_v202( const char *transition_key, int op_status); extern char *generate_transition_magic( const char *transition_key, int op_status, int op_rc); extern gboolean decode_transition_magic( const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc); extern char *generate_transition_key(int action, int transition_id, int target_rc, const char *node); extern gboolean decode_transition_key( const char *key, char **uuid, int *action, int *transition_id, int *target_rc); extern char *crm_concat(const char *prefix, const char *suffix, char join); extern gboolean decode_op_key( const char *key, char **rsc_id, char **op_type, int *interval); extern void filter_action_parameters(xmlNode *param_set, const char *version); extern void filter_reload_parameters(xmlNode *param_set, const char *restart_string); #define safe_str_eq(a, b) crm_str_eq(a, b, FALSE) extern gboolean crm_str_eq(const char *a, const char *b, gboolean use_case); extern gboolean safe_str_neq(const char *a, const char *b); extern int crm_parse_int(const char *text, const char *default_text); extern long long crm_int_helper(const char *text, char **end_text); #define crm_atoi(text, default_text) crm_parse_int(text, default_text) extern void crm_abort(const char *file, const char *function, int line, const char *condition, gboolean do_core, gboolean do_fork); extern char *generate_series_filename( const char *directory, const char *series, int sequence, gboolean bzip); extern int get_last_sequence(const char *directory, const char *series); extern void write_last_sequence( const char *directory, const char *series, int sequence, int max); extern int crm_pid_active(long pid); extern int crm_read_pidfile(const char *filename); extern int crm_lock_pidfile(const char *filename); extern void crm_make_daemon( const char *name, gboolean daemonize, const char *pidfile); typedef struct pe_cluster_option_s { const char *name; const char *alt_name; const char *type; const char *values; const 
char *default_value; gboolean (*is_valid)(const char *); const char *description_short; const char *description_long; } pe_cluster_option; extern const char *cluster_option( GHashTable* options, gboolean(*validate)(const char*), const char *name, const char *old_name, const char *def_value); extern const char *get_cluster_pref( GHashTable *options, pe_cluster_option *option_list, int len, const char *name); extern void config_metadata( const char *name, const char *version, const char *desc_short, const char *desc_long, pe_cluster_option *option_list, int len); extern void verify_all_options(GHashTable *options, pe_cluster_option *option_list, int len); extern gboolean check_time(const char *value); extern gboolean check_timer(const char *value); extern gboolean check_boolean(const char *value); extern gboolean check_number(const char *value); extern int char2score(const char *score); extern char *score2char(int score); extern gboolean crm_is_writable( const char *dir, const char *file, const char *user, const char *group, gboolean need_both); extern long long crm_set_bit(const char *function, long long word, long long bit); extern long long crm_clear_bit(const char *function, long long word, long long bit); #define set_bit(word, bit) word = crm_set_bit(__PRETTY_FUNCTION__, word, bit) #define clear_bit(word, bit) word = crm_clear_bit(__PRETTY_FUNCTION__, word, bit) #define set_bit_inplace(word, bit) word |= bit #define clear_bit_inplace(word, bit) word &= ~bit static inline gboolean is_not_set(long long word, long long bit) { return ((word & bit) == 0); } static inline gboolean is_set(long long word, long long bit) { return ((word & bit) == bit); } static inline gboolean is_set_any(long long word, long long bit) { return ((word & bit) != 0); } extern gboolean is_openais_cluster(void); extern gboolean is_heartbeat_cluster(void); extern xmlNode *cib_recv_remote_msg(void *session, gboolean encrypted); extern void cib_send_remote_msg(void *session, xmlNode *msg, gboolean encrypted); extern char *crm_meta_name(const char *field); extern const char *crm_meta_value(GHashTable *hash, const char *field); extern void crm_set_options(const char *short_options, const char *usage, struct crm_option *long_options, const char *app_desc); extern int crm_get_option(int argc, char **argv, int *index); extern void crm_help(char cmd, int exit_code); extern gboolean attrd_update(IPC_Channel *cluster, char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen); extern gboolean attrd_lazy_update(char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen); extern gboolean attrd_update_no_mainloop(int *connection, char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen); extern int node_score_red; extern int node_score_green; extern int node_score_yellow; extern int node_score_infinity; #include -extern xmlNode *create_operation_update(xmlNode *parent, lrm_op_t *op, const char *caller_version, int target_rc, const char *origin); +extern xmlNode *create_operation_update(xmlNode *parent, lrm_op_t *op, const char *caller_version, int target_rc, const char *origin, int level); #endif diff --git a/lib/ais/plugin.c b/lib/ais/plugin.c index 2e465a211e..604afe88e1 100644 --- a/lib/ais/plugin.c +++ b/lib/ais/plugin.c @@ -1,1715 +1,1728 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can 
redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef AIS_COROSYNC # include # include # include #endif #include #include #include "plugin.h" #include "utils.h" #include #include #include #include #include #include #include #include #include struct corosync_api_v1 *pcmk_api = NULL; uint32_t plugin_has_votes = 0; uint32_t plugin_expected_votes = 2; int use_mgmtd = 0; int plugin_log_level = LOG_DEBUG; char *local_uname = NULL; int local_uname_len = 0; char *local_cname = NULL; int local_cname_len = 0; uint32_t local_nodeid = 0; char *ipc_channel_name = NULL; static uint64_t local_born_on = 0; uint64_t membership_seq = 0; pthread_t pcmk_wait_thread; gboolean wait_active = TRUE; gboolean have_reliable_membership_id = FALSE; GHashTable *ipc_client_list = NULL; GHashTable *membership_list = NULL; GHashTable *membership_notify_list = NULL; #define MAX_RESPAWN 100 #define LOOPBACK_ID 16777343 #define crm_flag_none 0x00000000 #define crm_flag_members 0x00000001 struct crm_identify_msg_s { coroipc_request_header_t header __attribute__((aligned(8))); uint32_t id; uint32_t pid; int32_t votes; uint32_t processes; char uname[256]; char version[256]; uint64_t born_on; } __attribute__((packed)); static crm_child_t pcmk_children[] = { { 0, crm_proc_none, crm_flag_none, 0, 0, FALSE, "none", NULL, NULL, NULL, NULL }, { 0, crm_proc_ais, crm_flag_none, 0, 0, FALSE, "ais", NULL, NULL, NULL, NULL }, { 0, crm_proc_lrmd, crm_flag_none, 3, 0, TRUE, "lrmd", NULL, HB_DAEMON_DIR"/lrmd", NULL, NULL }, { 0, crm_proc_cib, crm_flag_members, 2, 0, TRUE, "cib", CRM_DAEMON_USER, CRM_DAEMON_DIR"/cib", NULL, NULL }, { 0, crm_proc_crmd, crm_flag_members, 6, 0, TRUE, "crmd", CRM_DAEMON_USER, CRM_DAEMON_DIR"/crmd", NULL, NULL }, { 0, crm_proc_attrd, crm_flag_none, 4, 0, TRUE, "attrd", CRM_DAEMON_USER, CRM_DAEMON_DIR"/attrd", NULL, NULL }, { 0, crm_proc_stonithd, crm_flag_none, 0, 0, TRUE, "stonithd", NULL, "/bin/false", NULL, NULL }, { 0, crm_proc_pe, crm_flag_none, 5, 0, TRUE, "pengine", CRM_DAEMON_USER, CRM_DAEMON_DIR"/pengine", NULL, NULL }, { 0, crm_proc_mgmtd, crm_flag_none, 7, 0, TRUE, "mgmtd", NULL, HB_DAEMON_DIR"/mgmtd", NULL, NULL }, { 0, crm_proc_stonith_ng, crm_flag_none, 1, 0, TRUE, "stonith-ng", NULL, CRM_DAEMON_DIR"/stonithd", NULL, NULL }, }; void send_cluster_id(void); int send_cluster_msg_raw(const AIS_Message *ais_msg); char *pcmk_generate_membership_data(void); gboolean check_message_sanity(const AIS_Message *msg, const char *data); #ifdef AIS_COROSYNC typedef const void ais_void_ptr; int pcmk_shutdown(void); void pcmk_peer_update(enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id 
*ring_id); #else typedef void ais_void_ptr; extern totempg_groups_handle openais_group_handle; int pcmk_shutdown(struct objdb_iface_ver0 *objdb); void pcmk_peer_update(enum totem_configuration_type configuration_type, unsigned int *member_list, int member_list_entries, unsigned int *left_list, int left_list_entries, unsigned int *joined_list, int joined_list_entries, struct memb_ring_id *ring_id); #endif int pcmk_startup (struct corosync_api_v1 *corosync_api); int pcmk_config_init(struct corosync_api_v1 *corosync_api); int pcmk_ipc_exit (void *conn); int pcmk_ipc_connect (void *conn); void pcmk_ipc(void *conn, ais_void_ptr *msg); void pcmk_exec_dump(void); void pcmk_cluster_swab(void *msg); void pcmk_cluster_callback(ais_void_ptr *message, unsigned int nodeid); void pcmk_nodeid(void *conn, ais_void_ptr *msg); void pcmk_nodes(void *conn, ais_void_ptr *msg); void pcmk_notify(void *conn, ais_void_ptr *msg); void pcmk_remove_member(void *conn, ais_void_ptr *msg); void pcmk_quorum(void *conn, ais_void_ptr *msg); void pcmk_cluster_id_swab(void *msg); void pcmk_cluster_id_callback(ais_void_ptr *message, unsigned int nodeid); void ais_remove_peer(char *node_id); static uint32_t get_process_list(void) { int lpc = 0; uint32_t procs = crm_proc_ais; for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) { if(pcmk_children[lpc].pid != 0) { procs |= pcmk_children[lpc].flag; } } return procs; } static struct corosync_lib_handler pcmk_lib_service[] = { { /* 0 */ .lib_handler_fn = pcmk_ipc, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 1 */ .lib_handler_fn = pcmk_nodes, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 2 */ .lib_handler_fn = pcmk_notify, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 3 */ .lib_handler_fn = pcmk_nodeid, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (struct crm_ais_nodeid_resp_s), .response_id = crm_class_nodeid, #endif }, { /* 4 */ .lib_handler_fn = pcmk_remove_member, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, { /* 5 */ .lib_handler_fn = pcmk_quorum, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, #ifdef AIS_WHITETANK .response_size = sizeof (coroipc_response_header_t), .response_id = CRM_MESSAGE_IPC_ACK, #endif }, }; static struct corosync_exec_handler pcmk_exec_service[] = { { /* 0 */ .exec_handler_fn = pcmk_cluster_callback, .exec_endian_convert_fn = pcmk_cluster_swab }, { /* 1 */ .exec_handler_fn = pcmk_cluster_id_callback, .exec_endian_convert_fn = pcmk_cluster_id_swab } }; /* * Exports the interface for the service */ struct corosync_service_engine pcmk_service_handler = { .name = (unsigned char *)"Pacemaker Cluster Manager "PACKAGE_VERSION, .id = PCMK_SERVICE_ID, .private_data_size = 0, .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, .lib_init_fn = pcmk_ipc_connect, .lib_exit_fn = pcmk_ipc_exit, .exec_init_fn = pcmk_startup, .exec_exit_fn = pcmk_shutdown, .config_init_fn = pcmk_config_init, #ifdef AIS_COROSYNC .priority = 50, .lib_engine = pcmk_lib_service, .lib_engine_count = 
sizeof (pcmk_lib_service) / sizeof (struct corosync_lib_handler), .exec_engine = pcmk_exec_service, .exec_engine_count = sizeof (pcmk_exec_service) / sizeof (struct corosync_exec_handler), #else .lib_service = pcmk_lib_service, .lib_service_count = sizeof (pcmk_lib_service) / sizeof (struct corosync_lib_handler), .exec_service = pcmk_exec_service, .exec_service_count = sizeof (pcmk_exec_service) / sizeof (struct corosync_exec_handler), #endif .confchg_fn = pcmk_peer_update, .exec_dump_fn = pcmk_exec_dump, /* void (*sync_init) (void); */ /* int (*sync_process) (void); */ /* void (*sync_activate) (void); */ /* void (*sync_abort) (void); */ }; /* * Dynamic Loader definition */ struct corosync_service_engine *pcmk_get_handler_ver0 (void); #ifdef AIS_COROSYNC struct corosync_service_engine_iface_ver0 pcmk_service_handler_iface = { .corosync_get_service_engine_ver0 = pcmk_get_handler_ver0 }; #else struct openais_service_handler_iface_ver0 pcmk_service_handler_iface = { .openais_get_service_handler_ver0 = pcmk_get_handler_ver0 }; #endif static struct lcr_iface openais_pcmk_ver0[1] = { { .name = "pacemaker", .version = 0, .versions_replace = 0, .versions_replace_count = 0, .dependencies = 0, .dependency_count = 0, .constructor = NULL, .destructor = NULL, .interfaces = NULL } }; static struct lcr_comp pcmk_comp_ver0 = { .iface_count = 1, .ifaces = openais_pcmk_ver0 }; struct corosync_service_engine *pcmk_get_handler_ver0 (void) { return (&pcmk_service_handler); } __attribute__ ((constructor)) static void register_this_component (void) { lcr_interfaces_set (&openais_pcmk_ver0[0], &pcmk_service_handler_iface); lcr_component_register (&pcmk_comp_ver0); } static int plugin_has_quorum(void) { if((plugin_expected_votes >> 1) < plugin_has_votes) { return 1; } return 0; } static void update_expected_votes(int value) { if(value < plugin_has_votes) { /* Never drop below the number of connected nodes */ ais_info("Cannot update expected quorum votes %d -> %d:" " value cannot be less that the current number of votes", plugin_expected_votes, value); } else if(plugin_expected_votes != value) { ais_info("Expected quorum votes %d -> %d", plugin_expected_votes, value); plugin_expected_votes = value; } } /* Create our own local copy of the config so we can navigate it */ static void process_ais_conf(void) { char *value = NULL; gboolean any_log = FALSE; hdb_handle_t top_handle = 0; hdb_handle_t local_handle = 0; ais_info("Reading configure"); top_handle = config_find_init(pcmk_api, "logging"); local_handle = config_find_next(pcmk_api, "logging", top_handle); get_config_opt(pcmk_api, local_handle, "debug", &value, "on"); if(ais_get_boolean(value)) { plugin_log_level = LOG_DEBUG; pcmk_env.debug = "1"; } else { plugin_log_level = LOG_INFO; pcmk_env.debug = "0"; } get_config_opt(pcmk_api, local_handle, "to_logfile", &value, "off"); if(ais_get_boolean(value)) { get_config_opt(pcmk_api, local_handle, "logfile", &value, NULL); if(value == NULL) { ais_err("Logging to a file requested but no log file specified"); } else { - uid_t log_user = -1; - gid_t log_group = -1; - + uid_t pcmk_uid = geteuid(); + uid_t pcmk_gid = getegid(); + pcmk_env.logfile = value; - pcmk_user_lookup("root", &log_user, NULL); - pcmk_user_lookup(CRM_DAEMON_USER, NULL, &log_group); - if(log_user >= 0 && log_group >= 0) { + if(pcmk_uid >= 0 && pcmk_gid >= 0) { /* Ensure the file has the correct permissions */ FILE *logfile = fopen(value, "a"); int logfd = fileno(logfile); - fchown(logfd, log_user, log_group); + fchown(logfd, pcmk_uid, pcmk_gid); 
fchmod(logfd, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP); - fprintf(logfile, "Set r/w permissions for "CRM_DAEMON_USER"\n"); + fprintf(logfile, "Set r/w permissions for uid=%d, gid=%d on %s\n", + pcmk_uid, pcmk_gid, value); fflush(logfile); fsync(fileno(logfile)); fclose(logfile); any_log = TRUE; } else { ais_err("Couldn't setup correct logfile permissions, some logs may be lost"); } } } get_config_opt(pcmk_api, local_handle, "to_syslog", &value, "on"); if(any_log && ais_get_boolean(value) == FALSE) { ais_info("User configured file based logging and explicitly disabled syslog."); value = "none"; } else { if(ais_get_boolean(value) == FALSE) { ais_err("Please enable some sort of logging, either 'to_file: on' or 'to_syslog: on'."); ais_err("If you use file logging, be sure to also define a value for 'logfile'"); } get_config_opt(pcmk_api, local_handle, "syslog_facility", &value, "daemon"); } pcmk_env.syslog = value; config_find_done(pcmk_api, local_handle); top_handle = config_find_init(pcmk_api, "service"); local_handle = config_find_next(pcmk_api, "service", top_handle); while(local_handle) { value = NULL; pcmk_api->object_key_get(local_handle, "name", strlen("name"), (void**)&value, NULL); if(ais_str_eq("pacemaker", value)) { break; } local_handle = config_find_next(pcmk_api, "service", top_handle); } get_config_opt(pcmk_api, local_handle, "clustername", &local_cname, "pcmk"); local_cname_len = strlen(local_cname); get_config_opt(pcmk_api, local_handle, "use_logd", &value, "no"); pcmk_env.use_logd = value; get_config_opt(pcmk_api, local_handle, "use_mgmtd", &value, "no"); if(ais_get_boolean(value) == FALSE) { int lpc = 0; for (; lpc < SIZEOF(pcmk_children); lpc++) { if(crm_proc_mgmtd & pcmk_children[lpc].flag) { /* Disable mgmtd startup */ pcmk_children[lpc].start_seq = 0; break; } } } config_find_done(pcmk_api, local_handle); } int pcmk_config_init(struct corosync_api_v1 *unused) { return 0; } static void *pcmk_wait_dispatch (void *arg) { struct timespec waitsleep = { .tv_sec = 1, .tv_nsec = 0 }; while(wait_active) { int lpc = 0; for (; lpc < SIZEOF(pcmk_children); lpc++) { if(pcmk_children[lpc].pid > 0) { int status; pid_t pid = wait4( pcmk_children[lpc].pid, &status, WNOHANG, NULL); if(pid == 0) { continue; } else if(pid < 0) { ais_perror("Call to wait4(%s) failed", pcmk_children[lpc].name); continue; } /* cleanup */ pcmk_children[lpc].pid = 0; pcmk_children[lpc].conn = NULL; pcmk_children[lpc].async_conn = NULL; if(WIFSIGNALED(status)) { int sig = WTERMSIG(status); ais_err("Child process %s terminated with signal %d" " (pid=%d, core=%s)", pcmk_children[lpc].name, sig, pid, WCOREDUMP(status)?"true":"false"); } else if (WIFEXITED(status)) { int rc = WEXITSTATUS(status); do_ais_log(rc==0?LOG_NOTICE:LOG_ERR, "Child process %s exited (pid=%d, rc=%d)", pcmk_children[lpc].name, pid, rc); if(rc == 100) { ais_notice("Child process %s no longer wishes" " to be respawned", pcmk_children[lpc].name); pcmk_children[lpc].respawn = FALSE; } } pcmk_children[lpc].respawn_count += 1; if(pcmk_children[lpc].respawn_count > MAX_RESPAWN) { ais_err("Child respawn count exceeded by %s", pcmk_children[lpc].name); pcmk_children[lpc].respawn = FALSE; } if(pcmk_children[lpc].respawn) { ais_notice("Respawning failed child process: %s", pcmk_children[lpc].name); spawn_child(&(pcmk_children[lpc])); } else { send_cluster_id(); } } } sched_yield (); nanosleep (&waitsleep, 0); } return 0; } static uint32_t pcmk_update_nodeid(void) { int last = local_nodeid; #if AIS_COROSYNC local_nodeid = pcmk_api->totem_nodeid_get(); #else 
local_nodeid = totempg_my_nodeid_get(); #endif if(last != local_nodeid) { if(last == 0) { ais_info("Local node id: %u", local_nodeid); } else { char *last_s = NULL; ais_malloc0(last_s, 32); ais_warn("Detected local node id change: %u -> %u", last, local_nodeid); snprintf(last_s, 31, "%u", last); ais_remove_peer(last_s); ais_free(last_s); } update_member(local_nodeid, 0, 0, 1, 0, local_uname, CRM_NODE_MEMBER, NULL); } return local_nodeid; } static void build_path(const char *path_c, mode_t mode) { int offset = 1, len = 0; char *path = ais_strdup(path_c); AIS_CHECK(path != NULL, return); for(len = strlen(path); offset < len; offset++) { if(path[offset] == '/') { path[offset] = 0; if(mkdir(path, mode) < 0 && errno != EEXIST) { ais_perror("Could not create directory '%s'", path); break; } path[offset] = '/'; } } + if(mkdir(path, mode) < 0 && errno != EEXIST) { + ais_perror("Could not create directory '%s'", path); + } ais_free(path); } int pcmk_startup(struct corosync_api_v1 *init_with) { int rc = 0; int lpc = 0; int start_seq = 1; struct utsname us; struct rlimit cores; static int max = SIZEOF(pcmk_children); uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; + uid_t root_uid = -1; + uid_t cs_uid = geteuid(); + pcmk_user_lookup("root", &root_uid, NULL); + pcmk_api = init_with; #ifdef AIS_WHITETANK log_init ("crm"); #endif pcmk_env.debug = "0"; pcmk_env.logfile = NULL; pcmk_env.use_logd = "false"; pcmk_env.syslog = "daemon"; + + if(cs_uid != root_uid) { + ais_err("Corosync must be configured to start as 'root'," + " otherwise Pacemaker cannot manage services." + " Expected %d got %d", root_uid, cs_uid); + return -1; + } process_ais_conf(); membership_list = g_hash_table_new_full( g_direct_hash, g_direct_equal, NULL, destroy_ais_node); membership_notify_list = g_hash_table_new(g_direct_hash, g_direct_equal); ipc_client_list = g_hash_table_new(g_direct_hash, g_direct_equal); ais_info("CRM: Initialized"); log_printf(LOG_INFO, "Logging: Initialized %s\n", __PRETTY_FUNCTION__); rc = getrlimit(RLIMIT_CORE, &cores); if(rc < 0) { ais_perror("Cannot determine current maximum core size."); } if(cores.rlim_max <= 0) { cores.rlim_max = RLIM_INFINITY; rc = setrlimit(RLIMIT_CORE, &cores); if(rc < 0) { ais_perror("Core file generation will remain disabled." 
" Core files are an important diagnositic tool," " please consider enabling them by default."); } } else { ais_info("Maximum core file size is: %lu", cores.rlim_max); #if 0 /* system() is not thread-safe, can't call from here * Actually, its a pretty hacky way to try and achieve this anyway */ if(system("echo 1 > /proc/sys/kernel/core_uses_pid") != 0) { ais_perror("Could not enable /proc/sys/kernel/core_uses_pid"); } #endif } if(pcmk_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) < 0) { ais_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER); return TRUE; } mkdir(CRM_STATE_DIR, 0750); chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid); /* Used by stonithd */ build_path(HA_STATE_DIR"/heartbeat", 0755); /* Used by RAs - Leave owned by root */ build_path(CRM_RSCTMP_DIR, 0755); rc = uname(&us); AIS_ASSERT(rc == 0); local_uname = ais_strdup(us.nodename); local_uname_len = strlen(local_uname); ais_info("Service: %d", PCMK_SERVICE_ID); ais_info("Local hostname: %s", local_uname); pcmk_update_nodeid(); pthread_create (&pcmk_wait_thread, NULL, pcmk_wait_dispatch, NULL); for (start_seq = 1; start_seq < max; start_seq++) { /* dont start anything with start_seq < 1 */ for (lpc = 0; lpc < max; lpc++) { if(start_seq == pcmk_children[lpc].start_seq) { spawn_child(&(pcmk_children[lpc])); } } } return 0; } /* static void ais_print_node(const char *prefix, struct totem_ip_address *host) { int len = 0; char *buffer = NULL; ais_malloc0(buffer, INET6_ADDRSTRLEN+1); inet_ntop(host->family, host->addr, buffer, INET6_ADDRSTRLEN); len = strlen(buffer); ais_info("%s: %.*s", prefix, len, buffer); ais_free(buffer); } */ #if 0 /* copied here for reference from exec/totempg.c */ char *totempg_ifaces_print (unsigned int nodeid) { static char iface_string[256 * INTERFACE_MAX]; char one_iface[64]; struct totem_ip_address interfaces[INTERFACE_MAX]; char **status; unsigned int iface_count; unsigned int i; int res; iface_string[0] = '\0'; res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count); if (res == -1) { return ("no interface found for nodeid"); } for (i = 0; i < iface_count; i++) { sprintf (one_iface, "r(%d) ip(%s), ", i, totemip_print (&interfaces[i])); strcat (iface_string, one_iface); } return (iface_string); } #endif static void ais_mark_unseen_peer_dead( gpointer key, gpointer value, gpointer user_data) { int *changed = user_data; crm_node_t *node = value; if(node->last_seen != membership_seq && ais_str_eq(CRM_NODE_LOST, node->state) == FALSE) { ais_info("Node %s was not seen in the previous transition", node->uname); *changed += update_member(node->id, 0, membership_seq, node->votes, node->processes, node->uname, CRM_NODE_LOST, NULL); } } void pcmk_peer_update ( enum totem_configuration_type configuration_type, #ifdef AIS_COROSYNC const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id #else unsigned int *member_list, int member_list_entries, unsigned int *left_list, int left_list_entries, unsigned int *joined_list, int joined_list_entries, struct memb_ring_id *ring_id #endif ) { int lpc = 0; int changed = 0; int do_update = 0; AIS_ASSERT(ring_id != NULL); switch(configuration_type) { case TOTEM_CONFIGURATION_REGULAR: do_update = 1; break; case TOTEM_CONFIGURATION_TRANSITIONAL: break; } membership_seq = ring_id->seq; ais_notice("%s membership event on ring %lld: memb=%ld, new=%ld, lost=%ld", 
do_update?"Stable":"Transitional", ring_id->seq, (long)member_list_entries, (long)joined_list_entries, (long)left_list_entries); if(do_update == 0) { for(lpc = 0; lpc < joined_list_entries; lpc++) { const char *prefix = "new: "; uint32_t nodeid = joined_list[lpc]; ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } for(lpc = 0; lpc < member_list_entries; lpc++) { const char *prefix = "memb:"; uint32_t nodeid = member_list[lpc]; ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } for(lpc = 0; lpc < left_list_entries; lpc++) { const char *prefix = "lost:"; uint32_t nodeid = left_list[lpc]; ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } return; } for(lpc = 0; lpc < joined_list_entries; lpc++) { const char *prefix = "NEW: "; uint32_t nodeid = joined_list[lpc]; crm_node_t *node = NULL; changed += update_member( nodeid, 0, membership_seq, -1, 0, NULL, CRM_NODE_MEMBER, NULL); ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); node = g_hash_table_lookup(membership_list, GUINT_TO_POINTER(nodeid)); if(node->addr == NULL) { const char *addr = totempg_ifaces_print(nodeid); node->addr = ais_strdup(addr); ais_debug("Node %u has address %s", nodeid, node->addr); } } for(lpc = 0; lpc < member_list_entries; lpc++) { const char *prefix = "MEMB:"; uint32_t nodeid = member_list[lpc]; changed += update_member( nodeid, 0, membership_seq, -1, 0, NULL, CRM_NODE_MEMBER, NULL); ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } for(lpc = 0; lpc < left_list_entries; lpc++) { const char *prefix = "LOST:"; uint32_t nodeid = left_list[lpc]; changed += update_member( nodeid, 0, membership_seq, -1, 0, NULL, CRM_NODE_LOST, NULL); ais_info("%s %s %u", prefix, member_uname(nodeid), nodeid); } if(changed && joined_list_entries == 0 && left_list_entries == 0) { ais_err("Something strange happened: %d", changed); changed = 0; } ais_debug_2("Reaping unseen nodes..."); g_hash_table_foreach(membership_list, ais_mark_unseen_peer_dead, &changed); if(member_list_entries > 1) { /* Used to set born-on in send_cluster_id()) * We need to wait until we have at least one peer since first * membership id is based on the one before we stopped and isn't reliable */ have_reliable_membership_id = TRUE; } if(changed) { ais_debug("%d nodes changed", changed); pcmk_update_nodeid(); send_member_notification(); } send_cluster_id(); } int pcmk_ipc_exit (void *conn) { int lpc = 0; const char *client = NULL; void *async_conn = conn; for (; lpc < SIZEOF(pcmk_children); lpc++) { if(pcmk_children[lpc].conn == conn) { if(wait_active == FALSE) { /* Make sure the shutdown loop exits */ pcmk_children[lpc].pid = 0; } pcmk_children[lpc].conn = NULL; pcmk_children[lpc].async_conn = NULL; client = pcmk_children[lpc].name; break; } } g_hash_table_remove(membership_notify_list, async_conn); g_hash_table_remove(ipc_client_list, async_conn); do_ais_log(client?LOG_INFO:(LOG_DEBUG+1), "Client %s (conn=%p, async-conn=%p) left", client?client:"unknown-transient", conn, async_conn); return (0); } int pcmk_ipc_connect (void *conn) { /* OpenAIS hasn't finished setting up the connection at this point * Sending messages now messes up the protocol! 
*/ return (0); } /* * Executive message handlers */ void pcmk_cluster_swab(void *msg) { AIS_Message *ais_msg = msg; ais_debug_3("Performing endian conversion..."); ais_msg->id = swab32 (ais_msg->id); ais_msg->size = swab32 (ais_msg->size); ais_msg->is_compressed = swab32 (ais_msg->is_compressed); ais_msg->compressed_size = swab32 (ais_msg->compressed_size); ais_msg->host.id = swab32 (ais_msg->host.id); ais_msg->host.pid = swab32 (ais_msg->host.pid); ais_msg->host.type = swab32 (ais_msg->host.type); ais_msg->host.size = swab32 (ais_msg->host.size); ais_msg->host.local = swab32 (ais_msg->host.local); ais_msg->sender.id = swab32 (ais_msg->sender.id); ais_msg->sender.pid = swab32 (ais_msg->sender.pid); ais_msg->sender.type = swab32 (ais_msg->sender.type); ais_msg->sender.size = swab32 (ais_msg->sender.size); ais_msg->sender.local = swab32 (ais_msg->sender.local); ais_msg->header.size = swab32 (ais_msg->header.size); ais_msg->header.id = swab32 (ais_msg->header.id); ais_msg->header.error = swab32 (ais_msg->header.error); } void pcmk_cluster_callback ( ais_void_ptr *message, unsigned int nodeid) { const AIS_Message *ais_msg = message; ais_debug_2("Message from node %u (%s)", nodeid, nodeid==local_nodeid?"local":"remote"); /* Shouldn't be required... update_member( ais_msg->sender.id, membership_seq, -1, 0, ais_msg->sender.uname, NULL); */ if(ais_msg->host.size == 0 || ais_str_eq(ais_msg->host.uname, local_uname)) { route_ais_message(ais_msg, FALSE); } else { ais_debug_3("Discarding Msg[%d] (dest=%s:%s, from=%s:%s)", ais_msg->id, ais_dest(&(ais_msg->host)), msg_type2text(ais_msg->host.type), ais_dest(&(ais_msg->sender)), msg_type2text(ais_msg->sender.type)); } } void pcmk_cluster_id_swab(void *msg) { struct crm_identify_msg_s *ais_msg = msg; ais_debug_3("Performing endian conversion..."); ais_msg->id = swab32 (ais_msg->id); ais_msg->pid = swab32 (ais_msg->pid); ais_msg->votes = swab32 (ais_msg->votes); ais_msg->processes = swab32 (ais_msg->processes); ais_msg->born_on = swab64 (ais_msg->born_on); ais_msg->header.size = swab32 (ais_msg->header.size); ais_msg->header.id = swab32 (ais_msg->header.id); } void pcmk_cluster_id_callback (ais_void_ptr *message, unsigned int nodeid) { int changed = 0; const struct crm_identify_msg_s *msg = message; if(nodeid != msg->id) { ais_err("Invalid message: Node %u claimed to be node %d", nodeid, msg->id); return; } ais_debug("Node update: %s (%s)", msg->uname, msg->version); changed = update_member( nodeid, msg->born_on, membership_seq, msg->votes, msg->processes, msg->uname, NULL, msg->version); if(changed) { send_member_notification(); } } struct res_overlay { coroipc_response_header_t header __attribute((aligned(8))); char buf[4096]; }; struct res_overlay *res_overlay = NULL; static void send_ipc_ack(void *conn) { if(res_overlay == NULL) { ais_malloc0(res_overlay, sizeof(struct res_overlay)); } res_overlay->header.id = CRM_MESSAGE_IPC_ACK; res_overlay->header.size = sizeof (coroipc_response_header_t); res_overlay->header.error = CS_OK; #ifdef AIS_COROSYNC pcmk_api->ipc_response_send (conn, res_overlay, res_overlay->header.size); #else openais_response_send (conn, res_overlay, res_overlay->header.size); #endif } /* local callbacks */ void pcmk_ipc(void *conn, ais_void_ptr *msg) { AIS_Message *mutable; int type = 0, size = 0; gboolean transient = TRUE; const AIS_Message *ais_msg = (const AIS_Message*)msg; void *async_conn = conn; ais_debug_2("Message from client %p", conn); if(check_message_sanity(msg, ((const AIS_Message*)msg)->data) == FALSE) { /* The message 
is corrupted - ignore */ send_ipc_ack(conn); msg = NULL; return; } /* Make a copy of the message here and ACK it * The message is only valid until a response is sent * but the response must also be sent _before_ we send anything else */ mutable = ais_msg_copy(ais_msg); AIS_ASSERT(check_message_sanity(mutable, mutable->data)); size = mutable->header.size; /* ais_malloc0(ais_msg, size); */ /* memcpy(ais_msg, msg, size); */ type = mutable->sender.type; ais_debug_3("type: %d local: %d conn: %p host type: %d ais: %d sender pid: %d child pid: %d size: %d", type, mutable->host.local, pcmk_children[type].conn, mutable->host.type, crm_msg_ais, mutable->sender.pid, pcmk_children[type].pid, ((int)SIZEOF(pcmk_children))); if(type > crm_msg_none && type < SIZEOF(pcmk_children)) { /* known child process */ transient = FALSE; } /* If this check fails, the order of pcmk_children probably * doesn't match that of the crm_ais_msg_types enum */ AIS_CHECK(transient || mutable->sender.pid == pcmk_children[type].pid, ais_err("Sender: %d, child[%d]: %d", mutable->sender.pid, type, pcmk_children[type].pid); return); if(transient == FALSE && type > crm_msg_none && mutable->host.local && pcmk_children[type].conn == NULL && mutable->host.type == crm_msg_ais) { AIS_CHECK(mutable->sender.type != mutable->sender.pid, ais_err("Pid=%d, type=%d", mutable->sender.pid, mutable->sender.type)); ais_info("Recorded connection %p for %s/%d", conn, pcmk_children[type].name, pcmk_children[type].pid); pcmk_children[type].conn = conn; pcmk_children[type].async_conn = async_conn; /* Make sure they have the latest membership */ if(pcmk_children[type].flags & crm_flag_members) { char *update = pcmk_generate_membership_data(); g_hash_table_replace(membership_notify_list, async_conn, async_conn); ais_info("Sending membership update "U64T" to %s", membership_seq, pcmk_children[type].name); send_client_msg(async_conn, crm_class_members, crm_msg_none,update); } } else if(transient) { AIS_CHECK(mutable->sender.type == mutable->sender.pid, ais_err("Pid=%d, type=%d", mutable->sender.pid, mutable->sender.type)); g_hash_table_replace(ipc_client_list, async_conn, GUINT_TO_POINTER(mutable->sender.pid)); } mutable->sender.id = local_nodeid; mutable->sender.size = local_uname_len; memset(mutable->sender.uname, 0, MAX_NAME); memcpy(mutable->sender.uname, local_uname, mutable->sender.size); route_ais_message(mutable, TRUE); send_ipc_ack(conn); msg = NULL; ais_free(mutable); } int pcmk_shutdown ( #ifdef AIS_COROSYNC void #else struct objdb_iface_ver0 *objdb #endif ) { int lpc = 0; static int phase = 0; static int max_wait = 0; static time_t next_log = 0; static int max = SIZEOF(pcmk_children); if(phase == 0) { ais_notice("Shuting down Pacemaker"); phase = max; } wait_active = FALSE; /* stop the wait loop */ for (; phase > 0; phase--) { /* dont stop anything with start_seq < 1 */ for (lpc = max - 1; lpc >= 0; lpc--) { if(phase != pcmk_children[lpc].start_seq) { continue; } #ifdef AIS_WHITETANK retry: #endif if(pcmk_children[lpc].pid) { pid_t pid = 0; int status = 0; time_t now = time(NULL); if(pcmk_children[lpc].respawn) { max_wait = 5; /* 5 * 30s = 2.5 minutes... 
plenty once the crmd is gone */ next_log = now + 30; pcmk_children[lpc].respawn = FALSE; stop_child(&(pcmk_children[lpc]), SIGTERM); } pid = wait4(pcmk_children[lpc].pid, &status, WNOHANG, NULL); if(pid < 0) { ais_perror("Call to wait4(%s/%d) failed - treating it as stopped", pcmk_children[lpc].name, pcmk_children[lpc].pid); } else if(pid == 0) { if(now >= next_log) { max_wait--; next_log = now + 30; ais_notice("Still waiting for %s (pid=%d, seq=%d) to terminate...", pcmk_children[lpc].name, pcmk_children[lpc].pid, pcmk_children[lpc].start_seq); if(max_wait <= 0 && phase < pcmk_children[crm_msg_crmd].start_seq) { ais_err("Child %s taking too long to terminate, sending SIGKILL", pcmk_children[lpc].name); stop_child(&(pcmk_children[lpc]), SIGKILL); } } #ifdef AIS_WHITETANK { struct timespec waitsleep = { .tv_sec = 1, .tv_nsec = 0 }; sched_yield (); nanosleep (&waitsleep, 0); goto retry; } #else /* Return control to corosync */ return -1; #endif } } /* cleanup */ ais_notice("%s confirmed stopped", pcmk_children[lpc].name); pcmk_children[lpc].async_conn = NULL; pcmk_children[lpc].conn = NULL; pcmk_children[lpc].pid = 0; } } send_cluster_id(); ais_notice("Shutdown complete"); /* TODO: Add back the logsys flush call once its written */ #ifdef AIS_WHITETANK /* Bug bnc#482847, bnc#482905 * * All cluster services are now down, we could allow OpenAIS to continue * unloading plugins, but its kinda new at that and there are a bunch of * race conditions that get exercised. * * Take the easy way out for now (on whitetank) and eventually fix for * CoroSync which is where everyone wants to be eventually anyway */ ais_notice("Forcing clean exit of OpenAIS"); exit(0); #endif return 0; } struct member_loop_data { char *string; }; void member_vote_count_fn(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; if (ais_str_eq(CRM_NODE_MEMBER, node->state)) { plugin_has_votes += node->votes; } } void member_loop_fn(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; struct member_loop_data *data = user_data; ais_debug_2("Dumping node %u", node->id); data->string = append_member(data->string, node); } char *pcmk_generate_membership_data(void) { int size = 0; struct member_loop_data data; size = 256; ais_malloc0(data.string, size); /* Ensure the list of active processes is up-to-date */ update_member(local_nodeid, 0, 0, -1, get_process_list(), local_uname, CRM_NODE_MEMBER, NULL); plugin_has_votes = 0; g_hash_table_foreach(membership_list, member_vote_count_fn, NULL); if(plugin_has_votes > plugin_expected_votes) { update_expected_votes(plugin_has_votes); } snprintf(data.string, size, "", membership_seq, plugin_has_quorum()?"true":"false", plugin_expected_votes, plugin_has_votes); g_hash_table_foreach(membership_list, member_loop_fn, &data); size = strlen(data.string); data.string = realloc(data.string, size + 9) ;/* 9 = + nul */ sprintf(data.string + size, ""); return data.string; } void pcmk_nodes(void *conn, ais_void_ptr *msg) { char *data = pcmk_generate_membership_data(); void *async_conn = conn; /* send the ACK before we send any other messages * - but after we no longer need to access the message */ send_ipc_ack(conn); msg = NULL; if(async_conn) { send_client_msg(async_conn, crm_class_members, crm_msg_none, data); } ais_free(data); } void pcmk_remove_member(void *conn, ais_void_ptr *msg) { const AIS_Message *ais_msg = msg; char *data = get_ais_data(ais_msg); send_ipc_ack(conn); msg = NULL; if(data != NULL) { char *bcast = ais_concat("remove-peer", data, ':'); 
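/* Broadcast the "remove-peer:" request so every node, not just the local one, purges the dead peer from its membership list */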
send_cluster_msg(crm_msg_ais, NULL, bcast); ais_info("Sent: %s", bcast); ais_free(bcast); } ais_free(data); } static void send_quorum_details(void *conn) { int size = 256; char *data = NULL; ais_malloc0(data, size); snprintf(data, size, "", membership_seq, plugin_has_quorum()?"true":"false", plugin_expected_votes, plugin_has_votes); send_client_msg(conn, crm_class_quorum, crm_msg_none, data); ais_free(data); } void pcmk_quorum(void *conn, ais_void_ptr *msg) { char *dummy = NULL; const AIS_Message *ais_msg = msg; char *data = get_ais_data(ais_msg); send_ipc_ack(conn); msg = NULL; /* Make sure the current number of votes is accurate */ dummy = pcmk_generate_membership_data(); ais_free(dummy); /* Calls without data just want the current quorum details */ if(data != NULL && strlen(data) > 0) { int value = ais_get_int(data, NULL); update_expected_votes(value); } send_quorum_details(conn); ais_free(data); } void pcmk_notify(void *conn, ais_void_ptr *msg) { const AIS_Message *ais_msg = msg; char *data = get_ais_data(ais_msg); void *async_conn = conn; int enable = 0; int sender = ais_msg->sender.pid; send_ipc_ack(conn); msg = NULL; if(ais_str_eq("true", data)) { enable = 1; } ais_info("%s node notifications for child %d (%p)", enable?"Enabling":"Disabling", sender, async_conn); if(enable) { g_hash_table_replace(membership_notify_list, async_conn, async_conn); } else { g_hash_table_remove(membership_notify_list, async_conn); } ais_free(data); } void pcmk_nodeid(void *conn, ais_void_ptr *msg) { static int counter = 0; struct crm_ais_nodeid_resp_s resp; ais_debug_2("Sending local nodeid: %d to %p[%d]", local_nodeid, conn, counter); resp.header.id = crm_class_nodeid; resp.header.size = sizeof (struct crm_ais_nodeid_resp_s); resp.header.error = CS_OK; resp.id = local_nodeid; resp.counter = counter++; memset(resp.uname, 0, MAX_NAME); memcpy(resp.uname, local_uname, local_uname_len); memset(resp.cname, 0, MAX_NAME); memcpy(resp.cname, local_cname, local_cname_len); #ifdef AIS_COROSYNC pcmk_api->ipc_response_send (conn, &resp, resp.header.size); #else openais_response_send (conn, &resp, resp.header.size); #endif } static gboolean ghash_send_update(gpointer key, gpointer value, gpointer data) { if(send_client_msg(value, crm_class_members, crm_msg_none, data) != 0) { /* remove it */ return TRUE; } return FALSE; } void send_member_notification(void) { char *update = pcmk_generate_membership_data(); ais_info("Sending membership update "U64T" to %d children", membership_seq, g_hash_table_size(membership_notify_list)); g_hash_table_foreach_remove(membership_notify_list, ghash_send_update, update); ais_free(update); } gboolean check_message_sanity(const AIS_Message *msg, const char *data) { gboolean sane = TRUE; gboolean repaired = FALSE; int dest = msg->host.type; int tmp_size = msg->header.size - sizeof(AIS_Message); if(sane && msg->header.size == 0) { ais_err("Message with no size"); sane = FALSE; } if(sane && msg->header.error != CS_OK) { ais_err("Message header contains an error: %d", msg->header.error); sane = FALSE; } AIS_CHECK(msg->header.size > sizeof(AIS_Message), ais_err("Message %d size too small: %d < %zu", msg->header.id, msg->header.size, sizeof(AIS_Message)); return FALSE); if(sane && ais_data_len(msg) != tmp_size) { ais_warn("Message payload size is incorrect: expected %d, got %d", ais_data_len(msg), tmp_size); sane = TRUE; } if(sane && ais_data_len(msg) == 0) { ais_err("Message with no payload"); sane = FALSE; } if(sane && data && msg->is_compressed == FALSE) { int str_size = strlen(data) + 1; 
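/* For uncompressed payloads the advertised length must match strlen(data)+1; a mismatch indicates a corrupted message */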
if(ais_data_len(msg) != str_size) { int lpc = 0; ais_err("Message payload is corrupted: expected %d bytes, got %d", ais_data_len(msg), str_size); sane = FALSE; for(lpc = (str_size - 10); lpc < msg->size; lpc++) { if(lpc < 0) { lpc = 0; } ais_debug_2("bad_data[%d]: %d / '%c'", lpc, data[lpc], data[lpc]); } } } if(sane == FALSE) { AIS_CHECK(sane, ais_err("Invalid message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)", msg->id, ais_dest(&(msg->host)), msg_type2text(dest), ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), msg->sender.pid, msg->is_compressed, ais_data_len(msg), msg->header.size)); } else if(repaired) { ais_err("Repaired message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)", msg->id, ais_dest(&(msg->host)), msg_type2text(dest), ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), msg->sender.pid, msg->is_compressed, ais_data_len(msg), msg->header.size); } else { ais_debug_3("Verified message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)", msg->id, ais_dest(&(msg->host)), msg_type2text(dest), ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), msg->sender.pid, msg->is_compressed, ais_data_len(msg), msg->header.size); } return sane; } static int delivered_transient = 0; static void deliver_transient_msg(gpointer key, gpointer value, gpointer user_data) { int pid = GPOINTER_TO_INT(value); AIS_Message *mutable = user_data; if(pid == mutable->host.type) { int rc = send_client_ipc(key, mutable); delivered_transient++; ais_info("Sent message to %s.%d (rc=%d)", ais_dest(&(mutable->host)), pid, rc); if(rc != 0) { ais_warn("Sending message to %s.%d failed (rc=%d)", ais_dest(&(mutable->host)), pid, rc); log_ais_message(LOG_DEBUG, mutable); } } } gboolean route_ais_message(const AIS_Message *msg, gboolean local_origin) { int rc = 0; int dest = msg->host.type; const char *reason = "unknown"; AIS_Message *mutable = ais_msg_copy(msg); static int service_id = SERVICE_ID_MAKE(PCMK_SERVICE_ID, 0); ais_debug_3("Msg[%d] (dest=%s:%s, from=%s:%s.%d, remote=%s, size=%d)", mutable->id, ais_dest(&(mutable->host)), msg_type2text(dest), ais_dest(&(mutable->sender)), msg_type2text(mutable->sender.type), mutable->sender.pid, local_origin?"false":"true", ais_data_len((mutable))); if(local_origin == FALSE) { if(mutable->host.size == 0 || ais_str_eq(local_uname, mutable->host.uname)) { mutable->host.local = TRUE; } } if(check_message_sanity(mutable, mutable->data) == FALSE) { /* Dont send this message to anyone */ rc = 1; goto bail; } if(mutable->host.local) { void *conn = NULL; const char *lookup = NULL; if(dest == crm_msg_ais) { process_ais_message(mutable); goto bail; } else if(dest == crm_msg_lrmd) { /* lrmd messages are routed via the crm */ dest = crm_msg_crmd; } else if(dest == crm_msg_te) { /* te messages are routed via the crm */ dest = crm_msg_crmd; } else if(dest >= SIZEOF(pcmk_children)) { /* Transient client */ delivered_transient = 0; g_hash_table_foreach(ipc_client_list, deliver_transient_msg, mutable); if(delivered_transient) { ais_debug_2("Sent message to %d transient clients: %d", delivered_transient, dest); goto bail; } else { /* try the crmd */ ais_debug_2("Sending message to transient client %d via crmd", dest); dest = crm_msg_crmd; } } else if(dest == 0) { ais_err("Invalid destination: %d", dest); log_ais_message(LOG_ERR, mutable); log_printf(LOG_ERR, "%s", get_ais_data(mutable)); rc = 1; goto bail; } lookup = msg_type2text(dest); conn = pcmk_children[dest].async_conn; /* the cluster fails in weird and 
wonderfully obscure ways when this is not true */ AIS_ASSERT(ais_str_eq(lookup, pcmk_children[dest].name)); if(mutable->header.id == service_id) { mutable->header.id = 0; /* reset this back to zero for IPC messages */ } else if(mutable->header.id != 0) { ais_err("reset header id back to zero from %d", mutable->header.id); mutable->header.id = 0; /* reset this back to zero for IPC messages */ } reason = "ipc delivery failed"; rc = send_client_ipc(conn, mutable); } else if(local_origin) { /* forward to other hosts */ ais_debug_3("Forwarding to cluster"); reason = "cluster delivery failed"; rc = send_cluster_msg_raw(mutable); } if(rc != 0) { ais_warn("Sending message to %s.%s failed: %s (rc=%d)", ais_dest(&(mutable->host)), msg_type2text(dest), reason, rc); log_ais_message(LOG_DEBUG, mutable); } bail: ais_free(mutable); return rc==0?TRUE:FALSE; } int send_cluster_msg_raw(const AIS_Message *ais_msg) { int rc = 0; struct iovec iovec; static uint32_t msg_id = 0; AIS_Message *mutable = ais_msg_copy(ais_msg); AIS_ASSERT(local_nodeid != 0); AIS_ASSERT(ais_msg->header.size == (sizeof(AIS_Message) + ais_data_len(ais_msg))); if(mutable->id == 0) { msg_id++; AIS_CHECK(msg_id != 0 /* detect wrap-around */, msg_id++; ais_err("Message ID wrapped around")); mutable->id = msg_id; } mutable->header.error = CS_OK; mutable->header.id = SERVICE_ID_MAKE(PCMK_SERVICE_ID, 0); mutable->sender.id = local_nodeid; mutable->sender.size = local_uname_len; memset(mutable->sender.uname, 0, MAX_NAME); memcpy(mutable->sender.uname, local_uname, mutable->sender.size); iovec.iov_base = (char *)mutable; iovec.iov_len = mutable->header.size; ais_debug_3("Sending message (size=%u)", (unsigned int)iovec.iov_len); #if AIS_COROSYNC rc = pcmk_api->totem_mcast(&iovec, 1, TOTEMPG_SAFE); #else rc = totempg_groups_mcast_joined(openais_group_handle, &iovec, 1, TOTEMPG_SAFE); #endif if(rc == 0 && mutable->is_compressed == FALSE) { ais_debug_2("Message sent: %.80s", mutable->data); } AIS_CHECK(rc == 0, ais_err("Message not sent (%d): %.120s", rc, mutable->data)); ais_free(mutable); return rc; } #define min(x,y) (x)<(y)?(x):(y) void send_cluster_id(void) { int rc = 0; int lpc = 0; int len = 0; struct iovec iovec; struct crm_identify_msg_s *msg = NULL; AIS_ASSERT(local_nodeid != 0); if(local_born_on == 0 && have_reliable_membership_id) { local_born_on = membership_seq; } ais_malloc0(msg, sizeof(struct crm_identify_msg_s)); msg->header.size = sizeof(struct crm_identify_msg_s); msg->id = local_nodeid; /* msg->header.error = CS_OK; */ msg->header.id = SERVICE_ID_MAKE(PCMK_SERVICE_ID, 1); len = min(local_uname_len, MAX_NAME-1); memset(msg->uname, 0, MAX_NAME); memcpy(msg->uname, local_uname, len); len = min(strlen(VERSION), MAX_NAME-1); memset(msg->version, 0, MAX_NAME); memcpy(msg->version, VERSION, len); msg->votes = 1; msg->pid = getpid(); msg->processes = get_process_list(); msg->born_on = local_born_on; ais_debug("Local update: id=%u, born="U64T", seq="U64T"", local_nodeid, local_born_on, membership_seq); update_member( local_nodeid, local_born_on, membership_seq, msg->votes, msg->processes, NULL, NULL, VERSION); iovec.iov_base = (char *)msg; iovec.iov_len = msg->header.size; #if AIS_COROSYNC rc = pcmk_api->totem_mcast(&iovec, 1, TOTEMPG_SAFE); #else rc = totempg_groups_mcast_joined(openais_group_handle, &iovec, 1, TOTEMPG_SAFE); #endif AIS_CHECK(rc == 0, ais_err("Message not sent (%d)", rc)); ais_free(msg); } static gboolean ghash_send_removal(gpointer key, gpointer value, gpointer data) { send_quorum_details(value); 
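/* Invoked via g_hash_table_foreach_remove(); returning TRUE evicts clients whose rmpeer notification could not be delivered */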
if(send_client_msg(value, crm_class_rmpeer, crm_msg_none, data) != 0) { /* remove it */ return TRUE; } return FALSE; } void ais_remove_peer(char *node_id) { uint32_t id = ais_get_int(node_id, NULL); crm_node_t *node = g_hash_table_lookup(membership_list, GUINT_TO_POINTER(id)); if(node == NULL) { ais_info("Peer %u is unknown", id); } else if(ais_str_eq(CRM_NODE_MEMBER, node->state)) { ais_warn("Peer %u/%s is still active", id, node->uname); } else if(g_hash_table_remove(membership_list, GUINT_TO_POINTER(id))) { plugin_expected_votes--; ais_notice("Removed dead peer %u from the membership list", id); ais_info("Sending removal of %u to %d children", id, g_hash_table_size(membership_notify_list)); g_hash_table_foreach_remove(membership_notify_list, ghash_send_removal, node_id); } else { ais_warn("Peer %u/%s was not removed", id, node->uname); } } gboolean process_ais_message(const AIS_Message *msg) { int len = ais_data_len(msg); char *data = get_ais_data(msg); do_ais_log(LOG_DEBUG, "Msg[%d] (dest=%s:%s, from=%s:%s.%d, remote=%s, size=%d): %.90s", msg->id, ais_dest(&(msg->host)), msg_type2text(msg->host.type), ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), msg->sender.pid, msg->sender.uname==local_uname?"false":"true", ais_data_len(msg), data); if(data && len > 12 && strncmp("remove-peer:", data, 12) == 0) { char *node = data+12; ais_remove_peer(node); } ais_free(data); return TRUE; } static void member_dump_fn(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; ais_info(" node id:%u, uname=%s state=%s processes=%.16x born="U64T" seen="U64T" addr=%s version=%s", node->id, node->uname?node->uname:"-unknown-", node->state, node->processes, node->born, node->last_seen, node->addr?node->addr:"-unknown-", node->version?node->version:"-unknown-"); } void pcmk_exec_dump(void) { /* Called after SIG_USR2 */ process_ais_conf(); ais_info("Local id: %u, uname: %s, born: "U64T, local_nodeid, local_uname, local_born_on); ais_info("Membership id: "U64T", quorate: %s, expected: %u, actual: %u", membership_seq, plugin_has_quorum()?"true":"false", plugin_expected_votes, plugin_has_votes); g_hash_table_foreach(membership_list, member_dump_fn, NULL); } diff --git a/lib/ais/utils.c b/lib/ais/utils.c index 3ffd5846b1..f49c0afa46 100644 --- a/lib/ais/utils.c +++ b/lib/ais/utils.c @@ -1,710 +1,710 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "./utils.h" #include "./plugin.h" struct pcmk_env_s pcmk_env; void log_ais_message(int level, const AIS_Message *msg) { char *data = get_ais_data(msg); log_printf(level, "Msg[%d] (dest=%s:%s, from=%s:%s.%d, remote=%s, size=%d): %.90s", msg->id, ais_dest(&(msg->host)), msg_type2text(msg->host.type), ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), msg->sender.pid, msg->sender.uname==local_uname?"false":"true", ais_data_len(msg), data); /* do_ais_log(level, */ /* "Msg[%d] (dest=%s:%s, from=%s:%s.%d, remote=%s, size=%d): %.90s", */ /* msg->id, ais_dest(&(msg->host)), msg_type2text(msg->host.type), */ /* ais_dest(&(msg->sender)), msg_type2text(msg->sender.type), */ /* msg->sender.pid, */ /* msg->sender.uname==local_uname?"false":"true", */ /* ais_data_len(msg), data); */ ais_free(data); } /* static gboolean ghash_find_by_uname(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; int id = GPOINTER_TO_INT(user_data); if (node->id == id) { return TRUE; } return FALSE; } */ static int ais_string_to_boolean(const char * s) { int rc = 0; if(s == NULL) { return rc; } if(strcasecmp(s, "true") == 0 || strcasecmp(s, "on") == 0 || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) { rc = 1; } return rc; } static char *opts_default[] = { NULL, NULL }; static char *opts_vgrind[] = { NULL, NULL, NULL }; gboolean spawn_child(crm_child_t *child) { int lpc = 0; uid_t uid = 0; struct rlimit oflimits; struct passwd *pwentry = NULL; gboolean use_valgrind = FALSE; const char *devnull = "/dev/null"; const char *env_valgrind = getenv("HA_VALGRIND_ENABLED"); if(child->command == NULL) { ais_info("Nothing to do for child \"%s\"", child->name); return TRUE; } if(env_valgrind == NULL) { use_valgrind = FALSE; } else if(ais_string_to_boolean(env_valgrind)) { use_valgrind = TRUE; } else if(strstr(env_valgrind, child->name)) { use_valgrind = TRUE; } if(use_valgrind && strlen(VALGRIND_BIN) == 0) { ais_warn("Cannot enable valgrind for %s:" " The location of the valgrind binary is unknown", child->name); use_valgrind = FALSE; } child->pid = fork(); AIS_ASSERT(child->pid != -1); if(child->pid > 0) { /* parent */ ais_info("Forked child %d for process %s%s", child->pid, child->name, use_valgrind?" 
(valgrind enabled)":""); } else { /* Setup the two alternate arg arrarys */ opts_vgrind[0] = ais_strdup(VALGRIND_BIN); opts_vgrind[1] = ais_strdup(child->command); opts_default[0] = opts_vgrind[1]; #if 0 /* Dont set the group for now - it prevents connection to the cluster */ if(gid && setgid(gid) < 0) { ais_perror("Could not set group to %d", gid); } #endif if(child->uid) { if(pcmk_user_lookup(child->uid, &uid, NULL) < 0) { ais_err("Invalid uid (%s) specified for %s", child->uid, child->name); return TRUE; } } if(uid && setuid(uid) < 0) { ais_perror("Could not set user to %d (%s)", uid, child->uid); } /* Close all open file descriptors */ getrlimit(RLIMIT_NOFILE, &oflimits); for (; lpc < oflimits.rlim_cur; lpc++) { close(lpc); } (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ setenv("HA_COMPRESSION", "bz2", 1); setenv("HA_cluster_type", "openais", 1); setenv("HA_debug", pcmk_env.debug, 1); setenv("HA_logfacility", pcmk_env.syslog, 1); setenv("HA_LOGFACILITY", pcmk_env.syslog, 1); setenv("HA_use_logd", pcmk_env.use_logd, 1); if(pcmk_env.logfile) { - setenv("HA_logfile", pcmk_env.logfile, 1); + setenv("HA_debugfile", pcmk_env.logfile, 1); } if(use_valgrind) { (void)execvp(VALGRIND_BIN, opts_vgrind); } else { (void)execvp(child->command, opts_default); } ais_perror("FATAL: Cannot exec %s", child->command); exit(100); } return TRUE; /* never reached */ } gboolean stop_child(crm_child_t *child, int signal) { if(signal == 0) { signal = SIGTERM; } if(child->command == NULL) { ais_info("Nothing to do for child \"%s\"", child->name); return TRUE; } ais_debug("Stopping CRM child \"%s\"", child->name); if (child->pid <= 0) { ais_debug_2("Client %s not running", child->name); return TRUE; } errno = 0; if(kill(child->pid, signal) == 0) { ais_notice("Sent -%d to %s: [%d]", signal, child->name, child->pid); } else { ais_perror("Sent -%d to %s: [%d]", signal, child->name, child->pid); } return TRUE; } void destroy_ais_node(gpointer data) { crm_node_t *node = data; ais_info("Destroying entry for node %u", node->id); ais_free(node->addr); ais_free(node->uname); ais_free(node->state); ais_free(node); } int update_member(unsigned int id, uint64_t born, uint64_t seq, int32_t votes, uint32_t procs, const char *uname, const char *state, const char *version) { int changed = 0; crm_node_t *node = NULL; node = g_hash_table_lookup(membership_list, GUINT_TO_POINTER(id)); if(node == NULL) { ais_malloc0(node, sizeof(crm_node_t)); ais_info("Creating entry for node %u born on "U64T"", id, seq); node->id = id; node->addr = NULL; node->state = ais_strdup("unknown"); g_hash_table_insert(membership_list, GUINT_TO_POINTER(id), node); node = g_hash_table_lookup(membership_list, GUINT_TO_POINTER(id)); } if(seq != 0) { node->last_seen = seq; } if(born != 0 && node->born != born) { changed = TRUE; node->born = born; ais_info("%p Node %u (%s) born on: "U64T, node, id, uname, born); } if(version != NULL) { ais_free(node->version); node->version = ais_strdup(version); } if(uname != NULL) { if(node->uname == NULL || ais_str_eq(node->uname, uname) == FALSE) { ais_info("%p Node %u now known as %s (was: %s)", node, id, uname, node->uname); ais_free(node->uname); node->uname = ais_strdup(uname); changed = TRUE; } } if(procs != 0 && procs != node->processes) { ais_info("Node %s now has process list: %.32x (%u)", node->uname, procs, procs); node->processes = procs; changed = TRUE; } if(votes >= 0 && votes != node->votes) { ais_info("Node 
%s now has %d quorum votes (was %d)", node->uname, votes, node->votes); node->votes = votes; changed = TRUE; } if(state != NULL) { if(node->state == NULL || ais_str_eq(node->state, state) == FALSE) { ais_free(node->state); node->state = ais_strdup(state); ais_info("Node %u/%s is now: %s", id, node->uname?node->uname:"unknown", state); changed = TRUE; } } AIS_ASSERT(node != NULL); return changed; } void delete_member(uint32_t id, const char *uname) { if(uname == NULL) { g_hash_table_remove(membership_list, GUINT_TO_POINTER(id)); return; } ais_err("Deleting by uname is not yet supported"); } const char *member_uname(uint32_t id) { crm_node_t *node = g_hash_table_lookup( membership_list, GUINT_TO_POINTER(id)); if(node == NULL) { return ".unknown."; } if(node->uname == NULL) { return ".pending."; } return node->uname; } char *append_member(char *data, crm_node_t *node) { int size = 1; /* nul */ int offset = 0; static int fixed_len = 4 + 8 + 7 + 6 + 6 + 7 + 11; if(data) { size = strlen(data); } offset = size; size += fixed_len; size += 32; /* node->id */ size += 100; /* node->seq, node->born */ size += strlen(node->state); if(node->uname) { size += (7 + strlen(node->uname)); } if(node->addr) { size += (6 + strlen(node->addr)); } if(node->version) { size += (9 + strlen(node->version)); } data = realloc(data, size); offset += snprintf(data + offset, size - offset, "id); if(node->uname) { offset += snprintf(data + offset, size - offset, "uname=\"%s\" ", node->uname); } offset += snprintf(data + offset, size - offset, "state=\"%s\" ", node->state); offset += snprintf(data + offset, size - offset, "born=\""U64T"\" ", node->born); offset += snprintf(data + offset, size - offset, "seen=\""U64T"\" ", node->last_seen); offset += snprintf(data + offset, size - offset, "votes=\"%d\" ", node->votes); offset += snprintf(data + offset, size - offset, "processes=\"%u\" ", node->processes); if(node->addr) { offset += snprintf(data + offset, size - offset, "addr=\"%s\" ", node->addr); } if(node->version) { offset += snprintf(data + offset, size - offset, "version=\"%s\" ", node->version); } offset += snprintf(data + offset, size - offset, "/>"); return data; } void swap_sender(AIS_Message *msg) { int tmp = 0; char tmp_s[256]; tmp = msg->host.type; msg->host.type = msg->sender.type; msg->sender.type = tmp; tmp = msg->host.type; msg->host.size = msg->sender.type; msg->sender.type = tmp; memcpy(tmp_s, msg->host.uname, 256); memcpy(msg->host.uname, msg->sender.uname, 256); memcpy(msg->sender.uname, tmp_s, 256); } char *get_ais_data(const AIS_Message *msg) { int rc = BZ_OK; char *uncompressed = NULL; unsigned int new_size = msg->size + 1; if(msg->is_compressed == FALSE) { uncompressed = strdup(msg->data); } else { ais_malloc0(uncompressed, new_size); rc = BZ2_bzBuffToBuffDecompress( uncompressed, &new_size, (char*)msg->data, msg->compressed_size, 1, 0); if(rc != BZ_OK) { ais_info("rc=%d, new=%u expected=%u", rc, new_size, msg->size); } AIS_ASSERT(rc == BZ_OK); AIS_ASSERT(new_size == msg->size); } return uncompressed; } int send_cluster_msg( enum crm_ais_msg_types type, const char *host, const char *data) { int rc = 0; int data_len = 0; AIS_Message *ais_msg = NULL; int total_size = sizeof(AIS_Message); AIS_ASSERT(local_nodeid != 0); if(data != NULL) { data_len = 1 + strlen(data); total_size += data_len; } ais_malloc0(ais_msg, total_size); ais_msg->header.size = total_size; ais_msg->header.error = CS_OK; ais_msg->header.id = 0; ais_msg->size = data_len; memcpy(ais_msg->data, data, data_len); ais_msg->sender.type = 
crm_msg_ais; ais_msg->host.type = type; ais_msg->host.id = 0; if(host) { ais_msg->host.size = strlen(host); memset(ais_msg->host.uname, 0, MAX_NAME); memcpy(ais_msg->host.uname, host, ais_msg->host.size); /* ais_msg->host.id = nodeid_lookup(host); */ } else { ais_msg->host.type = type; ais_msg->host.size = 0; memset(ais_msg->host.uname, 0, MAX_NAME); } rc = send_cluster_msg_raw(ais_msg); ais_free(ais_msg); return rc; } extern struct corosync_api_v1 *pcmk_api; int send_client_ipc(void *conn, const AIS_Message *ais_msg) { int rc = -1; if (conn == NULL) { rc = -2; } else if (!libais_connection_active(conn)) { ais_warn("Connection no longer active"); rc = -3; /* } else if ((queue->size - 1) == queue->used) { */ /* ais_err("Connection is throttled: %d", queue->size); */ } else { #ifdef AIS_WHITETANK rc = openais_dispatch_send (conn, (void*)ais_msg, ais_msg->header.size); #endif #ifdef AIS_COROSYNC rc = pcmk_api->ipc_dispatch_send (conn, ais_msg, ais_msg->header.size); #endif } return rc; } int send_client_msg( void *conn, enum crm_ais_msg_class class, enum crm_ais_msg_types type, const char *data) { int rc = 0; int data_len = 0; int total_size = sizeof(AIS_Message); AIS_Message *ais_msg = NULL; static int msg_id = 0; AIS_ASSERT(local_nodeid != 0); msg_id++; AIS_ASSERT(msg_id != 0 /* wrap-around */); if(data != NULL) { data_len = 1 + strlen(data); } total_size += data_len; ais_malloc0(ais_msg, total_size); ais_msg->id = msg_id; ais_msg->header.id = class; ais_msg->header.size = total_size; ais_msg->header.error = CS_OK; ais_msg->size = data_len; memcpy(ais_msg->data, data, data_len); ais_msg->host.size = 0; ais_msg->host.type = type; memset(ais_msg->host.uname, 0, MAX_NAME); ais_msg->host.id = 0; ais_msg->sender.type = crm_msg_ais; ais_msg->sender.size = local_uname_len; memset(ais_msg->sender.uname, 0, MAX_NAME); memcpy(ais_msg->sender.uname, local_uname, ais_msg->sender.size); ais_msg->sender.id = local_nodeid; rc = send_client_ipc(conn, ais_msg); if(rc != 0) { ais_warn("Sending message to %s failed: %d", msg_type2text(type), rc); log_ais_message(LOG_DEBUG, ais_msg); return FALSE; } ais_free(ais_msg); return rc; } char * ais_concat(const char *prefix, const char *suffix, char join) { int len = 0; char *new_str = NULL; AIS_ASSERT(prefix != NULL); AIS_ASSERT(suffix != NULL); len = strlen(prefix) + strlen(suffix) + 2; ais_malloc0(new_str, (len)); sprintf(new_str, "%s%c%s", prefix, join, suffix); new_str[len-1] = 0; return new_str; } hdb_handle_t config_find_init(struct corosync_api_v1 *config, char *name) { hdb_handle_t local_handle = 0; #ifdef AIS_COROSYNC config->object_find_create(OBJECT_PARENT_HANDLE, name, strlen(name), &local_handle); ais_info("Local handle: %lld for %s", local_handle, name); #endif #ifdef AIS_WHITETANK config->object_find_reset (OBJECT_PARENT_HANDLE); #endif return local_handle; } hdb_handle_t config_find_next(struct corosync_api_v1 *config, char *name, hdb_handle_t top_handle) { int rc = 0; hdb_handle_t local_handle = 0; #ifdef AIS_COROSYNC rc = config->object_find_next (top_handle, &local_handle); #endif #ifdef AIS_WHITETANK rc = config->object_find(OBJECT_PARENT_HANDLE, name, strlen (name), &local_handle); #endif if(rc < 0) { ais_info("No additional configuration supplied for: %s", name); local_handle = 0; } else { ais_info("Processing additional %s options...", name); } return local_handle; } void config_find_done(struct corosync_api_v1 *config, hdb_handle_t local_handle) { #ifdef AIS_COROSYNC config->object_find_destroy (local_handle); #endif } int get_config_opt( 
struct corosync_api_v1 *config, hdb_handle_t object_service_handle, char *key, char **value, const char *fallback) { char *env_key = NULL; *value = NULL; if(object_service_handle > 0) { config->object_key_get( object_service_handle, key, strlen(key), (void**)value, NULL); } if (*value) { ais_info("Found '%s' for option: %s", *value, key); return 0; } env_key = ais_concat("HA", key, '_'); *value = getenv(env_key); ais_free(env_key); if (*value) { ais_info("Found '%s' in ENV for option: %s", *value, key); return 0; } if(fallback) { ais_info("Defaulting to '%s' for option: %s", fallback, key); *value = ais_strdup(fallback); } else { ais_info("No default for option: %s", key); } return -1; } int ais_get_boolean(const char * value) { if(value == NULL) { return 0; } else if (strcasecmp(value, "true") == 0 || strcasecmp(value, "on") == 0 || strcasecmp(value, "yes") == 0 || strcasecmp(value, "y") == 0 || strcasecmp(value, "1") == 0){ return 1; } return 0; } long long ais_get_int(const char *text, char **end_text) { long long result = -1; char *local_end_text = NULL; errno = 0; if(text != NULL) { #ifdef ANSI_ONLY if(end_text != NULL) { result = strtol(text, end_text, 10); } else { result = strtol(text, &local_end_text, 10); } #else if(end_text != NULL) { result = strtoll(text, end_text, 10); } else { result = strtoll(text, &local_end_text, 10); } #endif if(errno == EINVAL) { ais_err("Conversion of %s failed", text); result = -1; } else if(errno == ERANGE) { ais_err("Conversion of %s was clipped: %lld", text, result); } else if(errno != 0) { ais_perror("Conversion of %s failed:", text); } if(local_end_text != NULL && local_end_text[0] != '\0') { ais_err("Characters left over after parsing '%s': '%s'", text, local_end_text); } } return result; } #define PW_BUFFER_LEN 500 int pcmk_user_lookup(const char *name, uid_t *uid, gid_t *gid) { int rc = -1; char *buffer = NULL; struct passwd pwd; struct passwd *pwentry = NULL; ais_malloc0(buffer, PW_BUFFER_LEN); getpwnam_r(name, &pwd, buffer, PW_BUFFER_LEN, &pwentry); if(pwentry) { rc = 0; if(uid) { *uid = pwentry->pw_uid; } if(gid) { *gid = pwentry->pw_gid; } ais_debug("Cluster user %s has uid=%d gid=%d", name, pwentry->pw_uid, pwentry->pw_gid); } else { ais_err("Cluster user %s does not exist", name); } ais_free(buffer); return rc; } diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c index b3d247d96f..e6d9f720b6 100644 --- a/lib/cib/cib_ops.c +++ b/lib/cib/cib_ops.c @@ -1,995 +1,1013 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum cib_errors cib_process_query( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { xmlNode *obj_root = NULL; enum cib_errors result = cib_ok; crm_debug_2("Processing \"%s\" event for section=%s", op, crm_str(section)); if(options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } CRM_CHECK(*answer == NULL, free_xml(*answer)); *answer = NULL; if (safe_str_eq(XML_CIB_TAG_SECTION_ALL, section)) { section = NULL; } obj_root = get_object_root(section, existing_cib); if(obj_root == NULL) { result = cib_NOTEXISTS; } else { *answer = obj_root; } if(result == cib_ok && *answer == NULL) { crm_err("Error creating query response"); result = cib_output_data; } return result; } enum cib_errors cib_process_erase( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { enum cib_errors result = cib_ok; crm_debug_2("Processing \"%s\" event", op); *answer = NULL; free_xml(*result_cib); *result_cib = createEmptyCib(); copy_in_properties(*result_cib, existing_cib); cib_update_counter(*result_cib, XML_ATTR_GENERATION, FALSE); return result; } enum cib_errors cib_process_upgrade( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { int rc = 0; int new_version = 0; int current_version = 0; const char *value = NULL; *answer = NULL; crm_debug_2("Processing \"%s\" event", op); value = crm_element_value_copy(existing_cib, XML_ATTR_VALIDATION); if(value != NULL) { current_version = get_schema_version(value); } rc = update_validation(result_cib, &new_version, TRUE, TRUE); if(new_version > current_version) { return cib_ok; } return rc; } enum cib_errors cib_process_bump( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { enum cib_errors result = cib_ok; crm_debug_2("Processing \"%s\" event for epoch=%s", op, crm_str(crm_element_value(existing_cib, XML_ATTR_GENERATION))); *answer = NULL; cib_update_counter(*result_cib, XML_ATTR_GENERATION, FALSE); return result; } enum cib_errors cib_update_counter(xmlNode *xml_obj, const char *field, gboolean reset) { char *new_value = NULL; char *old_value = NULL; int int_value = -1; if(reset == FALSE && crm_element_value(xml_obj, field) != NULL) { old_value = crm_element_value_copy(xml_obj, field); } if(old_value != NULL) { crm_malloc0(new_value, 128); int_value = atoi(old_value); sprintf(new_value, "%d", ++int_value); } else { new_value = crm_strdup("1"); } crm_debug_4("%s %d(%s)->%s", field, int_value, crm_str(old_value), crm_str(new_value)); crm_xml_add(xml_obj, field, new_value); crm_free(new_value); crm_free(old_value); return cib_ok; } enum cib_errors cib_process_replace( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { const char *tag = NULL; gboolean verbose = FALSE; enum cib_errors result = cib_ok; crm_debug_2("Processing \"%s\" 
event for section=%s", op, crm_str(section)); if(options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } *answer = NULL; if (input == NULL) { return cib_NOOBJECT; } tag = crm_element_name(input); if (options & cib_verbose) { verbose = TRUE; } if(safe_str_eq(XML_CIB_TAG_SECTION_ALL, section)) { section = NULL; } else if(safe_str_eq(tag, section)) { section = NULL; } if(safe_str_eq(tag, XML_TAG_CIB)) { int updates = 0; int epoch = 0; int admin_epoch = 0; int replace_updates = 0; int replace_epoch = 0; int replace_admin_epoch = 0; const char *reason = NULL; cib_version_details( existing_cib, &admin_epoch, &epoch, &updates); cib_version_details(input, &replace_admin_epoch, &replace_epoch, &replace_updates); if(replace_admin_epoch < admin_epoch) { reason = XML_ATTR_GENERATION_ADMIN; } else if(replace_admin_epoch > admin_epoch) { /* no more checks */ } else if(replace_epoch < epoch) { reason = XML_ATTR_GENERATION; } else if(replace_epoch > epoch) { /* no more checks */ } else if(replace_updates < updates) { reason = XML_ATTR_NUMUPDATES; } if(reason != NULL) { crm_warn("Replacement %d.%d.%d not applied to %d.%d.%d:" " current %s is greater than the replacement", replace_admin_epoch, replace_epoch, replace_updates, admin_epoch, epoch, updates, reason); result = cib_old_data; } free_xml(*result_cib); *result_cib = copy_xml(input); } else { xmlNode *obj_root = NULL; gboolean ok = TRUE; obj_root = get_object_root(section, *result_cib); ok = replace_xml_child(NULL, obj_root, input, FALSE); if(ok == FALSE) { crm_debug_2("No matching object to replace"); result = cib_NOTEXISTS; } } return result; } enum cib_errors cib_process_delete( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { xmlNode *obj_root = NULL; crm_debug_2("Processing \"%s\" event", op); if(options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } if(input == NULL) { crm_err("Cannot perform modification with no data"); return cib_NOOBJECT; } obj_root = get_object_root(section, *result_cib); crm_validate_data(input); crm_validate_data(*result_cib); if(replace_xml_child(NULL, obj_root, input, TRUE) == FALSE) { crm_debug_2("No matching object to delete"); } return cib_ok; } enum cib_errors cib_process_modify( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { xmlNode *obj_root = NULL; crm_debug_2("Processing \"%s\" event", op); if(options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } if(input == NULL) { crm_err("Cannot perform modification with no data"); return cib_NOOBJECT; } obj_root = get_object_root(section, *result_cib); crm_validate_data(input); crm_validate_data(*result_cib); if(obj_root == NULL) { xmlNode *tmp_section = NULL; const char *path = get_object_parent(section); if(path == NULL) { return cib_bad_section; } tmp_section = create_xml_node(NULL, section); cib_process_xpath( CIB_OP_CREATE, 0, path, NULL, tmp_section, NULL, result_cib, answer); free_xml(tmp_section); obj_root = get_object_root(section, *result_cib); } CRM_CHECK(obj_root != NULL, return cib_unknown); if(update_xml_child(obj_root, input) == FALSE) { if(options & cib_can_create) { add_node_copy(obj_root, input); } else { return cib_NOTEXISTS; } } return cib_ok; } static int 
update_cib_object(xmlNode *parent, xmlNode *update) { const char *replace = NULL; const char *object_name = NULL; const char *object_id = NULL; xmlNode *target = NULL; int result = cib_ok; CRM_CHECK(update != NULL, return cib_NOOBJECT); CRM_CHECK(parent != NULL, return cib_NOPARENT); object_name = crm_element_name(update); CRM_CHECK(object_name != NULL, return cib_NOOBJECT); object_id = ID(update); crm_debug_3("Processing: <%s id=%s>", crm_str(object_name), crm_str(object_id)); if(object_id == NULL) { /* placeholder object */ target = find_xml_node(parent, object_name, FALSE); } else { target = find_entity(parent, object_name, object_id); } if(target == NULL) { target = create_xml_node(parent, object_name); } crm_debug_2("Found node <%s id=%s> to update", crm_str(object_name), crm_str(object_id)); replace = crm_element_value(update, XML_CIB_ATTR_REPLACE); if(replace != NULL) { xmlNode *remove = NULL; int last = 0, lpc = 0, len = 0; len = strlen(replace); while(lpc <= len) { if(replace[lpc] == ',' || replace[lpc] == 0) { char *replace_item = NULL; if ( last == lpc ) { /* nothing to do */ last = lpc+1; goto incr; } crm_malloc0(replace_item, lpc - last + 1); strncpy(replace_item, replace+last, lpc-last); remove = find_xml_node(target, replace_item, FALSE); if(remove != NULL) { crm_debug_3("Replacing node <%s> in <%s>", replace_item, crm_element_name(target)); zap_xml_from_parent(target, remove); } crm_free(replace_item); last = lpc+1; } incr: lpc++; } xml_remove_prop(update, XML_CIB_ATTR_REPLACE); xml_remove_prop(target, XML_CIB_ATTR_REPLACE); } copy_in_properties(target, update); crm_debug_3("Processing children of <%s id=%s>", crm_str(object_name), crm_str(object_id)); xml_child_iter( update, a_child, int tmp_result = 0; crm_debug_3("Updating child <%s id=%s>", crm_element_name(a_child), ID(a_child)); tmp_result = update_cib_object(target, a_child); /* only the first error is likely to be interesting */ if(tmp_result != cib_ok) { crm_err("Error updating child <%s id=%s>", crm_element_name(a_child), ID(a_child)); if(result == cib_ok) { result = tmp_result; } } ); crm_debug_3("Finished with <%s id=%s>", crm_str(object_name), crm_str(object_id)); return result; } static int add_cib_object(xmlNode *parent, xmlNode *new_obj) { enum cib_errors result = cib_ok; const char *object_name = NULL; const char *object_id = NULL; xmlNode *equiv_node = NULL; if(new_obj != NULL) { object_name = crm_element_name(new_obj); } object_id = crm_element_value(new_obj, XML_ATTR_ID); crm_debug_3("Processing: <%s id=%s>", crm_str(object_name), crm_str(object_id)); if(new_obj == NULL || object_name == NULL) { result = cib_NOOBJECT; } else if(parent == NULL) { result = cib_NOPARENT; } else if(object_id == NULL) { /* placeholder object */ equiv_node = find_xml_node(parent, object_name, FALSE); } else { equiv_node = find_entity(parent, object_name, object_id); } if(result != cib_ok) { ; /* do nothing */ } else if(equiv_node != NULL) { result = cib_EXISTS; } else { result = update_cib_object(parent, new_obj); } return result; } enum cib_errors cib_process_create( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { xmlNode *failed = NULL; enum cib_errors result = cib_ok; xmlNode *update_section = NULL; crm_debug_2("Processing \"%s\" event for section=%s", op, crm_str(section)); if(safe_str_eq(XML_CIB_TAG_SECTION_ALL, section)) { section = NULL; } else if(safe_str_eq(XML_TAG_CIB, section)) { section = NULL; } else 
if(safe_str_eq(crm_element_name(input), XML_TAG_CIB)) { section = NULL; } CRM_CHECK(strcasecmp(CIB_OP_CREATE, op) == 0, return cib_operation); if(input == NULL) { crm_err("Cannot perform modification with no data"); return cib_NOOBJECT; } if(section == NULL) { return cib_process_modify(op, options, section, req, input, existing_cib, result_cib, answer); } failed = create_xml_node(NULL, XML_TAG_FAILED); update_section = get_object_root(section, *result_cib); if(safe_str_eq(crm_element_name(input), section)) { xml_child_iter(input, a_child, result = add_cib_object(update_section, a_child); if(update_results(failed, a_child, op, result)) { break; } ); } else { result = add_cib_object(update_section, input); update_results(failed, input, op, result); } if(xml_has_children(failed)) { CRM_CHECK(result != cib_ok, result = cib_unknown); } if (result != cib_ok) { crm_log_xml_err(failed, "CIB Update failures"); *answer = failed; } else { free_xml(failed); } return result; } enum cib_errors cib_process_diff( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { unsigned int log_level = LOG_DEBUG; const char *reason = NULL; gboolean apply_diff = TRUE; enum cib_errors result = cib_ok; int this_updates = 0; int this_epoch = 0; int this_admin_epoch = 0; int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; crm_debug_2("Processing \"%s\" event", op); cib_diff_version_details( input, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_element_value_int(existing_cib, XML_ATTR_GENERATION, &this_epoch); crm_element_value_int(existing_cib, XML_ATTR_NUMUPDATES, &this_updates); crm_element_value_int(existing_cib, XML_ATTR_GENERATION_ADMIN, &this_admin_epoch); if(this_epoch < 0) { this_epoch = 0; } if(this_updates < 0) { this_updates = 0; } if(this_admin_epoch < 0) { this_admin_epoch = 0; } if(diff_del_admin_epoch == diff_add_admin_epoch && diff_del_epoch == diff_add_epoch && diff_del_updates == diff_add_updates) { if(options & cib_force_diff) { apply_diff = FALSE; log_level = LOG_ERR; reason = "+ and - versions in the diff did not change in global update"; crm_log_xml_warn(input, "Bad global update"); } else if(diff_add_admin_epoch == -1 && diff_add_epoch == -1 && diff_add_updates == -1) { crm_err("Massaging diff versions"); diff_add_epoch = this_epoch; diff_add_updates = this_updates + 1; diff_add_admin_epoch = this_admin_epoch; diff_del_epoch = this_epoch; diff_del_updates = this_updates; diff_del_admin_epoch = this_admin_epoch; crm_log_xml_err(input, __FUNCTION__); } else { apply_diff = FALSE; log_level = LOG_ERR; reason = "+ and - versions in the diff did not change"; log_cib_diff(LOG_ERR, input, __FUNCTION__); } } if(apply_diff && diff_del_admin_epoch > this_admin_epoch) { result = cib_diff_resync; apply_diff = FALSE; log_level = LOG_INFO; reason = "current \""XML_ATTR_GENERATION_ADMIN"\" is less than required"; } else if(apply_diff && diff_del_admin_epoch < this_admin_epoch) { apply_diff = FALSE; log_level = LOG_WARNING; reason = "current \""XML_ATTR_GENERATION_ADMIN"\" is greater than required"; } else if(apply_diff && diff_del_epoch > this_epoch) { result = cib_diff_resync; apply_diff = FALSE; log_level = LOG_INFO; reason = "current \""XML_ATTR_GENERATION"\" is less than required"; } else if(apply_diff && diff_del_epoch < this_epoch) { 
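/*
 * Illustrative sketch (not part of this patch): reading the version triples
 * that cib_process_diff() checks.  cib_diff_version_details() is the helper
 * from cib_utils.c; the wrapper below is hypothetical.
 */
static void
example_log_diff_versions(xmlNode *diff)
{
    int add_admin = 0, add_epoch = 0, add_updates = 0;
    int del_admin = 0, del_epoch = 0, del_updates = 0;

    cib_diff_version_details(diff,
                             &add_admin, &add_epoch, &add_updates,
                             &del_admin, &del_epoch, &del_updates);

    /* the "-" (removed) side must match the current CIB exactly, otherwise
     * the diff is rejected or a full resync is requested */
    crm_info("Diff goes %d.%d.%d -> %d.%d.%d",
             del_admin, del_epoch, del_updates,
             add_admin, add_epoch, add_updates);
}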
apply_diff = FALSE; log_level = LOG_WARNING; reason = "current \""XML_ATTR_GENERATION"\" is greater than required"; } else if(apply_diff && diff_del_updates > this_updates) { result = cib_diff_resync; apply_diff = FALSE; log_level = LOG_INFO; reason = "current \""XML_ATTR_NUMUPDATES"\" is less than required"; } else if(apply_diff && diff_del_updates < this_updates) { apply_diff = FALSE; log_level = LOG_WARNING; reason = "current \""XML_ATTR_NUMUPDATES"\" is greater than required"; } if(apply_diff) { free_xml(*result_cib); *result_cib = NULL; if(apply_xml_diff(existing_cib, input, result_cib) == FALSE) { log_level = LOG_NOTICE; reason = "Failed application of an update diff"; if(options & cib_force_diff) { result = cib_diff_resync; } } } if(reason != NULL) { do_crm_log( log_level, "Diff %d.%d.%d -> %d.%d.%d not applied to %d.%d.%d: %s", diff_del_admin_epoch,diff_del_epoch,diff_del_updates, diff_add_admin_epoch,diff_add_epoch,diff_add_updates, this_admin_epoch,this_epoch,this_updates, reason); if(result == cib_ok) { result = cib_diff_failed; } } else if(apply_diff) { crm_debug_2("Diff %d.%d.%d -> %d.%d.%d was applied to %d.%d.%d", diff_del_admin_epoch,diff_del_epoch,diff_del_updates, diff_add_admin_epoch,diff_add_epoch,diff_add_updates, this_admin_epoch,this_epoch,this_updates); } return result; } gboolean apply_cib_diff(xmlNode *old, xmlNode *diff, xmlNode **new) { gboolean result = TRUE; const char *value = NULL; int this_updates = 0; int this_epoch = 0; int this_admin_epoch = 0; int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; CRM_CHECK(diff != NULL, return FALSE); CRM_CHECK(old != NULL, return FALSE); value = crm_element_value(old, XML_ATTR_GENERATION_ADMIN); this_admin_epoch = crm_parse_int(value, "0"); crm_debug_3("%s=%d (%s)", XML_ATTR_GENERATION_ADMIN, this_admin_epoch, value); value = crm_element_value(old, XML_ATTR_GENERATION); this_epoch = crm_parse_int(value, "0"); crm_debug_3("%s=%d (%s)", XML_ATTR_GENERATION, this_epoch, value); value = crm_element_value(old, XML_ATTR_NUMUPDATES); this_updates = crm_parse_int(value, "0"); crm_debug_3("%s=%d (%s)", XML_ATTR_NUMUPDATES, this_updates, value); cib_diff_version_details( diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); value = NULL; if(result && diff_del_admin_epoch != this_admin_epoch) { value = XML_ATTR_GENERATION_ADMIN; result = FALSE; crm_debug_3("%s=%d", value, diff_del_admin_epoch); } else if(result && diff_del_epoch != this_epoch) { value = XML_ATTR_GENERATION; result = FALSE; crm_debug_3("%s=%d", value, diff_del_epoch); } else if(result && diff_del_updates != this_updates) { value = XML_ATTR_NUMUPDATES; result = FALSE; crm_debug_3("%s=%d", value, diff_del_updates); } if(result) { xmlNode *tmp = NULL; xmlNode *diff_copy = copy_xml(diff); tmp = find_xml_node(diff_copy, "diff-removed", TRUE); if(tmp != NULL) { xml_remove_prop(tmp, XML_ATTR_GENERATION_ADMIN); xml_remove_prop(tmp, XML_ATTR_GENERATION); xml_remove_prop(tmp, XML_ATTR_NUMUPDATES); } tmp = find_xml_node(diff_copy, "diff-added", TRUE); if(tmp != NULL) { xml_remove_prop(tmp, XML_ATTR_GENERATION_ADMIN); xml_remove_prop(tmp, XML_ATTR_GENERATION); xml_remove_prop(tmp, XML_ATTR_NUMUPDATES); } result = apply_xml_diff(old, diff_copy, new); free_xml(diff_copy); } else { crm_err("target and diff %s values didnt match", value); } return result; } gboolean -cib_config_changed(xmlNode *diff) 
+cib_config_changed(xmlNode *last, xmlNode *next, xmlNode **diff) { gboolean config_changes = FALSE; xmlXPathObject *xpathObj = NULL; - if(diff == NULL) { + CRM_ASSERT(diff != NULL); + + *diff = diff_xml_object(last, next, FALSE); + if(*diff == NULL) { + char *digest_last = calculate_xml_digest(last, FALSE, TRUE); + char *digest_next = calculate_xml_digest(next, FALSE, TRUE); + + /* Detect ordering changes - important for groups and resource sets */ + if(safe_str_neq(digest_last, digest_next)) { + config_changes = TRUE; + crm_info("Detected ordering change: %s vs %s", digest_last, digest_next); + + /* Create a fake diff so that notifications will be sent */ + *diff = create_xml_node(NULL, "diff"); + create_xml_node(*diff, "diff-removed"); + create_xml_node(*diff, "diff-added"); + } + crm_free(digest_last); + crm_free(digest_next); goto done; } - xpathObj = xpath_search(diff, "//"XML_CIB_TAG_CONFIGURATION); + xpathObj = xpath_search(*diff, "//"XML_CIB_TAG_CONFIGURATION); if(xpathObj && xpathObj->nodesetval->nodeNr > 0) { config_changes = TRUE; goto done; } else if(xpathObj) { xmlXPathFreeObject(xpathObj); } - xpathObj = xpath_search(diff, "//"XML_TAG_CIB); + xpathObj = xpath_search(*diff, "//"XML_TAG_CIB); if(xpathObj) { int lpc = 0, max = xpathObj->nodesetval->nodeNr; for(lpc = 0; lpc < max; lpc++) { xmlNode *top = getXpathResult(xpathObj, lpc); xml_prop_iter(top, name, value, if(crm_str_eq(XML_ATTR_NUMUPDATES, name, TRUE) == FALSE) { config_changes = TRUE; goto done; } ); } } done: if(xpathObj) { xmlXPathFreeObject(xpathObj); } return config_changes; } xmlNode * diff_cib_object(xmlNode *old_cib, xmlNode *new_cib, gboolean suppress) { xmlNode *dest = NULL; xmlNode *src = NULL; const char *name = NULL; const char *value = NULL; xmlNode *diff = diff_xml_object(old_cib, new_cib, suppress); /* add complete version information */ src = old_cib; dest = find_xml_node(diff, "diff-removed", FALSE); if(src != NULL && dest != NULL) { name = XML_ATTR_GENERATION_ADMIN; value = crm_element_value(src, name); if(value == NULL) { value = "0"; } crm_xml_add(dest, name, value); name = XML_ATTR_GENERATION; value = crm_element_value(src, name); if(value == NULL) { value = "0"; } crm_xml_add(dest, name, value); name = XML_ATTR_NUMUPDATES; value = crm_element_value(src, name); if(value == NULL) { value = "0"; } crm_xml_add(dest, name, value); } src = new_cib; dest = find_xml_node(diff, "diff-added", FALSE); if(src != NULL && dest != NULL) { name = XML_ATTR_GENERATION_ADMIN; value = crm_element_value(src, name); if(value == NULL) { value = "0"; } crm_xml_add(dest, name, value); name = XML_ATTR_GENERATION; value = crm_element_value(src, name); if(value == NULL) { value = "0"; } crm_xml_add(dest, name, value); name = XML_ATTR_NUMUPDATES; value = crm_element_value(src, name); if(value == NULL) { value = "0"; } crm_xml_add(dest, name, value); } return diff; } enum cib_errors cib_process_xpath( const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { int lpc = 0; int max = 0; int rc = cib_ok; gboolean is_query = safe_str_eq(op, CIB_OP_QUERY); xmlXPathObjectPtr xpathObj = NULL; crm_debug_2("Processing \"%s\" event", op); if(is_query) { xpathObj = xpath_search(existing_cib, section); } else { xpathObj = xpath_search(*result_cib, section); } if(xpathObj != NULL && xpathObj->nodesetval != NULL) { max = xpathObj->nodesetval->nodeNr; } if(max < 1 && safe_str_eq(op, CIB_OP_DELETE)) { crm_debug("%s was already removed", section); } else 
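/*
 * Illustrative sketch (not part of this patch): a caller of the new
 * three-argument cib_config_changed(), which now computes the diff itself
 * and reports ordering-only changes by comparing digests.  The wrapper is
 * hypothetical.
 */
static void
example_notice_config_change(xmlNode *old_cib, xmlNode *new_cib)
{
    xmlNode *diff = NULL;

    if(cib_config_changed(old_cib, new_cib, &diff)) {
        /* a configuration change, or an ordering-only change for which a
         * minimal "fake" diff was created so notifications still fire */
        log_cib_diff(LOG_INFO, diff, __FUNCTION__);
    }

    if(diff != NULL) {
        free_xml(diff);
    }
}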
if(max < 1) { crm_debug("%s: %s does not exist", op, section); rc = cib_NOTEXISTS; } else if(is_query) { if(max > 1) { *answer = create_xml_node(NULL, "xpath-query"); } } for(lpc = 0; lpc < max; lpc++) { xmlChar *path = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); if(match == NULL) { continue; } path = xmlGetNodePath(match); crm_debug("Processing %s op for %s (%s)", op, section, path); free(path); if(safe_str_eq(op, CIB_OP_DELETE)) { free_xml_from_parent(NULL, match); break; } else if(safe_str_eq(op, CIB_OP_MODIFY)) { if(update_xml_child(match, input) == FALSE) { rc = cib_NOTEXISTS; } else { rc = cib_ok; if((options & cib_multiple) == 0) { break; } } } else if(safe_str_eq(op, CIB_OP_CREATE)) { add_node_copy(match, input); break; } else if(safe_str_eq(op, CIB_OP_QUERY)) { if(options & cib_no_children) { const char *tag = TYPE(match); *answer = create_xml_node(NULL, tag); copy_in_properties(*answer, match); break; } else if(*answer) { add_node_copy(*answer, match); } else { *answer = match; } } else if(safe_str_eq(op, CIB_OP_REPLACE)) { xmlNode *parent = match->parent; free_xml_from_parent(NULL, match); if(input != NULL) { add_node_copy(parent, input); } if((options & cib_multiple) == 0) { break; } } } if(xpathObj) { xmlXPathFreeObject(xpathObj); } return rc; } /* remove this function */ gboolean update_results( xmlNode *failed, xmlNode *target, const char* operation, int return_code) { xmlNode *xml_node = NULL; gboolean was_error = FALSE; const char *error_msg = NULL; if (return_code != cib_ok) { error_msg = cib_error2string(return_code); xml_node = create_xml_node(failed, XML_FAIL_TAG_CIB); was_error = TRUE; add_node_copy(xml_node, target); crm_xml_add(xml_node, XML_FAILCIB_ATTR_ID, ID(target)); crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE, TYPE(target)); crm_xml_add(xml_node, XML_FAILCIB_ATTR_OP, operation); crm_xml_add(xml_node, XML_FAILCIB_ATTR_REASON, error_msg); crm_warn("Action %s failed: %s (cde=%d)", operation, error_msg, return_code); } return was_error; } diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c index 20c9ed49d2..4935d4c2ab 100644 --- a/lib/cib/cib_utils.c +++ b/lib/cib/cib_utils.c @@ -1,895 +1,894 @@ /* * Copyright (c) 2004 International Business Machines * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include struct config_root_s { const char *name; const char *parent; const char *path; }; /* * "//crm_config" will also work in place of "/cib/configuration/crm_config" * The / prefix means find starting from the root, whereas the // prefix means * find anywhere and risks multiple matches */ struct config_root_s known_paths[] = { { NULL, NULL, "//cib" }, { XML_TAG_CIB, NULL, "//cib" }, { XML_CIB_TAG_STATUS, "/cib", "//cib/status" }, { XML_CIB_TAG_CONFIGURATION,"/cib", "//cib/configuration" }, { XML_CIB_TAG_CRMCONFIG, "/cib/configuration", "//cib/configuration/crm_config" }, { XML_CIB_TAG_NODES, "/cib/configuration", "//cib/configuration/nodes" }, { XML_CIB_TAG_DOMAINS, "/cib/configuration", "//cib/configuration/domains" }, { XML_CIB_TAG_RESOURCES, "/cib/configuration", "//cib/configuration/resources" }, { XML_CIB_TAG_CONSTRAINTS, "/cib/configuration", "//cib/configuration/constraints" }, { XML_CIB_TAG_OPCONFIG, "/cib/configuration", "//cib/configuration/op_defaults" }, { XML_CIB_TAG_RSCCONFIG, "/cib/configuration", "//cib/configuration/rsc_defaults" }, { XML_CIB_TAG_SECTION_ALL, NULL, "//cib" }, }; const char * cib_error2string(enum cib_errors return_code) { const char *error_msg = NULL; switch(return_code) { case cib_bad_permissions: error_msg = "bad permissions for the on-disk configuration. shutdown heartbeat and repair."; break; case cib_bad_digest: error_msg = "the on-disk configuration was manually altered. shutdown heartbeat and repair."; break; case cib_bad_config: error_msg = "the on-disk configuration is not valid"; break; case cib_msg_field_add: error_msg = "failed adding field to cib message"; break; case cib_id_check: error_msg = "missing id or id-collision detected"; break; case cib_operation: error_msg = "invalid operation"; break; case cib_create_msg: error_msg = "couldnt create cib message"; break; case cib_client_gone: error_msg = "client left before we could send reply"; break; case cib_not_connected: error_msg = "not connected"; break; case cib_not_authorized: error_msg = "not authorized"; break; case cib_send_failed: error_msg = "send failed"; break; case cib_reply_failed: error_msg = "reply failed"; break; case cib_return_code: error_msg = "no return code"; break; case cib_output_ptr: error_msg = "nowhere to store output"; break; case cib_output_data: error_msg = "corrupt output data"; break; case cib_connection: error_msg = "connection failed"; break; case cib_callback_register: error_msg = "couldnt register callback channel"; break; case cib_authentication: error_msg = ""; break; case cib_registration_msg: error_msg = "invalid registration msg"; break; case cib_callback_token: error_msg = "callback token not found"; break; case cib_missing: error_msg = "cib object missing"; break; case cib_variant: error_msg = "unknown/corrupt cib variant"; break; case CIBRES_MISSING_ID: error_msg = "The id field is missing"; break; case CIBRES_MISSING_TYPE: error_msg = "The type field is missing"; break; case CIBRES_MISSING_FIELD: error_msg = "A required field is missing"; break; case CIBRES_OBJTYPE_MISMATCH: error_msg = "CIBRES_OBJTYPE_MISMATCH"; break; case cib_EXISTS: error_msg = "The object already exists"; break; case cib_NOTEXISTS: error_msg = "The 
object/attribute does not exist"; break; case CIBRES_CORRUPT: error_msg = "The CIB is corrupt"; break; case cib_NOOBJECT: error_msg = "The update was empty"; break; case cib_NOPARENT: error_msg = "The parent object does not exist"; break; case cib_NODECOPY: error_msg = "Failed while copying update"; break; case CIBRES_OTHER: error_msg = "CIBRES_OTHER"; break; case cib_ok: error_msg = "ok"; break; case cib_unknown: error_msg = "Unknown error"; break; case cib_STALE: error_msg = "Discarded old update"; break; case cib_ACTIVATION: error_msg = "Activation Failed"; break; case cib_NOSECTION: error_msg = "Required section was missing"; break; case cib_NOTSUPPORTED: error_msg = "The action/feature is not supported"; break; case cib_not_master: error_msg = "Local service is not the master instance"; break; case cib_client_corrupt: error_msg = "Service client not valid"; break; case cib_remote_timeout: error_msg = "Remote node did not respond"; break; case cib_master_timeout: error_msg = "No master service is currently active"; break; case cib_revision_unsupported: error_msg = "The required CIB revision number is not supported"; break; case cib_revision_unknown: error_msg = "The CIB revision number could not be determined"; break; case cib_missing_data: error_msg = "Required data for this CIB API call not found"; break; case cib_no_quorum: error_msg = "Write requires quorum"; break; case cib_diff_failed: error_msg = "Application of an update diff failed"; break; case cib_diff_resync: error_msg = "Application of an update diff failed, requesting a full refresh"; break; case cib_bad_section: error_msg = "Invalid CIB section specified"; break; case cib_old_data: error_msg = "Update was older than existing configuration"; break; case cib_dtd_validation: error_msg = "Update does not conform to the configured schema/DTD"; break; case cib_invalid_argument: error_msg = "Invalid argument"; break; case cib_transform_failed: error_msg = "Schema transform failed"; break; } if(error_msg == NULL) { crm_err("Unknown CIB Error Code: %d", return_code); error_msg = ""; } return error_msg; } int cib_section2enum(const char *a_section) { if(a_section == NULL || strcasecmp(a_section, "all") == 0) { return cib_section_all; } else if(strcasecmp(a_section, XML_CIB_TAG_NODES) == 0) { return cib_section_nodes; } else if(strcasecmp(a_section, XML_CIB_TAG_STATUS) == 0) { return cib_section_status; } else if(strcasecmp(a_section, XML_CIB_TAG_CONSTRAINTS) == 0) { return cib_section_constraints; } else if(strcasecmp(a_section, XML_CIB_TAG_RESOURCES) == 0) { return cib_section_resources; } else if(strcasecmp(a_section, XML_CIB_TAG_CRMCONFIG) == 0) { return cib_section_crmconfig; } crm_err("Unknown CIB section: %s", a_section); return cib_section_none; } int cib_compare_generation(xmlNode *left, xmlNode *right) { int lpc = 0; const char *attributes[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; crm_log_xml_debug_3(left, "left"); crm_log_xml_debug_3(right, "right"); for(lpc = 0; lpc < DIMOF(attributes); lpc++) { int int_elem_l = -1; int int_elem_r = -1; const char *elem_r = NULL; const char *elem_l = crm_element_value(left, attributes[lpc]); if(right != NULL) { elem_r = crm_element_value(right, attributes[lpc]); } if(elem_l != NULL) { int_elem_l = crm_parse_int(elem_l, NULL); } if(elem_r != NULL) { int_elem_r = crm_parse_int(elem_r, NULL); } if(int_elem_l < int_elem_r) { crm_debug_2("%s (%s < %s)", attributes[lpc], crm_str(elem_l), crm_str(elem_r)); return -1; } else if(int_elem_l > int_elem_r) { 
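/*
 * Illustrative sketch (not part of this patch): cib_compare_generation()
 * orders two CIB copies by admin_epoch, then epoch, then num_updates.
 * The wrapper below is hypothetical.
 */
static gboolean
example_is_newer(xmlNode *candidate, xmlNode *current)
{
    return cib_compare_generation(candidate, current) > 0;
}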
crm_debug_2("%s (%s > %s)", attributes[lpc], crm_str(elem_l), crm_str(elem_r)); return 1; } } return 0; } xmlNode* get_cib_copy(cib_t *cib) { xmlNode *xml_cib; int options = cib_scope_local|cib_sync_call; if(cib->cmds->query(cib, NULL, &xml_cib, options) != cib_ok) { crm_err("Couldnt retrieve the CIB"); return NULL; } else if(xml_cib == NULL) { crm_err("The CIB result was empty"); return NULL; } if(safe_str_eq(crm_element_name(xml_cib), XML_TAG_CIB)) { return xml_cib; } free_xml(xml_cib); return NULL; } xmlNode* cib_get_generation(cib_t *cib) { xmlNode *the_cib = get_cib_copy(cib); xmlNode *generation = create_xml_node( NULL, XML_CIB_TAG_GENERATION_TUPPLE); if(the_cib != NULL) { copy_in_properties(generation, the_cib); free_xml(the_cib); } return generation; } void log_cib_diff(int log_level, xmlNode *diff, const char *function) { int add_updates = 0; int add_epoch = 0; int add_admin_epoch = 0; int del_updates = 0; int del_epoch = 0; int del_admin_epoch = 0; if(diff == NULL) { return; } cib_diff_version_details( diff, &add_admin_epoch, &add_epoch, &add_updates, &del_admin_epoch, &del_epoch, &del_updates); if(add_updates != del_updates) { do_crm_log(log_level, "%s: Diff: --- %d.%d.%d", function, del_admin_epoch, del_epoch, del_updates); do_crm_log(log_level, "%s: Diff: +++ %d.%d.%d", function, add_admin_epoch, add_epoch, add_updates); } else if(diff != NULL) { do_crm_log(log_level, "%s: Local-only Change: %d.%d.%d", function, add_admin_epoch, add_epoch, add_updates); } log_xml_diff(log_level, diff, function); } gboolean cib_version_details( xmlNode *cib, int *admin_epoch, int *epoch, int *updates) { if(cib == NULL) { *admin_epoch = -1; *epoch = -1; *updates = -1; return FALSE; } else { crm_element_value_int(cib, XML_ATTR_GENERATION, epoch); crm_element_value_int(cib, XML_ATTR_NUMUPDATES, updates); crm_element_value_int(cib, XML_ATTR_GENERATION_ADMIN, admin_epoch); } return TRUE; } gboolean cib_diff_version_details( xmlNode *diff, int *admin_epoch, int *epoch, int *updates, int *_admin_epoch, int *_epoch, int *_updates) { xmlNode *tmp = NULL; tmp = find_xml_node(diff, "diff-added", FALSE); cib_version_details(tmp, admin_epoch, epoch, updates); tmp = find_xml_node(diff, "diff-removed", FALSE); cib_version_details(tmp, _admin_epoch, _epoch, _updates); return TRUE; } /* * The caller should never free the return value */ const char *get_object_path(const char *object_type) { int lpc = 0; int max = DIMOF(known_paths); for(; lpc < max; lpc++) { if((object_type == NULL && known_paths[lpc].name == NULL) || safe_str_eq(object_type, known_paths[lpc].name)) { return known_paths[lpc].path; } } return NULL; } const char *get_object_parent(const char *object_type) { int lpc = 0; int max = DIMOF(known_paths); for(; lpc < max; lpc++) { if(safe_str_eq(object_type, known_paths[lpc].name)) { return known_paths[lpc].parent; } } return NULL; } xmlNode* get_object_root(const char *object_type, xmlNode *the_root) { const char *xpath = get_object_path(object_type); if(xpath == NULL) { return the_root; /* or return NULL? 
*/ } return get_xpath_object(xpath, the_root, LOG_DEBUG_2); } xmlNode* create_cib_fragment_adv( xmlNode *update, const char *update_section, const char *source) { xmlNode *cib = NULL; gboolean whole_cib = FALSE; xmlNode *object_root = NULL; char *local_section = NULL; /* crm_debug("Creating a blank fragment: %s", update_section); */ if(update == NULL && update_section == NULL) { crm_debug_3("Creating a blank fragment"); update = createEmptyCib(); crm_xml_add(cib, XML_ATTR_ORIGIN, source); return update; } else if(update == NULL) { crm_err("No update to create a fragment for"); return NULL; } CRM_CHECK(update_section != NULL, return NULL); if(safe_str_eq(crm_element_name(update), XML_TAG_CIB)) { whole_cib = TRUE; } if(whole_cib == FALSE) { cib = createEmptyCib(); crm_xml_add(cib, XML_ATTR_ORIGIN, source); object_root = get_object_root(update_section, cib); add_node_copy(object_root, update); } else { cib = copy_xml(update); crm_xml_add(cib, XML_ATTR_ORIGIN, source); } crm_free(local_section); crm_debug_3("Verifying created fragment"); return cib; } /* * It is the callers responsibility to free both the new CIB (output) * and the new CIB (input) */ xmlNode* createEmptyCib(void) { xmlNode *cib_root = NULL, *config = NULL, *status = NULL; cib_root = create_xml_node(NULL, XML_TAG_CIB); config = create_xml_node(cib_root, XML_CIB_TAG_CONFIGURATION); status = create_xml_node(cib_root, XML_CIB_TAG_STATUS); /* crm_xml_add(cib_root, "version", "1"); */ create_xml_node(config, XML_CIB_TAG_CRMCONFIG); create_xml_node(config, XML_CIB_TAG_NODES); create_xml_node(config, XML_CIB_TAG_RESOURCES); create_xml_node(config, XML_CIB_TAG_CONSTRAINTS); return cib_root; } static unsigned int dtd_throttle = 0; enum cib_errors cib_perform_op(const char *op, int call_options, cib_op_t *fn, gboolean is_query, const char *section, xmlNode *req, xmlNode *input, gboolean manage_counters, gboolean *config_changed, xmlNode *current_cib, xmlNode **result_cib, xmlNode **diff, xmlNode **output) { int rc = cib_ok; gboolean check_dtd = TRUE; xmlNode *scratch = NULL; xmlNode *local_diff = NULL; const char *current_dtd = "unknown"; CRM_CHECK(output != NULL, return cib_output_data); CRM_CHECK(result_cib != NULL, return cib_output_data); CRM_CHECK(config_changed != NULL, return cib_output_data); *output = NULL; *result_cib = NULL; *config_changed = FALSE; if(fn == NULL) { return cib_operation; } if(is_query) { rc = (*fn)(op, call_options, section, req, input, current_cib, result_cib, output); return rc; } scratch = copy_xml(current_cib); rc = (*fn)(op, call_options, section, req, input, current_cib, &scratch, output); CRM_CHECK(current_cib != scratch, return cib_unknown); if(rc == cib_ok && scratch == NULL) { rc = cib_unknown; } if(rc == cib_ok && current_cib) { int old = 0; int new = 0; crm_element_value_int(scratch, XML_ATTR_GENERATION_ADMIN, &new); crm_element_value_int(current_cib, XML_ATTR_GENERATION_ADMIN, &old); if(old > new) { crm_err("%s went backwards: %d -> %d (Opts: 0x%x)", XML_ATTR_GENERATION_ADMIN, old, new, call_options); crm_log_xml_warn(req, "Bad Op"); crm_log_xml_warn(input, "Bad Data"); rc = cib_old_data; } else if(old == new) { crm_element_value_int(scratch, XML_ATTR_GENERATION, &new); crm_element_value_int(current_cib, XML_ATTR_GENERATION, &old); if(old > new) { crm_err("%s went backwards: %d -> %d (Opts: 0x%x)", XML_ATTR_GENERATION, old, new, call_options); crm_log_xml_warn(req, "Bad Op"); crm_log_xml_warn(input, "Bad Data"); rc = cib_old_data; } } } if(rc == cib_ok) { fix_plus_plus_recursive(scratch); 
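/*
 * Illustrative sketch (not part of this patch): createEmptyCib() above builds
 * the empty <cib> skeleton (configuration sections plus status), and
 * create_cib_fragment_adv() wraps an update in it under the named section.
 * The wrapper below is hypothetical.
 */
static xmlNode *
example_wrap_in_fragment(xmlNode *resource_update)
{
    /* the third argument is recorded as the fragment's origin */
    return create_cib_fragment_adv(resource_update, XML_CIB_TAG_RESOURCES,
                                   __FUNCTION__);
}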
current_dtd = crm_element_value(scratch, XML_ATTR_VALIDATION); if(manage_counters) { - local_diff = diff_xml_object(current_cib, scratch, FALSE); - *config_changed = cib_config_changed(local_diff); + *config_changed = cib_config_changed(current_cib, scratch, &local_diff); if(*config_changed) { cib_update_counter(scratch, XML_ATTR_NUMUPDATES, TRUE); cib_update_counter(scratch, XML_ATTR_GENERATION, FALSE); } else if(local_diff != NULL){ cib_update_counter(scratch, XML_ATTR_NUMUPDATES, FALSE); if(dtd_throttle++ % 20) { check_dtd = FALSE; /* Throttle the amount of costly validation we perform due to status updates * a) we don't really care whats in the status section * b) we don't validate any of it's contents at the moment anyway */ } } } } if(diff != NULL && local_diff != NULL) { /* Only fix the diff if we'll return it... */ xmlNode *cib = NULL; xmlNode *diff_child = NULL; const char *tag = NULL; const char *value = NULL; tag = "diff-removed"; diff_child = find_xml_node(local_diff, tag, FALSE); if(diff_child == NULL) { diff_child = create_xml_node(local_diff, tag); } tag = XML_TAG_CIB; cib = find_xml_node(diff_child, tag, FALSE); if(cib == NULL) { cib = create_xml_node(diff_child, tag); } tag = XML_ATTR_GENERATION_ADMIN; value = crm_element_value(current_cib, tag); crm_xml_add(diff_child, tag, value); if(*config_changed) { crm_xml_add(cib, tag, value); } tag = XML_ATTR_GENERATION; value = crm_element_value(current_cib, tag); crm_xml_add(diff_child, tag, value); if(*config_changed) { crm_xml_add(cib, tag, value); } tag = XML_ATTR_NUMUPDATES; value = crm_element_value(current_cib, tag); crm_xml_add(cib, tag, value); crm_xml_add(diff_child, tag, value); tag = "diff-added"; diff_child = find_xml_node(local_diff, tag, FALSE); if(diff_child == NULL) { diff_child = create_xml_node(local_diff, tag); } tag = XML_TAG_CIB; cib = find_xml_node(diff_child, tag, FALSE); if(cib == NULL) { cib = create_xml_node(diff_child, tag); } tag = XML_ATTR_GENERATION_ADMIN; value = crm_element_value(scratch, tag); crm_xml_add(diff_child, tag, value); if(*config_changed) { crm_xml_add(cib, tag, value); } tag = XML_ATTR_GENERATION; value = crm_element_value(scratch, tag); crm_xml_add(diff_child, tag, value); if(*config_changed) { crm_xml_add(cib, tag, value); } tag = XML_ATTR_NUMUPDATES; value = crm_element_value(scratch, tag); crm_xml_add(cib, tag, value); crm_xml_add(diff_child, tag, value); *diff = local_diff; local_diff = NULL; } if(rc == cib_ok && check_dtd && validate_xml(scratch, NULL, TRUE) == FALSE) { crm_warn("Updated CIB does not validate against %s schema/dtd", crm_str(current_dtd)); rc = cib_dtd_validation; } *result_cib = scratch; free_xml(local_diff); return rc; } int get_channel_token(IPC_Channel *ch, char **token) { int rc = cib_ok; xmlNode *reg_msg = NULL; const char *msg_type = NULL; const char *tmp_ticket = NULL; CRM_CHECK(ch != NULL, return cib_missing); CRM_CHECK(token != NULL, return cib_output_ptr); crm_debug_4("Waiting for msg on command channel"); reg_msg = xmlfromIPC(ch, MAX_IPC_DELAY); if(ch->ops->get_chan_status(ch) != IPC_CONNECT) { crm_err("No reply message - disconnected"); free_xml(reg_msg); return cib_not_connected; } else if(reg_msg == NULL) { crm_err("No reply message - empty"); return cib_reply_failed; } msg_type = crm_element_value(reg_msg, F_CIB_OPERATION); tmp_ticket = crm_element_value(reg_msg, F_CIB_CLIENTID); if(safe_str_neq(msg_type, CRM_OP_REGISTER) ) { crm_err("Invalid registration message: %s", msg_type); rc = cib_registration_msg; } else if(tmp_ticket == NULL) { rc = 
cib_callback_token; } else { *token = crm_strdup(tmp_ticket); } free_xml(reg_msg); return rc; } xmlNode * cib_create_op( int call_id, const char *token, const char *op, const char *host, const char *section, xmlNode *data, int call_options) { int rc = HA_OK; xmlNode *op_msg = create_xml_node(NULL, "cib_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "cib_command"); crm_xml_add(op_msg, F_TYPE, T_CIB); crm_xml_add(op_msg, F_CIB_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_CIB_OPERATION, op); crm_xml_add(op_msg, F_CIB_HOST, host); crm_xml_add(op_msg, F_CIB_SECTION, section); crm_xml_add_int(op_msg, F_CIB_CALLID, call_id); crm_debug_4("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, F_CIB_CALLOPTS, call_options); if(data != NULL) { add_message_xml(op_msg, F_CIB_CALLDATA, data); } if (rc != HA_OK) { crm_err("Failed to create CIB operation message"); crm_log_xml(LOG_ERR, "op", op_msg); free_xml(op_msg); return NULL; } if(call_options & cib_inhibit_bcast) { CRM_CHECK((call_options & cib_scope_local), return NULL); } return op_msg; } void cib_native_callback(cib_t *cib, xmlNode *msg, int call_id, int rc) { xmlNode *output = NULL; cib_callback_client_t *blob = NULL; cib_callback_client_t local_blob; local_blob.id = NULL; local_blob.callback = NULL; local_blob.user_data = NULL; local_blob.only_success = FALSE; if(msg != NULL) { crm_element_value_int(msg, F_CIB_RC, &rc); crm_element_value_int(msg, F_CIB_CALLID, &call_id); output = get_message_xml(msg, F_CIB_CALLDATA); } blob = g_hash_table_lookup( cib_op_callback_table, GINT_TO_POINTER(call_id)); if(blob != NULL) { local_blob = *blob; blob = NULL; remove_cib_op_callback(call_id, FALSE); } else { crm_debug_2("No callback found for call %d", call_id); local_blob.callback = NULL; } if(cib == NULL) { crm_debug("No cib object supplied"); } if(rc == cib_diff_resync) { /* This is an internal value that clients do not and should not care about */ rc = cib_ok; } if(local_blob.callback != NULL && (rc == cib_ok || local_blob.only_success == FALSE)) { crm_debug_2("Invoking callback %s for call %d", crm_str(local_blob.id), call_id); local_blob.callback(msg, call_id, rc, output, local_blob.user_data); } else if(cib && cib->op_callback == NULL && rc != cib_ok) { crm_warn("CIB command failed: %s", cib_error2string(rc)); crm_log_xml(LOG_DEBUG, "Failed CIB Update", msg); } if(cib && cib->op_callback != NULL) { crm_debug_2("Invoking global callback for call %d", call_id); cib->op_callback(msg, call_id, rc, output); } crm_debug_4("OP callback activated."); } void cib_native_notify(gpointer data, gpointer user_data) { xmlNode *msg = user_data; cib_notify_client_t *entry = data; const char *event = NULL; if(msg == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(msg, F_SUBTYPE); if(entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if(entry->callback == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if(safe_str_neq(entry->event, event)) { crm_debug_4("Skipping callback - event mismatch %p/%s vs. 
%s", entry, entry->event, event); return; } crm_debug_4("Invoking callback for %p/%s event...", entry, event); entry->callback(event, msg); crm_debug_4("Callback invoked..."); } gboolean determine_host(cib_t *cib_conn, char **node_uname, char **node_uuid) { CRM_CHECK(node_uname != NULL, return FALSE); if(*node_uname == NULL) { struct utsname name; if(uname(&name) < 0) { crm_perror(LOG_ERR,"uname(2) call failed"); return FALSE; } *node_uname = crm_strdup(name.nodename); crm_info("Detected uname: %s", *node_uname); } if(cib_conn && *node_uname != NULL && node_uuid != NULL && *node_uuid == NULL) { int rc = query_node_uuid(cib_conn, *node_uname, node_uuid); if(rc != cib_ok) { fprintf(stderr,"Could not map uname=%s to a UUID: %s\n", *node_uname, cib_error2string(rc)); return FALSE; } crm_info("Mapped %s to %s", *node_uname, crm_str(*node_uuid)); } return TRUE; } diff --git a/lib/common/utils.c b/lib/common/utils.c index 5ea18ffbc1..9bc9d2c691 100644 --- a/lib/common/utils.c +++ b/lib/common/utils.c @@ -1,2362 +1,2362 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if HAVE_HB_CONFIG_H #include /* for HB_COREDIR */ #endif #if HAVE_GLUE_CONFIG_H #include /* for HB_COREDIR */ #endif #ifndef MAXLINE # define MAXLINE 512 #endif #ifdef HAVE_GETOPT_H # include #endif static uint ref_counter = 0; unsigned int crm_log_level = LOG_INFO; gboolean crm_config_error = FALSE; gboolean crm_config_warning = FALSE; const char *crm_system_name = "unknown"; int node_score_red = 0; int node_score_green = 0; int node_score_yellow = 0; int node_score_infinity = INFINITY; void crm_set_env_options(void); gboolean check_time(const char *value) { if(crm_get_msec(value) < 5000) { return FALSE; } return TRUE; } gboolean check_timer(const char *value) { if(crm_get_msec(value) < 0) { return FALSE; } return TRUE; } gboolean check_boolean(const char *value) { int tmp = FALSE; if(crm_str_to_boolean(value, &tmp) != 1) { return FALSE; } return TRUE; } gboolean check_number(const char *value) { errno = 0; if(value == NULL) { return FALSE; } else if(safe_str_eq(value, MINUS_INFINITY_S)) { } else if(safe_str_eq(value, INFINITY_S)) { } else { crm_int_helper(value, NULL); } if(errno != 0) { return FALSE; } return TRUE; } int char2score(const char *score) { int score_f = 0; if(score == NULL) { } else if(safe_str_eq(score, MINUS_INFINITY_S)) { score_f = -node_score_infinity; } else if(safe_str_eq(score, INFINITY_S)) { score_f = node_score_infinity; } else if(safe_str_eq(score, "+"INFINITY_S)) { score_f = node_score_infinity; } else if(safe_str_eq(score, "red")) { score_f = node_score_red; } else if(safe_str_eq(score, "yellow")) 
{ score_f = node_score_yellow; } else if(safe_str_eq(score, "green")) { score_f = node_score_green; } else { score_f = crm_parse_int(score, NULL); if(score_f > 0 && score_f > node_score_infinity) { score_f = node_score_infinity; } else if(score_f < 0 && score_f < -node_score_infinity) { score_f = -node_score_infinity; } } return score_f; } char * score2char(int score) { if(score >= node_score_infinity) { return crm_strdup(INFINITY_S); } else if(score <= -node_score_infinity) { return crm_strdup("-"INFINITY_S); } return crm_itoa(score); } const char * cluster_option(GHashTable* options, gboolean(*validate)(const char*), const char *name, const char *old_name, const char *def_value) { const char *value = NULL; CRM_ASSERT(name != NULL); if(options != NULL) { value = g_hash_table_lookup(options, name); } if(value == NULL && old_name && options != NULL) { value = g_hash_table_lookup(options, old_name); if(value != NULL) { crm_config_warn("Using deprecated name '%s' for" " cluster option '%s'", old_name, name); g_hash_table_insert( options, crm_strdup(name), crm_strdup(value)); value = g_hash_table_lookup(options, old_name); } } if(value == NULL) { - crm_debug("Using default value '%s' for cluster option '%s'", - def_value, name); + crm_debug_2("Using default value '%s' for cluster option '%s'", + def_value, name); if(options == NULL) { return def_value; } g_hash_table_insert( options, crm_strdup(name), crm_strdup(def_value)); value = g_hash_table_lookup(options, name); } if(validate && validate(value) == FALSE) { crm_config_err("Value '%s' for cluster option '%s' is invalid." " Defaulting to %s", value, name, def_value); g_hash_table_replace(options, crm_strdup(name), crm_strdup(def_value)); value = g_hash_table_lookup(options, name); } return value; } const char * get_cluster_pref(GHashTable *options, pe_cluster_option *option_list, int len, const char *name) { int lpc = 0; const char *value = NULL; gboolean found = FALSE; for(lpc = 0; lpc < len; lpc++) { if(safe_str_eq(name, option_list[lpc].name)) { found = TRUE; value = cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } CRM_CHECK(found, crm_err("No option named: %s", name)); CRM_ASSERT(value != NULL); return value; } void config_metadata(const char *name, const char *version, const char *desc_short, const char *desc_long, pe_cluster_option *option_list, int len) { int lpc = 0; fprintf(stdout, "" "\n" "\n" " %s\n" " %s\n" " %s\n" " \n", name, version, desc_long, desc_short); for(lpc = 0; lpc < len; lpc++) { if(option_list[lpc].description_long == NULL && option_list[lpc].description_short == NULL) { continue; } fprintf(stdout, " \n" " %s\n" " \n" " %s%s%s\n" " \n", option_list[lpc].name, option_list[lpc].description_short, option_list[lpc].type, option_list[lpc].default_value, option_list[lpc].description_long?option_list[lpc].description_long:option_list[lpc].description_short, option_list[lpc].values?" 
Allowed values: ":"", option_list[lpc].values?option_list[lpc].values:""); } fprintf(stdout, " \n\n"); } void verify_all_options(GHashTable *options, pe_cluster_option *option_list, int len) { int lpc = 0; for(lpc = 0; lpc < len; lpc++) { cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } char * generateReference(const char *custom1, const char *custom2) { const char *local_cust1 = custom1; const char *local_cust2 = custom2; int reference_len = 4; char *since_epoch = NULL; reference_len += 20; /* too big */ reference_len += 40; /* too big */ if(local_cust1 == NULL) { local_cust1 = "_empty_"; } reference_len += strlen(local_cust1); if(local_cust2 == NULL) { local_cust2 = "_empty_"; } reference_len += strlen(local_cust2); crm_malloc0(since_epoch, reference_len); if(since_epoch != NULL) { sprintf(since_epoch, "%s-%s-%ld-%u", local_cust1, local_cust2, (unsigned long)time(NULL), ref_counter++); } return since_epoch; } gboolean decodeNVpair(const char *srcstring, char separator, char **name, char **value) { int lpc = 0; int len = 0; const char *temp = NULL; CRM_ASSERT(name != NULL && value != NULL); *name = NULL; *value = NULL; crm_debug_4("Attempting to decode: [%s]", srcstring); if (srcstring != NULL) { len = strlen(srcstring); while(lpc <= len) { if (srcstring[lpc] == separator) { crm_malloc0(*name, lpc+1); if(*name == NULL) { break; /* and return FALSE */ } strncpy(*name, srcstring, lpc); (*name)[lpc] = '\0'; /* this sucks but as the strtok manpage says.. * it *is* a bug */ len = len-lpc; len--; if(len <= 0) { *value = NULL; } else { crm_malloc0(*value, len+1); if(*value == NULL) { crm_free(*name); break; /* and return FALSE */ } temp = srcstring+lpc+1; strncpy(*value, temp, len); (*value)[len] = '\0'; } return TRUE; } lpc++; } } if(*name != NULL) { crm_free(*name); } *name = NULL; *value = NULL; return FALSE; } char * crm_concat(const char *prefix, const char *suffix, char join) { int len = 0; char *new_str = NULL; CRM_ASSERT(prefix != NULL); CRM_ASSERT(suffix != NULL); len = strlen(prefix) + strlen(suffix) + 2; crm_malloc0(new_str, (len)); sprintf(new_str, "%s%c%s", prefix, join, suffix); new_str[len-1] = 0; return new_str; } char * generate_hash_key(const char *crm_msg_reference, const char *sys) { char *hash_key = crm_concat(sys?sys:"none", crm_msg_reference, '_'); crm_debug_3("created hash key: (%s)", hash_key); return hash_key; } char * generate_hash_value(const char *src_node, const char *src_subsys) { char *hash_value = NULL; if (src_node == NULL || src_subsys == NULL) { return NULL; } if (strcasecmp(CRM_SYSTEM_DC, src_subsys) == 0) { hash_value = crm_strdup(src_subsys); CRM_ASSERT(hash_value); return hash_value; } hash_value = crm_concat(src_node, src_subsys, '_'); crm_info("created hash value: (%s)", hash_value); return hash_value; } char * crm_itoa(int an_int) { int len = 32; char *buffer = NULL; crm_malloc0(buffer, (len+1)); if(buffer != NULL) { snprintf(buffer, len, "%d", an_int); } return buffer; } extern int LogToLoggingDaemon(int priority, const char * buf, int bstrlen, gboolean use_pri_str); #ifdef HAVE_G_LOG_SET_DEFAULT_HANDLER GLogFunc glib_log_default; static void crm_glib_handler(const gchar *log_domain, GLogLevelFlags flags, const gchar *message, gpointer user_data) { int log_level = LOG_WARNING; GLogLevelFlags msg_level = (flags & G_LOG_LEVEL_MASK); switch(msg_level) { case G_LOG_LEVEL_CRITICAL: /* log and record how we got here */ crm_abort(__FILE__,__PRETTY_FUNCTION__,__LINE__, 
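/*
 * Illustrative sketch (not part of this patch): cluster_option() above looks
 * an option up by name (falling back to a deprecated old name), fills in and
 * now only debug-logs the default when the value is missing, and re-checks it
 * with the supplied validator.  The option name below is only for
 * illustration.
 */
static const char *
example_get_cluster_delay(GHashTable *options)
{
    /* check_time rejects anything shorter than five seconds */
    return cluster_option(options, check_time, "cluster-delay", NULL, "60s");
}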
message, TRUE, TRUE); return; case G_LOG_LEVEL_ERROR: log_level = LOG_ERR; break; case G_LOG_LEVEL_MESSAGE: log_level = LOG_NOTICE; break; case G_LOG_LEVEL_INFO: log_level = LOG_INFO; break; case G_LOG_LEVEL_DEBUG: log_level = LOG_DEBUG; break; case G_LOG_LEVEL_WARNING: case G_LOG_FLAG_RECURSION: case G_LOG_FLAG_FATAL: case G_LOG_LEVEL_MASK: log_level = LOG_WARNING; break; } do_crm_log(log_level, "%s: %s", log_domain, message); } #endif void crm_log_deinit(void) { #ifdef HAVE_G_LOG_SET_DEFAULT_HANDLER g_log_set_default_handler(glib_log_default, NULL); #endif } gboolean crm_log_init( const char *entity, int level, gboolean coredir, gboolean to_stderr, int argc, char **argv) { /* Redirect messages from glib functions to our handler */ /* cl_malloc_forced_for_glib(); */ #ifdef HAVE_G_LOG_SET_DEFAULT_HANDLER glib_log_default = g_log_set_default_handler(crm_glib_handler, NULL); #endif /* and for good measure... - this enum is a bit field (!) */ g_log_set_always_fatal((GLogLevelFlags)0); /*value out of range*/ crm_system_name = entity; setenv("PCMK_service", crm_system_name, 1); cl_log_set_entity(entity); if(argc == 0) { /* Nuke any syslog activity */ unsetenv("HA_logfacility"); } else if(getenv("HA_logfacility") == NULL) { /* Set a default */ cl_log_set_facility(HA_LOG_FACILITY); } /* else: picked up by crm_set_env_options() */ if(coredir) { const char *user = getenv("USER"); if(safe_str_neq(user, "root") && safe_str_neq(user, CRM_DAEMON_USER)) { crm_info("Not switching to corefile directory"); coredir = FALSE; } } if(coredir) { int user = getuid(); struct passwd *pwent = NULL; const char *base = HA_COREDIR; pwent = getpwuid(user); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get name for uid: %d", user); } else if(safe_str_neq(pwent->pw_name, "root") && safe_str_neq(pwent->pw_name, "nobody") && safe_str_neq(pwent->pw_name, CRM_DAEMON_USER)) { crm_debug("Don't change active directory for regular user: %s", pwent->pw_name); } else if (chdir(base) < 0) { crm_perror(LOG_ERR, "Cannot change active directory to %s", base); } else if (chdir(pwent->pw_name) < 0) { crm_perror(LOG_ERR, "Cannot change active directory to %s/%s", base, pwent->pw_name); } else { crm_info("Changed active directory to %s/%s", base, pwent->pw_name); } } set_crm_log_level(level); crm_set_env_options(); cl_log_args(argc, argv); cl_log_enable_stderr(to_stderr); crm_signal(DEBUG_INC, alter_debug); crm_signal(DEBUG_DEC, alter_debug); return TRUE; } /* returns the old value */ unsigned int set_crm_log_level(unsigned int level) { unsigned int old = crm_log_level; while(crm_log_level < 100 && crm_log_level < level) { alter_debug(DEBUG_INC); } while(crm_log_level > 0 && crm_log_level > level) { alter_debug(DEBUG_DEC); } return old; } unsigned int get_crm_log_level(void) { return crm_log_level; } static int crm_version_helper(const char *text, char **end_text) { int atoi_result = -1; CRM_ASSERT(end_text != NULL); errno = 0; if(text != NULL && text[0] != 0) { atoi_result = (int)strtol(text, end_text, 10); if(errno == EINVAL) { crm_err("Conversion of '%s' %c failed", text, text[0]); atoi_result = -1; } } return atoi_result; } /* * version1 < version2 : -1 * version1 = version2 : 0 * version1 > version2 : 1 */ int compare_version(const char *version1, const char *version2) { int rc = 0; int lpc = 0; char *ver1_copy = NULL, *ver2_copy = NULL; char *rest1 = NULL, *rest2 = NULL; if(version1 == NULL && version2 == NULL) { return 0; } else if(version1 == NULL) { return -1; } else if(version2 == NULL) { return 1; } ver1_copy = 
crm_strdup(version1); ver2_copy = crm_strdup(version2); rest1 = ver1_copy; rest2 = ver2_copy; while(1) { int digit1 = 0; int digit2 = 0; lpc++; if(rest1 == rest2) { break; } if(rest1 != NULL) { digit1 = crm_version_helper(rest1, &rest1); } if(rest2 != NULL) { digit2 = crm_version_helper(rest2, &rest2); } if(digit1 < digit2){ rc = -1; crm_debug_5("%d < %d", digit1, digit2); break; } else if (digit1 > digit2){ rc = 1; crm_debug_5("%d > %d", digit1, digit2); break; } if(rest1 != NULL && rest1[0] == '.') { rest1++; } if(rest1 != NULL && rest1[0] == 0) { rest1 = NULL; } if(rest2 != NULL && rest2[0] == '.') { rest2++; } if(rest2 != NULL && rest2[0] == 0) { rest2 = NULL; } } crm_free(ver1_copy); crm_free(ver2_copy); if(rc == 0) { crm_debug_3("%s == %s (%d)", version1, version2, lpc); } else if(rc < 0) { crm_debug_3("%s < %s (%d)", version1, version2, lpc); } else if(rc > 0) { crm_debug_3("%s > %s (%d)", version1, version2, lpc); } return rc; } gboolean do_stderr = FALSE; void alter_debug(int nsig) { crm_signal(DEBUG_INC, alter_debug); crm_signal(DEBUG_DEC, alter_debug); switch(nsig) { case DEBUG_INC: if (crm_log_level < 100) { crm_log_level++; } break; case DEBUG_DEC: if (crm_log_level > 0) { crm_log_level--; } break; default: fprintf(stderr, "Unknown signal %d\n", nsig); cl_log(LOG_ERR, "Unknown signal %d", nsig); break; } } void g_hash_destroy_str(gpointer data) { crm_free(data); } #include /* #include */ /* #include */ long long crm_int_helper(const char *text, char **end_text) { long long result = -1; char *local_end_text = NULL; int saved_errno = 0; errno = 0; if(text != NULL) { #ifdef ANSI_ONLY if(end_text != NULL) { result = strtol(text, end_text, 10); } else { result = strtol(text, &local_end_text, 10); } #else if(end_text != NULL) { result = strtoll(text, end_text, 10); } else { result = strtoll(text, &local_end_text, 10); } #endif saved_errno = errno; /* CRM_CHECK(errno != EINVAL); */ if(errno == EINVAL) { crm_err("Conversion of %s failed", text); result = -1; } else if(errno == ERANGE) { crm_err("Conversion of %s was clipped: %lld", text, result); } else if(errno != 0) { crm_perror(LOG_ERR,"Conversion of %s failed:", text); } if(local_end_text != NULL && local_end_text[0] != '\0') { crm_err("Characters left over after parsing '%s': '%s'", text, local_end_text); } errno = saved_errno; } return result; } int crm_parse_int(const char *text, const char *default_text) { int atoi_result = -1; if(text != NULL) { atoi_result = crm_int_helper(text, NULL); if(errno == 0) { return atoi_result; } } if(default_text != NULL) { atoi_result = crm_int_helper(default_text, NULL); if(errno == 0) { return atoi_result; } } else { crm_err("No default conversion value supplied"); } return -1; } gboolean safe_str_neq(const char *a, const char *b) { if(a == b) { return FALSE; } else if(a==NULL || b==NULL) { return TRUE; } else if(strcasecmp(a, b) == 0) { return FALSE; } return TRUE; } char * crm_strdup_fn(const char *src, const char *file, const char *fn, int line) { char *dup = NULL; CRM_CHECK(src != NULL, crm_err("Could not perform copy at %s:%d (%s)", file, line, fn); return NULL); crm_malloc0(dup, strlen(src) + 1); return strcpy(dup, src); } #define ENV_PREFIX "HA_" void crm_set_env_options(void) { cl_inherit_logging_environment(500); cl_log_set_logd_channel_source(NULL, NULL); if(debug_level > 0 && (debug_level+LOG_INFO) > (int)crm_log_level) { set_crm_log_level(LOG_INFO + debug_level); } } gboolean crm_is_true(const char * s) { gboolean ret = FALSE; if(s != NULL) { crm_str_to_boolean(s, &ret); } return 
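/*
 * Illustrative sketch (not part of this patch): compare_version() walks the
 * dot-separated segments numerically and returns -1, 0 or 1.  The wrapper
 * below is hypothetical.
 */
static gboolean
example_version_at_least(const char *have, const char *want)
{
    return compare_version(have, want) >= 0;
}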
ret; } int crm_str_to_boolean(const char * s, int * ret) { if(s == NULL) { return -1; } else if (strcasecmp(s, "true") == 0 || strcasecmp(s, "on") == 0 || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0){ *ret = TRUE; return 1; } else if (strcasecmp(s, "false") == 0 || strcasecmp(s, "off") == 0 || strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0){ *ret = FALSE; return 1; } return -1; } #ifndef NUMCHARS # define NUMCHARS "0123456789." #endif #ifndef WHITESPACE # define WHITESPACE " \t\n\r\f" #endif unsigned long long crm_get_interval(const char * input) { ha_time_t *interval = NULL; char *input_copy = crm_strdup(input); char *input_copy_mutable = input_copy; unsigned long long msec = 0; if(input == NULL) { return 0; } else if(input[0] != 'P') { crm_free(input_copy); return crm_get_msec(input); } interval = parse_time_duration(&input_copy_mutable); msec = date_in_seconds(interval); free_ha_date(interval); crm_free(input_copy); return msec * 1000; } long long crm_get_msec(const char * input) { const char *cp = input; const char *units; long long multiplier = 1000; long long divisor = 1; long long msec = -1; char *end_text = NULL; /* double dret; */ if(input == NULL) { return msec; } cp += strspn(cp, WHITESPACE); units = cp + strspn(cp, NUMCHARS); units += strspn(units, WHITESPACE); if (strchr(NUMCHARS, *cp) == NULL) { return msec; } if (strncasecmp(units, "ms", 2) == 0 || strncasecmp(units, "msec", 4) == 0) { multiplier = 1; divisor = 1; } else if (strncasecmp(units, "us", 2) == 0 || strncasecmp(units, "usec", 4) == 0) { multiplier = 1; divisor = 1000; } else if (strncasecmp(units, "s", 1) == 0 || strncasecmp(units, "sec", 3) == 0) { multiplier = 1000; divisor = 1; } else if (strncasecmp(units, "m", 1) == 0 || strncasecmp(units, "min", 3) == 0) { multiplier = 60*1000; divisor = 1; } else if (strncasecmp(units, "h", 1) == 0 || strncasecmp(units, "hr", 2) == 0) { multiplier = 60*60*1000; divisor = 1; } else if (*units != EOS && *units != '\n' && *units != '\r') { return msec; } msec = crm_int_helper(cp, &end_text); msec *= multiplier; msec /= divisor; /* dret += 0.5; */ /* msec = (long long)dret; */ return msec; } const char * op_status2text(op_status_t status) { switch(status) { case LRM_OP_PENDING: return "pending"; break; case LRM_OP_DONE: return "complete"; break; case LRM_OP_ERROR: return "Error"; break; case LRM_OP_TIMEOUT: return "Timed Out"; break; case LRM_OP_NOTSUPPORTED: return "NOT SUPPORTED"; break; case LRM_OP_CANCELLED: return "Cancelled"; break; } crm_err("Unknown status: %d", status); return "UNKNOWN!"; } char * generate_op_key(const char *rsc_id, const char *op_type, int interval) { int len = 35; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); len += strlen(op_type); len += strlen(rsc_id); crm_malloc0(op_id, len); CRM_CHECK(op_id != NULL, return NULL); sprintf(op_id, "%s_%s_%d", rsc_id, op_type, interval); return op_id; } gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, int *interval) { char *mutable_key = NULL; char *mutable_key_ptr = NULL; int len = 0, offset = 0, ch = 0; CRM_CHECK(key != NULL, return FALSE); *interval = 0; len = strlen(key); offset = len-1; crm_debug_3("Source: %s", key); while(offset > 0 && isdigit(key[offset])) { int digits = len-offset; ch = key[offset] - '0'; CRM_CHECK(ch < 10, return FALSE); CRM_CHECK(ch >= 0, return FALSE); while(digits > 1) { digits--; ch = ch * 10; } *interval += ch; offset--; } crm_debug_3(" 
Interval: %d", *interval); CRM_CHECK(key[offset] == '_', return FALSE); mutable_key = crm_strdup(key); mutable_key_ptr = mutable_key_ptr; mutable_key[offset] = 0; offset--; while(offset > 0 && key[offset] != '_') { offset--; } CRM_CHECK(key[offset] == '_', crm_free(mutable_key); return FALSE); mutable_key_ptr = mutable_key+offset+1; crm_debug_3(" Action: %s", mutable_key_ptr); *op_type = crm_strdup(mutable_key_ptr); mutable_key[offset] = 0; offset--; CRM_CHECK(mutable_key != mutable_key_ptr, crm_free(mutable_key); return FALSE); crm_debug_3(" Resource: %s", mutable_key); *rsc_id = crm_strdup(mutable_key); crm_free(mutable_key); return TRUE; } char * generate_notify_key(const char *rsc_id, const char *notify_type, const char *op_type) { int len = 12; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); CRM_CHECK(notify_type != NULL, return NULL); len += strlen(op_type); len += strlen(rsc_id); len += strlen(notify_type); crm_malloc0(op_id, len); if(op_id != NULL) { sprintf(op_id, "%s_%s_notify_%s_0", rsc_id, notify_type, op_type); } return op_id; } char * generate_transition_magic_v202(const char *transition_key, int op_status) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); crm_malloc0(fail_state, len); if(fail_state != NULL) { snprintf(fail_state, len, "%d:%s", op_status,transition_key); } return fail_state; } char * generate_transition_magic(const char *transition_key, int op_status, int op_rc) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); crm_malloc0(fail_state, len); if(fail_state != NULL) { snprintf(fail_state, len, "%d:%d;%s", op_status, op_rc, transition_key); } return fail_state; } gboolean decode_transition_magic( const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc) { int res = 0; char *key = NULL; gboolean result = TRUE; CRM_CHECK(magic != NULL, return FALSE); CRM_CHECK(op_rc != NULL, return FALSE); CRM_CHECK(op_status != NULL, return FALSE); crm_malloc0(key, strlen(magic)); res = sscanf(magic, "%d:%d;%s", op_status, op_rc, key); if(res != 3) { crm_crit("Only found %d items in: %s", res, magic); result = FALSE; goto bail; } CRM_CHECK(decode_transition_key(key, uuid, transition_id, action_id, target_rc), result = FALSE; goto bail; ); bail: crm_free(key); return result; } char * generate_transition_key(int transition_id, int action_id, int target_rc, const char *node) { int len = 40; char *fail_state = NULL; CRM_CHECK(node != NULL, return NULL); len += strlen(node); crm_malloc0(fail_state, len); if(fail_state != NULL) { snprintf(fail_state, len, "%d:%d:%d:%s", action_id, transition_id, target_rc, node); } return fail_state; } gboolean decode_transition_key( const char *key, char **uuid, int *transition_id, int *action_id, int *target_rc) { int res = 0; gboolean done = FALSE; CRM_CHECK(uuid != NULL, return FALSE); CRM_CHECK(target_rc != NULL, return FALSE); CRM_CHECK(action_id != NULL, return FALSE); CRM_CHECK(transition_id != NULL, return FALSE); crm_malloc0(*uuid, strlen(key)); res = sscanf(key, "%d:%d:%d:%s", action_id, transition_id, target_rc, *uuid); switch(res) { case 4: /* Post Pacemaker 0.6 */ done = TRUE; break; case 3: case 2: /* this can be tricky - the UUID might start with an integer */ /* Until Pacemaker 0.6 */ done = TRUE; *target_rc = -1; res = sscanf(key, "%d:%d:%s", action_id, transition_id, *uuid); if(res == 2) { 
*action_id = -1; res = sscanf(key, "%d:%s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); } else if(res != 3) { CRM_CHECK(res == 3, done = FALSE); } break; case 1: /* Prior to Heartbeat 2.0.8 */ done = TRUE; *action_id = -1; *target_rc = -1; res = sscanf(key, "%d:%s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); break; default: crm_crit("Unhandled sscanf result (%d) for %s", res, key); } if(strlen(*uuid) != 36) { crm_warn("Bad UUID (%s) in sscanf result (%d) for %s", *uuid, res, key); } if(done == FALSE) { crm_err("Cannot decode '%s' rc=%d", key, res); crm_free(*uuid); *uuid = NULL; *target_rc = -1; *action_id = -1; *transition_id = -1; } return done; } void filter_action_parameters(xmlNode *param_set, const char *version) { char *key = NULL; char *timeout = NULL; char *interval = NULL; #if CRM_DEPRECATED_SINCE_2_0_5 const char *filter_205[] = { XML_ATTR_TE_TARGET_RC, XML_ATTR_LRM_PROBE, XML_RSC_ATTR_START, XML_RSC_ATTR_NOTIFY, XML_RSC_ATTR_UNIQUE, XML_RSC_ATTR_MANAGED, XML_RSC_ATTR_PRIORITY, XML_RSC_ATTR_MULTIPLE, XML_RSC_ATTR_STICKINESS, XML_RSC_ATTR_FAIL_STICKINESS, XML_RSC_ATTR_TARGET_ROLE, /* ignore clone fields */ XML_RSC_ATTR_INCARNATION, XML_RSC_ATTR_INCARNATION_MAX, XML_RSC_ATTR_INCARNATION_NODEMAX, XML_RSC_ATTR_MASTER_MAX, XML_RSC_ATTR_MASTER_NODEMAX, /* old field names */ "role", "crm_role", "te-target-rc", /* ignore notify fields */ "notify_stop_resource", "notify_stop_uname", "notify_start_resource", "notify_start_uname", "notify_active_resource", "notify_active_uname", "notify_inactive_resource", "notify_inactive_uname", "notify_promote_resource", "notify_promote_uname", "notify_demote_resource", "notify_demote_uname", "notify_master_resource", "notify_master_uname", "notify_slave_resource", "notify_slave_uname" }; #endif const char *attr_filter[] = { XML_ATTR_ID, XML_ATTR_CRM_VERSION, XML_LRM_ATTR_OP_DIGEST, }; gboolean do_delete = FALSE; int lpc = 0; static int meta_len = 0; if(meta_len == 0) { meta_len = strlen(CRM_META); } if(param_set == NULL) { return; } #if CRM_DEPRECATED_SINCE_2_0_5 if(version == NULL || compare_version("1.0.5", version) > 0) { for(lpc = 0; lpc < DIMOF(filter_205); lpc++) { xml_remove_prop(param_set, filter_205[lpc]); } } #endif for(lpc = 0; lpc < DIMOF(attr_filter); lpc++) { xml_remove_prop(param_set, attr_filter[lpc]); } key = crm_meta_name(XML_LRM_ATTR_INTERVAL); interval = crm_element_value_copy(param_set, key); crm_free(key); key = crm_meta_name(XML_ATTR_TIMEOUT); timeout = crm_element_value_copy(param_set, key); xml_prop_iter(param_set, prop_name, prop_value, do_delete = FALSE; if(strncasecmp(prop_name, CRM_META, meta_len) == 0) { do_delete = TRUE; } if(do_delete) { xml_remove_prop(param_set, prop_name); } ); if(crm_get_msec(interval) > 0 && compare_version(version, "1.0.8") > 0) { /* Re-instate the operation's timeout value */ if(timeout != NULL) { crm_xml_add(param_set, key, timeout); } } crm_free(interval); crm_free(timeout); crm_free(key); } void filter_reload_parameters(xmlNode *param_set, const char *restart_string) { int len = 0; char *name = NULL; char *match = NULL; if(param_set == NULL) { return; } xml_prop_iter(param_set, prop_name, prop_value, name = NULL; len = strlen(prop_name) + 3; crm_malloc0(name, len); sprintf(name, " %s ", prop_name); name[len-1] = 0; match = strstr(restart_string, name); if(match == NULL) { crm_debug_3("%s not found in %s", prop_name, restart_string); xml_remove_prop(param_set, prop_name); } crm_free(name); ); } void crm_abort(const char *file, const char *function, int line, const 
char *assert_condition, gboolean do_core, gboolean do_fork) { int rc = 0; int pid = 0; int status = 0; if(do_core == FALSE) { do_crm_log(LOG_ERR, "%s: Triggered assert at %s:%d : %s", function, file, line, assert_condition); return; } else if(do_fork) { pid=fork(); } else { do_crm_log(LOG_ERR, "%s: Triggered fatal assert at %s:%d : %s", function, file, line, assert_condition); } switch(pid) { case -1: do_crm_log(LOG_CRIT, "%s: Cannot create core for non-fatal assert at %s:%d : %s", function, file, line, assert_condition); return; default: /* Parent */ do_crm_log(LOG_ERR, "%s: Forked child %d to record non-fatal assert at %s:%d : %s", function, pid, file, line, assert_condition); do { rc = waitpid(pid, &status, 0); if(rc < 0 && errno != EINTR) { crm_perror(LOG_ERR,"%s: Cannot wait on forked child %d", function, pid); } } while(rc < 0 && errno == EINTR); return; case 0: /* Child */ abort(); break; } } char * generate_series_filename( const char *directory, const char *series, int sequence, gboolean bzip) { int len = 40; char *filename = NULL; const char *ext = "raw"; CRM_CHECK(directory != NULL, return NULL); CRM_CHECK(series != NULL, return NULL); len += strlen(directory); len += strlen(series); crm_malloc0(filename, len); CRM_CHECK(filename != NULL, return NULL); if(bzip) { ext = "bz2"; } sprintf(filename, "%s/%s-%d.%s", directory, series, sequence, ext); return filename; } int get_last_sequence(const char *directory, const char *series) { FILE *file_strm = NULL; int start = 0, length = 0, read_len = 0; char *series_file = NULL; char *buffer = NULL; int seq = 0; int len = 36; CRM_CHECK(directory != NULL, return 0); CRM_CHECK(series != NULL, return 0); len += strlen(directory); len += strlen(series); crm_malloc0(series_file, len); CRM_CHECK(series_file != NULL, return 0); sprintf(series_file, "%s/%s.last", directory, series); file_strm = fopen(series_file, "r"); if(file_strm == NULL) { crm_debug("Series file %s does not exist", series_file); crm_free(series_file); return 0; } /* see how big the file is */ start = ftell(file_strm); fseek(file_strm, 0L, SEEK_END); length = ftell(file_strm); fseek(file_strm, 0L, start); CRM_ASSERT(start == ftell(file_strm)); crm_debug_3("Reading %d bytes from file", length); crm_malloc0(buffer, (length+1)); read_len = fread(buffer, 1, length, file_strm); if(read_len != length) { crm_err("Calculated and read bytes differ: %d vs. 
%d", length, read_len); crm_free(buffer); buffer = NULL; } else if(length <= 0) { crm_info("%s was not valid", series_file); crm_free(buffer); buffer = NULL; } crm_free(series_file); seq = crm_parse_int(buffer, "0"); crm_free(buffer); fclose(file_strm); return seq; } void write_last_sequence( const char *directory, const char *series, int sequence, int max) { int rc = 0; int len = 36; FILE *file_strm = NULL; char *series_file = NULL; CRM_CHECK(directory != NULL, return); CRM_CHECK(series != NULL, return); if(max == 0) { return; } while(max > 0 && sequence > max) { sequence -= max; } len += strlen(directory); len += strlen(series); crm_malloc0(series_file, len); sprintf(series_file, "%s/%s.last", directory, series); file_strm = fopen(series_file, "w"); if(file_strm == NULL) { crm_err("Cannout open series file %s for writing", series_file); goto bail; } rc = fprintf(file_strm, "%d", sequence); if(rc < 0) { crm_perror(LOG_ERR,"Cannot write to series file %s", series_file); } bail: if(file_strm != NULL) { fflush(file_strm); fclose(file_strm); } crm_free(series_file); } #define LOCKSTRLEN 11 int crm_pid_active(long pid) { int rc = 0; int running = 0; char proc_path[PATH_MAX], exe_path[PATH_MAX], myexe_path[PATH_MAX]; if(pid <= 0) { return -1; } else if (kill(pid, 0) < 0 && errno == ESRCH) { return 0; } #ifndef HAVE_PROC_PID return 1; #endif /* check to make sure pid hasn't been reused by another process */ snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", pid); rc = readlink(proc_path, exe_path, PATH_MAX-1); if(rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); goto bail; } exe_path[rc] = 0; snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", (long unsigned int)getpid()); rc = readlink(proc_path, myexe_path, PATH_MAX-1); if(rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); goto bail; } myexe_path[rc] = 0; if(strcmp(exe_path, myexe_path) == 0) { running = 1; } bail: return running; } int crm_read_pidfile(const char *filename) { int fd; long pid = -1; char buf[LOCKSTRLEN+1]; if ((fd = open(filename, O_RDONLY)) < 0) { goto bail; } if (read(fd, buf, sizeof(buf)) < 1) { goto bail; } if (sscanf(buf, "%lu", &pid) > 0) { if (pid <= 0){ pid = -LSB_STATUS_STOPPED; } } bail: close(fd); return pid; } int crm_lock_pidfile(const char *filename) { struct stat sbuf; int fd = 0, rc = 0; long pid = 0, mypid = 0; char lf_name[256], tf_name[256], buf[LOCKSTRLEN+1]; mypid = (unsigned long) getpid(); snprintf(lf_name, sizeof(lf_name), "%s",filename); snprintf(tf_name, sizeof(tf_name), "%s.%lu", filename, mypid); if ((fd = open(lf_name, O_RDONLY)) >= 0) { if (fstat(fd, &sbuf) >= 0 && sbuf.st_size < LOCKSTRLEN) { sleep(1); /* if someone was about to create one, * give'm a sec to do so * Though if they follow our protocol, * this won't happen. They should really * put the pid in, then link, not the * other way around. */ } if (read(fd, buf, sizeof(buf)) > 0) { if (sscanf(buf, "%lu", &pid) > 0) { if (pid > 1 && pid != getpid() && crm_pid_active(pid)) { /* locked by existing process - give up */ close(fd); return -1; } } } unlink(lf_name); close(fd); } if ((fd = open(tf_name, O_CREAT | O_WRONLY | O_EXCL, 0644)) < 0) { /* Hmmh, why did we fail? 
Anyway, nothing we can do about it */ return -3; } /* Slight overkill with the %*d format ;-) */ snprintf(buf, sizeof(buf), "%*lu\n", LOCKSTRLEN-1, mypid); if (write(fd, buf, LOCKSTRLEN) != LOCKSTRLEN) { /* Again, nothing we can do about this */ rc = -3; close(fd); goto out; } close(fd); switch (link(tf_name, lf_name)) { case 0: if (stat(tf_name, &sbuf) < 0) { /* something weird happened */ rc = -3; } else if (sbuf.st_nlink < 2) { /* somehow, it didn't get through - NFS trouble? */ rc = -2; } else { rc = 0; } break; case EEXIST: rc = -1; break; default: rc = -3; } out: unlink(tf_name); return rc; } void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile) { long pid; const char *devnull = "/dev/null"; if(daemonize == FALSE) { return; } pid = fork(); if (pid < 0) { fprintf(stderr, "%s: could not start daemon\n", name); crm_perror(LOG_ERR,"fork"); exit(LSB_EXIT_GENERIC); } else if (pid > 0) { exit(LSB_EXIT_OK); } if (crm_lock_pidfile(pidfile) < 0 ) { pid = crm_read_pidfile(pidfile); if(crm_pid_active(pid) > 0) { crm_warn("%s: already running [pid %ld] (%s).\n", name, pid, pidfile); exit(LSB_EXIT_OK); } } umask(022); close(STDIN_FILENO); (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ close(STDOUT_FILENO); (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ close(STDERR_FILENO); (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ } gboolean crm_is_writable(const char *dir, const char *file, const char *user, const char *group, gboolean need_both) { int s_res = -1; struct stat buf; char *full_file = NULL; const char *target = NULL; gboolean pass = TRUE; gboolean readwritable = FALSE; CRM_ASSERT(dir != NULL); if(file != NULL) { full_file = crm_concat(dir, file, '/'); target = full_file; s_res = stat(full_file, &buf); if( s_res == 0 && S_ISREG(buf.st_mode) == FALSE ) { crm_err("%s must be a regular file", target); pass = FALSE; goto out; } } if (s_res != 0) { target = dir; s_res = stat(dir, &buf); if(s_res != 0) { crm_err("%s must exist and be a directory", dir); pass = FALSE; goto out; } else if( S_ISDIR(buf.st_mode) == FALSE ) { crm_err("%s must be a directory", dir); pass = FALSE; } } if(user) { struct passwd *sys_user = NULL; sys_user = getpwnam(user); readwritable = (sys_user != NULL && buf.st_uid == sys_user->pw_uid && (buf.st_mode & (S_IRUSR|S_IWUSR))); if(readwritable == FALSE) { crm_err("%s must be owned and r/w by user %s", target, user); if(need_both) { pass = FALSE; } } } if(group) { struct group *sys_grp = getgrnam(group); readwritable = ( sys_grp != NULL && buf.st_gid == sys_grp->gr_gid && (buf.st_mode & (S_IRGRP|S_IWGRP))); if(readwritable == FALSE) { if(need_both || user == NULL) { pass = FALSE; crm_err("%s must be owned and r/w by group %s", target, group); } else { crm_warn("%s should be owned and r/w by group %s", target, group); } } } out: crm_free(full_file); return pass; } static unsigned long long crm_bit_filter = 0; /* 0x00000002ULL; */ static unsigned int bit_log_level = LOG_DEBUG_5; long long crm_clear_bit(const char *function, long long word, long long bit) { unsigned int level = bit_log_level; if(bit & crm_bit_filter) { level = LOG_ERR; } do_crm_log_unlikely(level, "Bit 0x%.16llx cleared by %s", bit, function); word &= ~bit; return word; } long long crm_set_bit(const char *function, long long word, long long bit) { unsigned int level = bit_log_level; if(bit & crm_bit_filter) { level = LOG_ERR; } do_crm_log_unlikely(level, "Bit 0x%.16llx set by %s", bit, function); word |= bit; return word; } static const char *cluster_type = NULL; gboolean 
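/* Infer the cluster stack from the HA_cluster_type environment variable (defaulting to Heartbeat) and cache the answer; if the detected stack was not compiled in, the helpers below log a critical error and exit. */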
is_openais_cluster(void) { if(cluster_type == NULL) { cluster_type = getenv("HA_cluster_type"); if(cluster_type == NULL) { cluster_type = "Heartbeat"; } } if(safe_str_eq("openais", cluster_type)) { #if SUPPORT_AIS return TRUE; #else crm_crit("The installation of Pacemaker only supports Heartbeat" " but you're trying to run it on %s. Terminating.", cluster_type); exit(100); #endif } return FALSE; } gboolean is_heartbeat_cluster(void) { #if SUPPORT_HEARTBEAT return !is_openais_cluster(); #else if(is_openais_cluster() == FALSE) { crm_crit("The installation of Pacemaker only supports OpenAIS" " but you're trying to run it on %s. Terminating.", cluster_type); exit(100); } return FALSE; #endif } gboolean crm_str_eq(const char *a, const char *b, gboolean use_case) { if(a == b) { return TRUE; } else if(a == NULL || b == NULL) { /* shouldn't be comparing NULLs */ return FALSE; } else if(use_case && a[0] != b[0]) { return FALSE; } else if(strcasecmp(a, b) == 0) { return TRUE; } return FALSE; } char *crm_meta_name(const char *field) { int lpc = 0; int max = 0; char *crm_name = NULL; CRM_CHECK(field != NULL, return NULL); crm_name = crm_concat(CRM_META, field, '_'); /* Massage the names so they can be used as shell variables */ max = strlen(crm_name); for(; lpc < max; lpc++) { switch(crm_name[lpc]) { case '-': crm_name[lpc] = '_'; break; } } return crm_name; } const char *crm_meta_value(GHashTable *hash, const char *field) { char *key = NULL; const char *value = NULL; key = crm_meta_name(field); if(key) { value = g_hash_table_lookup(hash, key); crm_free(key); } return value; } static struct crm_option *crm_long_options = NULL; static const char *crm_app_description = NULL; static const char *crm_short_options = NULL; static const char *crm_app_usage = NULL; static struct option *crm_create_long_opts(struct crm_option *long_options) { struct option *long_opts = NULL; #ifdef HAVE_GETOPT_H int index = 0, lpc = 0; /* * A previous, possibly poor, choice of '?' as the short form of --help * means that getopt_long() returns '?' 
for both --help and for "unknown option" * * This dummy entry allows us to differentiate between the two in crm_get_option() * and exit with the correct error code */ crm_realloc(long_opts, (index+1) * sizeof(struct option)); long_opts[index].name = "__dummmy__"; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = '_'; index++; for(lpc = 0; long_options[lpc].name != NULL; lpc++) { if(long_options[lpc].name[0] == '-') { continue; } crm_realloc(long_opts, (index+1) * sizeof(struct option)); /*fprintf(stderr, "Creating %d %s = %c\n", index, * long_options[lpc].name, long_options[lpc].val); */ long_opts[index].name = long_options[lpc].name; long_opts[index].has_arg = long_options[lpc].has_arg; long_opts[index].flag = long_options[lpc].flag; long_opts[index].val = long_options[lpc].val; index++; } /* Now create the list terminator */ crm_realloc(long_opts, (index+1) * sizeof(struct option)); long_opts[index].name = NULL; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = 0; #endif return long_opts; } void crm_set_options(const char *short_options, const char *app_usage, struct crm_option *long_options, const char *app_desc) { if(short_options) { crm_short_options = short_options; } if(long_options) { crm_long_options = long_options; } if(app_desc) { crm_app_description = app_desc; } if(app_usage) { crm_app_usage = app_usage; } } int crm_get_option(int argc, char **argv, int *index) { #ifdef HAVE_GETOPT_H static struct option *long_opts = NULL; if(long_opts == NULL && crm_long_options) { long_opts = crm_create_long_opts(crm_long_options); } if(long_opts) { int flag = getopt_long(argc, argv, crm_short_options, long_opts, index); switch(flag) { case 0: return long_opts[*index].val; case -1: /* End of option processing */ break; case ':': crm_debug_2("Missing argument"); crm_help('?', 1); break; case '?': crm_help('?', *index?0:1); break; } return flag; } #endif if(crm_short_options) { return getopt(argc, argv, crm_short_options); } return -1; } void crm_help(char cmd, int exit_code) { int i = 0; FILE *stream = (exit_code ? 
stderr : stdout); if(cmd == 'v' || cmd == '$') { fprintf(stream, "%s %s for %s (Build: %s)\n", crm_system_name, VERSION, #if !SUPPORT_HEARTBEAT "OpenAIS", #elif !SUPPORT_AIS "Heartbeat", #else "OpenAIS and Heartbeat", #endif BUILD_VERSION); fprintf(stream, "\nWritten by Andrew Beekhof\n"); goto out; } fprintf(stream, "%s - %s\n", crm_system_name, crm_app_description); if(crm_app_usage) { fprintf(stream, "Usage: %s %s\n", crm_system_name, crm_app_usage); } if(crm_long_options) { fprintf(stream, "Options:\n"); for(i = 0; crm_long_options[i].name != NULL; i++) { if(crm_long_options[i].flags & pcmk_option_hidden) { } else if(crm_long_options[i].flags & pcmk_option_paragraph) { fprintf(stream, "%s\n\n", crm_long_options[i].desc); } else if(crm_long_options[i].flags & pcmk_option_example) { fprintf(stream, "\t#%s\n\n", crm_long_options[i].desc); } else if(crm_long_options[i].val == '-' && crm_long_options[i].desc) { fprintf(stream, "%s\n", crm_long_options[i].desc); } else { fprintf(stream, " -%c, --%s%c%s\t%s\n", crm_long_options[i].val, crm_long_options[i].name, crm_long_options[i].has_arg?'=':' ',crm_long_options[i].has_arg?"value":"", crm_long_options[i].desc?crm_long_options[i].desc:""); } } } else if(crm_short_options) { fprintf(stream, "Usage: %s - %s\n", crm_system_name, crm_app_description); for(i = 0; crm_short_options[i] != 0; i++) { int has_arg = FALSE; if(crm_short_options[i+1] == ':') { has_arg = TRUE; } fprintf(stream, " -%c %s\n", crm_short_options[i], has_arg?"{value}":""); if(has_arg) { i++; } } } fprintf(stream, "\nReport bugs to %s\n", PACKAGE_BUGREPORT); out: if(exit_code >= 0) { exit(exit_code); } } #include <../../tools/attrd.h> gboolean attrd_update(IPC_Channel *cluster, char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen) { gboolean success = FALSE; const char *reason = "Cluster connection failed"; /* remap common aliases */ if(safe_str_eq(section, "reboot")) { section = XML_CIB_TAG_STATUS; } else if(safe_str_eq(section, "forever")) { section = XML_CIB_TAG_NODES; } if(cluster == NULL) { reason = "No connection to the cluster"; } else { xmlNode *update = create_xml_node(NULL, __FUNCTION__); crm_xml_add(update, F_TYPE, T_ATTRD); crm_xml_add(update, F_ORIG, crm_system_name); if(name == NULL && command == 'U') { command = 'R'; } switch(command) { case 'D': case 'U': case 'v': crm_xml_add(update, F_ATTRD_TASK, "update"); crm_xml_add(update, F_ATTRD_ATTRIBUTE, name); break; case 'R': crm_xml_add(update, F_ATTRD_TASK, "refresh"); break; case 'q': crm_xml_add(update, F_ATTRD_TASK, "query"); break; } crm_xml_add(update, F_ATTRD_VALUE, value); crm_xml_add(update, F_ATTRD_DAMPEN, dampen); crm_xml_add(update, F_ATTRD_SECTION, section); crm_xml_add(update, F_ATTRD_HOST, host); crm_xml_add(update, F_ATTRD_SET, set); success = send_ipc_message(cluster, update); free_xml(update); } if(success) { crm_debug("Sent update: %s=%s for %s", name, value, host?host:"localhost"); return TRUE; } crm_info("Could not send update: %s=%s for %s", name, value, host?host:"localhost"); return FALSE; } gboolean attrd_lazy_update(char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen) { int max = 5; gboolean updated = FALSE; static IPC_Channel *cluster = NULL; while(updated == 0 && max > 0) { if(cluster == NULL) { crm_info("Connecting to cluster... 
%d retries remaining", max); cluster = init_client_ipc_comms_nodispatch(T_ATTRD); } if(cluster != NULL) { updated = attrd_update(cluster, command, host, name, value, section, set, dampen); } if(updated == 0) { cluster = NULL; sleep(2); max--; } } return updated; } gboolean attrd_update_no_mainloop(int *connection, char command, const char *host, const char *name, const char *value, const char *section, const char *set, const char *dampen) { int max = 5; gboolean updated = FALSE; static IPC_Channel *cluster = NULL; if(connection && *connection == 0 && cluster) { crm_info("Forcing a new connection to the cluster"); cluster = NULL; } while(updated == 0 && max > 0) { if(cluster == NULL) { crm_info("Connecting to cluster... %d retries remaining", max); cluster = init_client_ipc_comms_nodispatch(T_ATTRD); } if(connection) { if(cluster != NULL) { *connection = cluster->ops->get_recv_select_fd(cluster); } else { *connection = 0; } } if(cluster != NULL) { updated = attrd_update(cluster, command, host, name, value, section, set, dampen); } if(updated == 0) { cluster = NULL; sleep(2); max--; } } return updated; } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" static void append_digest(lrm_op_t *op, xmlNode *update, const char *version, const char *magic, int level) { /* this will enable us to later determine that the * resource's parameters have changed and we should force * a restart */ char *digest = NULL; xmlNode *args_xml = NULL; if(op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); filter_action_parameters(args_xml, version); digest = calculate_xml_digest(args_xml, TRUE, FALSE); #if 0 if(level < crm_log_level && op->interval == 0 && crm_str_eq(op->op_type, CRMD_ACTION_START, TRUE)) { char *digest_source = dump_xml_unformatted(args_xml); do_crm_log(level, "Calculated digest %s for %s (%s). 
Source: %s\n", digest, ID(update), magic, digest_source); crm_free(digest_source); } #endif crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); crm_free(digest); } xmlNode * create_operation_update( - xmlNode *parent, lrm_op_t *op, const char *caller_version, int target_rc, const char *origin) + xmlNode *parent, lrm_op_t *op, const char *caller_version, int target_rc, const char *origin, int level) { char *magic = NULL; const char *task = NULL; xmlNode *xml_op = NULL; char *op_id = NULL; char *local_user_data = NULL; CRM_CHECK(op != NULL, return NULL); - crm_debug_2("%s: Updating resouce %s after %s %s op", - origin, op->rsc_id, op_status2text(op->op_status), op->op_type); + do_crm_log(level, "%s: Updating resouce %s after %s %s op (interval=%d)", + origin, op->rsc_id, op_status2text(op->op_status), op->op_type, op->interval); if(op->op_status == LRM_OP_CANCELLED) { crm_debug_3("Ignoring cancelled op"); return NULL; } crm_debug_3("DC version: %s", caller_version); task = op->op_type; /* remap the task name under various scenarios * this makes life easier for the PE when its trying determin the current state */ if(crm_str_eq(task, "reload", TRUE)) { if(op->op_status == LRM_OP_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } else if(crm_str_eq(task, CRMD_ACTION_MIGRATE, TRUE)) { /* if the migrate_from fails it will have enough info to do the right thing */ if(op->op_status == LRM_OP_DONE) { task = CRMD_ACTION_STOP; } else { task = CRMD_ACTION_STATUS; } } else if(op->op_status == LRM_OP_DONE && crm_str_eq(task, CRMD_ACTION_MIGRATED, TRUE)) { task = CRMD_ACTION_START; } else if(crm_str_eq(task, CRMD_ACTION_NOTIFY, TRUE)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_DEV_ASSERT(n_type != NULL); CRM_DEV_ASSERT(n_task != NULL); op_id = generate_notify_key(op->rsc_id, n_type, n_task); /* these are not yet allowed to fail */ op->op_status = LRM_OP_DONE; op->rc = 0; } if (op_id == NULL) { op_id = generate_op_key(op->rsc_id, task, op->interval); } xml_op = find_entity(parent, XML_LRM_TAG_RSC_OP, op_id); if(xml_op != NULL) { crm_log_xml(LOG_DEBUG, "Replacing existing entry", xml_op); } else { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if(op->user_data == NULL) { crm_debug("Generating fake transition key for:" " %s_%s_%d %d from %s", op->rsc_id, op->op_type, op->interval, op->call_id, op->app_name); local_user_data = generate_transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } magic = generate_transition_magic(op->user_data, op->op_status, op->rc); crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_int(xml_op, XML_LRM_ATTR_INTERVAL, op->interval); if(compare_version("2.1", caller_version) <= 0) { if(op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_debug_2("Timing data (%s_%s_%d): last=%lu change=%lu exec=%lu queue=%lu", op->rsc_id, op->op_type, op->interval, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); crm_xml_add_int(xml_op, "last-run", 
op->t_run); crm_xml_add_int(xml_op, "last-rc-change", op->t_rcchange); crm_xml_add_int(xml_op, "exec-time", op->exec_time); crm_xml_add_int(xml_op, "queue-time", op->queue_time); } } append_digest(op, xml_op, caller_version, magic, LOG_DEBUG); if(op->op_status != LRM_OP_DONE && crm_str_eq(op->op_type, CRMD_ACTION_MIGRATED, TRUE)) { const char *host = crm_meta_value(op->params, "migrate_source_uuid"); crm_xml_add(xml_op, CRMD_ACTION_MIGRATED, host); } if(local_user_data) { crm_free(local_user_data); op->user_data = NULL; } crm_free(magic); crm_free(op_id); return xml_op; } diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index 519979e2a4..f155c9e96a 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,1820 +1,1829 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #define set_config_flag(data_set, option, flag) do { \ const char *tmp = pe_pref(data_set->config_hash, option); \ if(tmp) { \ if(crm_is_true(tmp)) { \ set_bit_inplace(data_set->flags, flag); \ } else { \ clear_bit_inplace(data_set->flags, flag); \ } \ } \ } while(0) gboolean unpack_rsc_op( resource_t *rsc, node_t *node, xmlNode *xml_op, enum action_fail_response *failed, pe_working_set_t *data_set); static void pe_fence_node(pe_working_set_t *data_set, node_t *node, const char *reason) { CRM_CHECK(node, return); if(node->details->unclean == FALSE) { if(is_set(data_set->flags, pe_flag_stonith_enabled)) { crm_warn("Node %s will be fenced %s", node->details->uname, reason); } else { crm_warn("Node %s is unclean %s", node->details->uname, reason); } } node->details->unclean = TRUE; } gboolean unpack_config(xmlNode *config, pe_working_set_t *data_set) { const char *value = NULL; GHashTable *config_hash = g_hash_table_new_full( g_str_hash,g_str_equal, g_hash_destroy_str,g_hash_destroy_str); data_set->config_hash = config_hash; unpack_instance_attributes( data_set->input, config, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set->now); verify_pe_options(data_set->config_hash); set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes); crm_info("Startup probes: %s", is_set(data_set->flags, pe_flag_startup_probes)?"enabled":"disabled (dangerous)"); value = pe_pref(data_set->config_hash, "stonith-timeout"); data_set->stonith_timeout = crm_get_msec(value); crm_debug("STONITH timeout: %d", data_set->stonith_timeout); set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled); crm_debug("STONITH of failed nodes is %s", is_set(data_set->flags, pe_flag_stonith_enabled)?"enabled":"disabled"); data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action"); crm_debug_2("STONITH will %s nodes", data_set->stonith_action); set_config_flag(data_set, "stop-all-resources", 
pe_flag_stop_everything); crm_debug("Stop all active resources: %s", is_set(data_set->flags, pe_flag_stop_everything)?"true":"false"); set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster); if(is_set(data_set->flags, pe_flag_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pe_pref(data_set->config_hash, "default-resource-stickiness"); data_set->default_resource_stickiness = char2score(value); crm_debug("Default stickiness: %d", data_set->default_resource_stickiness); value = pe_pref(data_set->config_hash, "no-quorum-policy"); if(safe_str_eq(value, "ignore")) { data_set->no_quorum_policy = no_quorum_ignore; } else if(safe_str_eq(value, "freeze")) { data_set->no_quorum_policy = no_quorum_freeze; } else if(safe_str_eq(value, "suicide")) { gboolean do_panic = FALSE; crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC, &do_panic); if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE){ crm_config_err("Setting no-quorum-policy=suicide makes no sense if stonith-enabled=false"); } if(do_panic && is_set(data_set->flags, pe_flag_stonith_enabled)) { data_set->no_quorum_policy = no_quorum_suicide; } else if(is_set(data_set->flags, pe_flag_have_quorum) == FALSE && do_panic == FALSE) { crm_notice("Resetting no-quorum-policy to 'stop': The cluster has never had quorum"); data_set->no_quorum_policy = no_quorum_stop; } } else { data_set->no_quorum_policy = no_quorum_stop; } switch (data_set->no_quorum_policy) { case no_quorum_freeze: crm_debug("On loss of CCM Quorum: Freeze resources"); break; case no_quorum_stop: crm_debug("On loss of CCM Quorum: Stop ALL resources"); break; case no_quorum_suicide: crm_notice("On loss of CCM Quorum: Fence all remaining nodes"); break; case no_quorum_ignore: crm_notice("On loss of CCM Quorum: Ignore"); break; } set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans); crm_debug_2("Orphan resources are %s", is_set(data_set->flags, pe_flag_stop_rsc_orphans)?"stopped":"ignored"); set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans); crm_debug_2("Orphan resource actions are %s", is_set(data_set->flags, pe_flag_stop_action_orphans)?"stopped":"ignored"); set_config_flag(data_set, "remove-after-stop", pe_flag_remove_after_stop); crm_debug_2("Stopped resources are removed from the status section: %s", is_set(data_set->flags, pe_flag_remove_after_stop)?"true":"false"); set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode); crm_debug_2("Maintenance mode: %s", is_set(data_set->flags, pe_flag_maintenance_mode)?"true":"false"); if(is_set(data_set->flags, pe_flag_maintenance_mode)) { clear_bit(data_set->flags, pe_flag_is_managed_default); } else { set_config_flag(data_set, "is-managed-default", pe_flag_is_managed_default); } crm_debug_2("By default resources are %smanaged", is_set(data_set->flags, pe_flag_is_managed_default)?"":"not "); set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal); crm_debug_2("Start failures are %s", is_set(data_set->flags, pe_flag_start_failure_fatal)?"always fatal":"handled by failcount"); node_score_red = char2score(pe_pref(data_set->config_hash, "node-health-red")); node_score_green = char2score(pe_pref(data_set->config_hash, "node-health-green")); node_score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow")); crm_info("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s", pe_pref(data_set->config_hash, "node-health-red"), 
pe_pref(data_set->config_hash, "node-health-yellow"), pe_pref(data_set->config_hash, "node-health-green")); data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy"); crm_debug_2("Placement strategy: %s", data_set->placement_strategy); return TRUE; } gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t *data_set) { node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; gboolean unseen_are_unclean = TRUE; const char *blind_faith = pe_pref( data_set->config_hash, "startup-fencing"); if(crm_is_true(blind_faith) == FALSE) { unseen_are_unclean = FALSE; crm_warn("Blind faith: not fencing unseen nodes"); } xml_child_iter_filter( xml_nodes, xml_obj, XML_CIB_TAG_NODE, new_node = NULL; id = crm_element_value(xml_obj, XML_ATTR_ID); uname = crm_element_value(xml_obj, XML_ATTR_UNAME); type = crm_element_value(xml_obj, XML_ATTR_TYPE); score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE); crm_debug_3("Processing node %s/%s", uname, id); if(id == NULL) { crm_config_err("Must specify id tag in <node>"); continue; } if(type == NULL) { crm_config_err("Must specify type tag in <node>"); continue; } if(pe_find_node(data_set->nodes, uname) != NULL) { crm_config_warn("Detected multiple node entries with uname=%s" " - this is rarely intended", uname); } crm_malloc0(new_node, sizeof(node_t)); if(new_node == NULL) { return FALSE; } new_node->weight = char2score(score); new_node->fixed = FALSE; crm_malloc0(new_node->details, sizeof(struct node_shared_s)); if(new_node->details == NULL) { crm_free(new_node); return FALSE; } crm_debug_3("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->type = node_ping; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->running_rsc = NULL; new_node->details->attrs = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); new_node->details->utilization = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); /* if(data_set->have_quorum == FALSE */ /* && data_set->no_quorum_policy == no_quorum_stop) { */ /* /\* start shutting resources down *\/ */ /* new_node->weight = -INFINITY; */ /* } */ if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE || unseen_are_unclean == FALSE) { /* blind faith... 
*/ new_node->details->unclean = FALSE; } else { /* all nodes are unclean until we've seen their * status entry */ new_node->details->unclean = TRUE; } if(type == NULL || safe_str_eq(type, "member") || safe_str_eq(type, NORMALNODE)) { new_node->details->type = node_member; } add_node_attrs(xml_obj, new_node, FALSE, data_set); unpack_instance_attributes( data_set->input, xml_obj, XML_TAG_UTILIZATION, NULL, new_node->details->utilization, NULL, FALSE, data_set->now); data_set->nodes = g_list_append(data_set->nodes, new_node); crm_debug_3("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME)); ); return TRUE; } static void g_hash_destroy_node_list(gpointer data) { GListPtr domain = data; slist_destroy(node_t, node, domain, crm_free(node)); } gboolean unpack_domains(xmlNode *xml_domains, pe_working_set_t *data_set) { GListPtr domain = NULL; const char *id = NULL; crm_info("Unpacking domains"); data_set->domains = g_hash_table_new_full( g_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_node_list); xml_child_iter_filter( xml_domains, xml_domain, XML_CIB_TAG_DOMAIN, domain = NULL; id = crm_element_value(xml_domain, XML_ATTR_ID); xml_child_iter_filter( xml_domain, xml_node, XML_CIB_TAG_NODE, node_t *copy = NULL; node_t *node = NULL; const char *uname = crm_element_value(xml_node, "name"); const char *score = crm_element_value(xml_node, XML_RULE_ATTR_SCORE); if(uname == NULL) { crm_config_err("Invalid domain %s: Must specify id tag in <node>", id); continue; } node = pe_find_node(data_set->nodes, uname); if(node == NULL) { node = pe_find_node_id(data_set->nodes, uname); } if(node == NULL) { crm_config_warn("Invalid domain %s: Node %s does not exist", id, uname); continue; } copy = node_copy(node); copy->weight = char2score(score); crm_debug("Adding %s to domain %s with score %s", node->details->uname, id, score); domain = g_list_append(domain, copy); ); if(domain) { crm_debug("Created domain %s with %d members", id, g_list_length(domain)); g_hash_table_replace(data_set->domains, crm_strdup(id), domain); } ); return TRUE; } gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t *data_set) { xml_child_iter( xml_resources, xml_obj, resource_t *new_rsc = NULL; crm_debug_3("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj)); if(common_unpack(xml_obj, &new_rsc, NULL, data_set)) { data_set->resources = g_list_append( data_set->resources, new_rsc); print_resource(LOG_DEBUG_3, "Added", new_rsc, FALSE); } else { crm_config_err("Failed unpacking %s %s", crm_element_name(xml_obj), crm_element_value(xml_obj, XML_ATTR_ID)); if(new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } } ); data_set->resources = g_list_sort( data_set->resources, sort_rsc_priority); if(is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { crm_config_err("Resource start-up disabled since no STONITH resources have been defined"); crm_config_err("Either configure some or disable STONITH with the stonith-enabled option"); crm_config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } /* remove nodes that are down, stopping */ /* create +ve rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? 
*/ gboolean unpack_status(xmlNode * status, pe_working_set_t *data_set) { const char *id = NULL; const char *uname = NULL; xmlNode * lrm_rsc = NULL; xmlNode * attrs = NULL; node_t *this_node = NULL; crm_debug_3("Beginning unpack"); xml_child_iter_filter( status, node_state, XML_CIB_TAG_STATE, id = crm_element_value(node_state, XML_ATTR_ID); uname = crm_element_value(node_state, XML_ATTR_UNAME); attrs = find_xml_node( node_state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); crm_debug_3("Processing node %s", uname); this_node = pe_find_node_id(data_set->nodes, id); if(uname == NULL) { /* error */ continue; } else if(this_node == NULL) { crm_config_warn("Node %s in status section no longer exists", uname); continue; } /* Mark the node as provisionally clean * - at least we have seen it in the current cluster's lifetime */ this_node->details->unclean = FALSE; add_node_attrs(attrs, this_node, TRUE, data_set); if(crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) { crm_info("Node %s is in standby-mode", this_node->details->uname); this_node->details->standby = TRUE; } crm_debug_3("determining node state"); determine_online_status(node_state, this_node, data_set); if(this_node->details->online && data_set->no_quorum_policy == no_quorum_suicide) { /* Everything else should flow from this automatically * At least until the PE becomes able to migrate off healthy resources */ pe_fence_node(data_set, this_node, "because the cluster does not have quorum"); } if(this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ crm_debug_3("Processing lrm resource entries"); unpack_lrm_resources(this_node, lrm_rsc, data_set); } ); return TRUE; } static gboolean determine_online_status_no_fencing(pe_working_set_t *data_set, xmlNode * node_state, node_t *this_node) { gboolean online = FALSE; const char *join_state = crm_element_value(node_state, XML_CIB_ATTR_JOINSTATE); const char *crm_state = crm_element_value(node_state, XML_CIB_ATTR_CRMDSTATE); const char *ccm_state = crm_element_value(node_state, XML_CIB_ATTR_INCCM); const char *ha_state = crm_element_value(node_state, XML_CIB_ATTR_HASTATE); const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE); if(ha_state == NULL) { ha_state = DEADSTATUS; } if(!crm_is_true(ccm_state) || safe_str_eq(ha_state, DEADSTATUS)){ crm_debug_2("Node is down: ha_state=%s, ccm_state=%s", crm_str(ha_state), crm_str(ccm_state)); } else if(safe_str_eq(crm_state, ONLINESTATUS)) { if(safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) { online = TRUE; } else { crm_debug("Node is not ready to run resources: %s", join_state); } } else if(this_node->details->expected_up == FALSE) { crm_debug_2("CRMd is down: ha_state=%s, ccm_state=%s", crm_str(ha_state), crm_str(ccm_state)); crm_debug_2("\tcrm_state=%s, join_state=%s, expected=%s", crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } else { /* mark it unclean */ pe_fence_node(data_set, this_node, "because it is partially and/or un-expectedly down"); crm_info("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } 
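/* without fencing we only trust a node that reports its CRMd online and has completed the join sequence; every other partially-up combination above is either logged as down or flagged unclean for later recovery */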
return online; } static gboolean determine_online_status_fencing(pe_working_set_t *data_set, xmlNode * node_state, node_t *this_node) { gboolean online = FALSE; gboolean do_terminate = FALSE; const char *join_state = crm_element_value(node_state, XML_CIB_ATTR_JOINSTATE); const char *crm_state = crm_element_value(node_state, XML_CIB_ATTR_CRMDSTATE); const char *ccm_state = crm_element_value(node_state, XML_CIB_ATTR_INCCM); const char *ha_state = crm_element_value(node_state, XML_CIB_ATTR_HASTATE); const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE); const char *terminate = g_hash_table_lookup(this_node->details->attrs, "terminate"); if(ha_state == NULL) { ha_state = DEADSTATUS; } if(crm_is_true(terminate)) { do_terminate = TRUE; } else if(terminate != NULL && strlen(terminate) > 0) { /* could be a time() value */ char t = terminate[0]; if(t != '0' && isdigit(t)) { do_terminate = TRUE; } } if(crm_is_true(ccm_state) && safe_str_eq(ha_state, ACTIVESTATUS) && safe_str_eq(crm_state, ONLINESTATUS)) { if(safe_str_eq(join_state, CRMD_JOINSTATE_MEMBER)) { online = TRUE; if(do_terminate) { pe_fence_node(data_set, this_node, "because termination was requested"); this_node->details->shutdown = TRUE; } } else if(join_state == exp_state /* == NULL */) { crm_info("Node %s is coming up", this_node->details->uname); crm_debug("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } else if(safe_str_eq(join_state, CRMD_JOINSTATE_PENDING)) { crm_info("Node %s is not ready to run resources", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; } else if(safe_str_eq(join_state, CRMD_JOINSTATE_NACK)) { crm_warn("Node %s is not part of the cluster", this_node->details->uname); this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; + } else if(safe_str_eq(join_state, exp_state)) { + crm_info("Node %s is still coming up: %s", + this_node->details->uname, join_state); + crm_info("\tha_state=%s, ccm_state=%s, crm_state=%s", + crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state)); + this_node->details->standby = TRUE; + this_node->details->pending = TRUE; + online = TRUE; + } else { crm_warn("Node %s (%s) is un-expectedly down", this_node->details->uname, this_node->details->id); crm_info("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); pe_fence_node(data_set, this_node, "because it is un-expectedly down"); } } else if(crm_is_true(ccm_state) == FALSE && safe_str_eq(ha_state, DEADSTATUS) && safe_str_eq(crm_state, OFFLINESTATUS) && this_node->details->expected_up == FALSE) { crm_debug("Node %s is down: join_state=%s, expected=%s", this_node->details->uname, crm_str(join_state), crm_str(exp_state)); #if 0 /* While a nice optimization, it causes the cluster to block until the node * comes back online. Which is a serious problem if the cluster software * is not configured to start at boot or stonith is configured to merely * stop the node instead of restart it. 
* Easily triggered by setting terminate=true for the DC */ } else if(do_terminate) { crm_info("Node %s is %s after forced termination", this_node->details->uname, crm_is_true(ccm_state)?"coming up":"going down"); crm_debug("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); if(crm_is_true(ccm_state) == FALSE) { this_node->details->standby = TRUE; this_node->details->pending = TRUE; online = TRUE; } #endif } else if(this_node->details->expected_up) { /* mark it unclean */ pe_fence_node(data_set, this_node, "because it is un-expectedly down"); crm_info("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } else { crm_info("Node %s is down", this_node->details->uname); crm_debug("\tha_state=%s, ccm_state=%s," " crm_state=%s, join_state=%s, expected=%s", crm_str(ha_state), crm_str(ccm_state), crm_str(crm_state), crm_str(join_state), crm_str(exp_state)); } return online; } gboolean determine_online_status( xmlNode * node_state, node_t *this_node, pe_working_set_t *data_set) { gboolean online = FALSE; const char *shutdown = NULL; const char *exp_state = crm_element_value(node_state, XML_CIB_ATTR_EXPSTATE); if(this_node == NULL) { crm_config_err("No node to check"); return online; } this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN); if(shutdown != NULL && safe_str_neq("0", shutdown)) { this_node->details->shutdown = TRUE; } else if(safe_str_eq(exp_state, CRMD_JOINSTATE_MEMBER)) { this_node->details->expected_up = TRUE; } if(is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { online = determine_online_status_no_fencing( data_set, node_state, this_node); } else { online = determine_online_status_fencing( data_set, node_state, this_node); } if(online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if(online && this_node->details->shutdown) { /* dont run resources here */ this_node->fixed = TRUE; this_node->weight = -INFINITY; } if(this_node->details->unclean) { pe_proc_warn("Node %s is unclean", this_node->details->uname); } else if(this_node->details->online) { crm_info("Node %s is %s", this_node->details->uname, this_node->details->shutdown?"shutting down": this_node->details->pending?"pending": this_node->details->standby?"standby":"online"); } else { crm_debug_2("Node %s is offline", this_node->details->uname); } return online; } #define set_char(x) last_rsc_id[lpc] = x; complete = TRUE; char * clone_zero(const char *last_rsc_id) { int lpc = 0; char *zero = NULL; CRM_CHECK(last_rsc_id != NULL, return NULL); if(last_rsc_id != NULL) { lpc = strlen(last_rsc_id); } while(--lpc > 0) { switch(last_rsc_id[lpc]) { case 0: return NULL; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': crm_malloc0(zero, lpc + 3); memcpy(zero, last_rsc_id, lpc); zero[lpc] = ':'; zero[lpc+1] = '0'; zero[lpc+2] = 0; return zero; } } return NULL; } char * increment_clone(char *last_rsc_id) { int lpc = 0; int len = 0; char *tmp = NULL; gboolean complete = FALSE; CRM_CHECK(last_rsc_id != NULL, return NULL); if(last_rsc_id != NULL) { len = strlen(last_rsc_id); } lpc = len-1; while(complete == FALSE && lpc 
> 0) { switch (last_rsc_id[lpc]) { case 0: lpc--; break; case '0': set_char('1'); break; case '1': set_char('2'); break; case '2': set_char('3'); break; case '3': set_char('4'); break; case '4': set_char('5'); break; case '5': set_char('6'); break; case '6': set_char('7'); break; case '7': set_char('8'); break; case '8': set_char('9'); break; case '9': last_rsc_id[lpc] = '0'; lpc--; break; case ':': tmp = last_rsc_id; crm_malloc0(last_rsc_id, len + 2); memcpy(last_rsc_id, tmp, len); last_rsc_id[++lpc] = '1'; last_rsc_id[len] = '0'; last_rsc_id[len+1] = 0; complete = TRUE; crm_free(tmp); break; default: crm_err("Unexpected char: %c (%d)", last_rsc_id[lpc], lpc); break; } } return last_rsc_id; } static resource_t * create_fake_resource(const char *rsc_id, xmlNode *rsc_entry, pe_working_set_t *data_set) { resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); common_unpack(xml_rsc, &rsc, NULL, data_set); set_bit(rsc->flags, pe_rsc_orphan); data_set->resources = g_list_append(data_set->resources, rsc); return rsc; } extern resource_t *create_child_clone(resource_t *rsc, int sub_id, pe_working_set_t *data_set); static resource_t *find_clone(pe_working_set_t *data_set, node_t *node, resource_t *parent, const char *rsc_id) { int len = 0; resource_t *rsc = NULL; char *base = clone_zero(rsc_id); char *alt_rsc_id = crm_strdup(rsc_id); CRM_ASSERT(parent != NULL); CRM_ASSERT(parent->variant == pe_clone || parent->variant == pe_master); if(base) { len = strlen(base); } if(len > 0) { base[len-1] = 0; } crm_debug_3("Looking for %s on %s in %s %d", rsc_id, node->details->uname, parent->id, is_set(parent->flags, pe_rsc_unique)); if(is_set(parent->flags, pe_rsc_unique)) { crm_debug_3("Looking for %s", rsc_id); rsc = parent->fns->find_rsc(parent, rsc_id, FALSE, FALSE, NULL, TRUE); } else { rsc = parent->fns->find_rsc(parent, base, FALSE, TRUE, node, TRUE); if(rsc != NULL && rsc->running_on) { rsc = NULL; crm_debug_3("Looking for an existing orphan for %s: %s on %s", parent->id, rsc_id, node->details->uname); /* There is already an instance of this _anonymous_ clone active on "node". * * If there is a partially active orphan (only applies to clone groups) on * the same node, use that. * Otherwise create a new (orphaned) instance at "orphan_check:". */ slist_iter(child, resource_t, parent->children, lpc, node_t *loc = child->fns->location(child, NULL, TRUE); if(loc && loc->details == node->details) { resource_t *tmp = child->fns->find_rsc(child, base, FALSE, TRUE, NULL, TRUE); if(tmp && tmp->running_on == NULL) { rsc = tmp; break; } } ); goto orphan_check; } while(rsc == NULL) { crm_debug_3("Trying %s", alt_rsc_id); rsc = parent->fns->find_rsc(parent, alt_rsc_id, FALSE, FALSE, NULL, TRUE); if(rsc == NULL) { break; } else if(rsc->running_on == NULL) { break; } alt_rsc_id = increment_clone(alt_rsc_id); rsc = NULL; } } orphan_check: if(rsc == NULL) { /* Create an extra orphan */ resource_t *top = create_child_clone(parent, -1, data_set); crm_debug("Created orphan for %s: %s on %s", parent->id, rsc_id, node->details->uname); rsc = top->fns->find_rsc(top, base, FALSE, TRUE, NULL, TRUE); CRM_ASSERT(rsc != NULL); } crm_free(rsc->clone_name); rsc->clone_name = NULL; if(safe_str_neq(rsc_id, rsc->id)) { crm_info("Internally renamed %s on %s to %s%s", rsc_id, node->details->uname, rsc->id, is_set(rsc->flags, pe_rsc_orphan)?" 
(ORPHAN)":""); rsc->clone_name = crm_strdup(rsc_id); } crm_free(alt_rsc_id); crm_free(base); return rsc; } static resource_t * unpack_find_resource( pe_working_set_t *data_set, node_t *node, const char *rsc_id, xmlNode *rsc_entry) { resource_t *rsc = NULL; resource_t *clone_parent = NULL; char *alt_rsc_id = crm_strdup(rsc_id); crm_debug_2("looking for %s", rsc_id); rsc = pe_find_resource(data_set->resources, alt_rsc_id); /* no match */ if(rsc == NULL) { /* Even when clone-max=0, we still create a single :0 orphan to match against */ char *tmp = clone_zero(alt_rsc_id); resource_t *clone0 = pe_find_resource(data_set->resources, tmp); clone_parent = uber_parent(clone0); crm_free(tmp); crm_debug_2("%s not found: %s", alt_rsc_id, clone_parent?clone_parent->id:"orphan"); } else { clone_parent = uber_parent(rsc); } if(clone_parent && clone_parent->variant > pe_group) { rsc = find_clone(data_set, node, clone_parent, rsc_id); CRM_ASSERT(rsc != NULL); } crm_free(alt_rsc_id); return rsc; } static resource_t * process_orphan_resource(xmlNode *rsc_entry, node_t *node, pe_working_set_t *data_set) { resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname); rsc = create_fake_resource(rsc_id, rsc_entry, data_set); if(is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } else { print_resource(LOG_DEBUG_3, "Added orphan", rsc, FALSE); CRM_CHECK(rsc != NULL, return NULL); resource_location(rsc, NULL, -INFINITY, "__orphan_dont_run__", data_set); } return rsc; } static void process_rsc_state(resource_t *rsc, node_t *node, enum action_fail_response on_fail, xmlNode *migrate_op, pe_working_set_t *data_set) { if(on_fail == action_migrate_failure) { node_t *from = NULL; const char *uuid = crm_element_value(migrate_op, CRMD_ACTION_MIGRATED); on_fail = action_fail_recover; from = pe_find_node_id(data_set->nodes, uuid); if(from != NULL) { process_rsc_state(rsc, from, on_fail, NULL, data_set); } else { crm_log_xml_err(migrate_op, "Bad Op"); } } crm_debug_2("Resource %s is %s on %s: on_fail=%s", rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail)); /* process current state */ if(rsc->role != RSC_ROLE_UNKNOWN) { rsc->known_on = g_list_append(rsc->known_on, node); } if(node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = action_fail_ignore; } switch(on_fail) { case action_fail_ignore: /* nothing to do */ break; case action_fail_fence: /* treat it as if it is still running * but also mark the node as unclean */ pe_fence_node(data_set, node, "to recover from resource failure(s)"); break; case action_fail_standby: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case action_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ clear_bit(rsc->flags, pe_rsc_managed); break; case action_fail_migrate: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -INFINITY, "__action_migration_auto__",data_set); break; case action_fail_stop: rsc->next_role = RSC_ROLE_STOPPED; break; case action_fail_recover: if(rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { set_bit(rsc->flags, pe_rsc_failed); stop_action(rsc, node, FALSE); } break; case action_migrate_failure: /* anything extra? 
*/ break; } if(rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { if(is_set(rsc->flags, pe_rsc_orphan)) { if(is_set(rsc->flags, pe_rsc_managed)) { crm_config_warn("Detected active orphan %s running on %s", rsc->id, node->details->uname); } else { crm_config_warn("Cluster configured not to stop active orphans." " %s must be stopped manually on %s", rsc->id, node->details->uname); } } native_add_running(rsc, node, data_set); if(on_fail != action_fail_ignore) { set_bit(rsc->flags, pe_rsc_failed); } } else if(rsc->clone_name) { crm_debug_2("Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); crm_free(rsc->clone_name); rsc->clone_name = NULL; } else { char *key = stop_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, node); slist_iter(stop, action_t, possible_matches, lpc, stop->optional = TRUE; ); crm_free(key); } } /* create active recurring operations as optional */ static void process_recurring(node_t *node, resource_t *rsc, int start_index, int stop_index, GListPtr sorted_op_list, pe_working_set_t *data_set) { const char *task = NULL; const char *status = NULL; crm_debug_3("%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); slist_iter(rsc_op, xmlNode, sorted_op_list, lpc, int interval = 0; char *key = NULL; const char *id = ID(rsc_op); const char *interval_s = NULL; if(node->details->online == FALSE) { crm_debug_4("Skipping %s/%s: node is offline", rsc->id, node->details->uname); break; } else if(start_index < stop_index) { crm_debug_4("Skipping %s/%s: not active", rsc->id, node->details->uname); break; } else if(lpc <= start_index) { crm_debug_4("Skipping %s/%s: old", id, node->details->uname); continue; } interval_s = crm_element_value(rsc_op,XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if(interval == 0) { crm_debug_4("Skipping %s/%s: non-recurring", id, node->details->uname); continue; } status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if(safe_str_eq(status, "-1")) { crm_debug_4("Skipping %s/%s: status", id, node->details->uname); continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); /* create the action */ key = generate_op_key(rsc->id, task, interval); crm_debug_3("Creating %s/%s", key, node->details->uname); custom_action(rsc, key, task, node, TRUE, TRUE, data_set); ); } void calculate_active_ops(GListPtr sorted_op_list, int *start_index, int *stop_index) { const char *task = NULL; const char *status = NULL; *stop_index = -1; *start_index = -1; slist_iter( rsc_op, xmlNode, sorted_op_list, lpc, task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS); if(safe_str_eq(task, CRMD_ACTION_STOP) && safe_str_eq(status, "0")) { *stop_index = lpc; } else if(safe_str_eq(task, CRMD_ACTION_START)) { *start_index = lpc; } else if(*start_index <= *stop_index && safe_str_eq(task, CRMD_ACTION_STATUS)) { const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); if(safe_str_eq(rc, "0") || safe_str_eq(rc, "8")) { *start_index = lpc; } } ); } static void unpack_lrm_rsc_state( node_t *node, xmlNode * rsc_entry, pe_working_set_t *data_set) { int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = RSC_ROLE_UNKNOWN; const char *task = NULL; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); resource_t *rsc = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; xmlNode *migrate_op = NULL; enum action_fail_response on_fail = FALSE; enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; crm_debug_3("[%s] 
Processing %s on %s", crm_element_name(rsc_entry), rsc_id, node->details->uname); /* extract operations */ op_list = NULL; sorted_op_list = NULL; xml_child_iter_filter( rsc_entry, rsc_op, XML_LRM_TAG_RSC_OP, op_list = g_list_append(op_list, rsc_op); ); if(op_list == NULL) { /* if there are no operations, there is nothing to do */ return; } /* find the resource */ rsc = unpack_find_resource(data_set, node, rsc_id, rsc_entry); if(rsc == NULL) { rsc = process_orphan_resource(rsc_entry, node, data_set); } CRM_ASSERT(rsc != NULL); /* process operations */ saved_role = rsc->role; on_fail = action_fail_ignore; rsc->role = RSC_ROLE_UNKNOWN; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); slist_iter( rsc_op, xmlNode, sorted_op_list, lpc, task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); if(safe_str_eq(task, CRMD_ACTION_MIGRATED)) { migrate_op = rsc_op; } unpack_rsc_op(rsc, node, rsc_op, &on_fail, data_set); ); /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail, migrate_op, data_set); if(get_target_role(rsc, &req_role)) { if(rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) { crm_debug("%s: Overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); rsc->next_role = req_role; } else if(req_role > rsc->next_role) { crm_info("%s: Not overwriting calculated next role %s" " with requested next role %s", rsc->id, role2text(rsc->next_role), role2text(req_role)); } } if(saved_role > rsc->role) { rsc->role = saved_role; } } gboolean unpack_lrm_resources(node_t *node, xmlNode * lrm_rsc_list, pe_working_set_t *data_set) { CRM_CHECK(node != NULL, return FALSE); crm_debug_3("Unpacking resources on %s", node->details->uname); xml_child_iter_filter( lrm_rsc_list, rsc_entry, XML_LRM_TAG_RESOURCE, unpack_lrm_rsc_state(node, rsc_entry, data_set); ); return TRUE; } static void set_active(resource_t *rsc) { resource_t *top = uber_parent(rsc); if(top && top->variant == pe_master) { rsc->role = RSC_ROLE_SLAVE; } else { rsc->role = RSC_ROLE_STARTED; } } gboolean unpack_rsc_op(resource_t *rsc, node_t *node, xmlNode *xml_op, enum action_fail_response *on_fail, pe_working_set_t *data_set) { const char *id = NULL; const char *key = NULL; const char *task = NULL; const char *magic = NULL; const char *task_id = NULL; const char *actual_rc = NULL; /* const char *target_rc = NULL; */ const char *task_status = NULL; const char *interval_s = NULL; const char *op_digest = NULL; const char *op_version = NULL; int interval = 0; int task_status_i = -2; int actual_rc_i = 0; int target_rc = -1; action_t *action = NULL; node_t *effective_node = NULL; resource_t *failed = NULL; gboolean expired = FALSE; gboolean is_probe = FALSE; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(node != NULL, return FALSE); CRM_CHECK(xml_op != NULL, return FALSE); id = ID(xml_op); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); task_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID); task_status = crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS); op_digest = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC); key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(id != NULL, 
return FALSE); CRM_CHECK(task != NULL, return FALSE); CRM_CHECK(task_status != NULL, return FALSE); task_status_i = crm_parse_int(task_status, NULL); CRM_CHECK(task_status_i <= LRM_OP_ERROR, return FALSE); CRM_CHECK(task_status_i >= LRM_OP_PENDING, return FALSE); if(safe_str_eq(task, CRMD_ACTION_NOTIFY)) { /* safe to ignore these */ return TRUE; } if(rsc->failure_timeout > 0) { int last_run = 0; if(crm_element_value_int(xml_op, "last-run", &last_run) == 0) { /* int last_change = crm_element_value_int(xml_op, "last_rc_change"); */ time_t now = get_timet_now(data_set); if(now > (last_run + rsc->failure_timeout)) { expired = TRUE; } } } crm_debug_2("Unpacking task %s/%s (call_id=%s, status=%s) on %s (role=%s)", id, task, task_id, task_status, node->details->uname, role2text(rsc->role)); interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); if(interval == 0 && safe_str_eq(task, CRMD_ACTION_STATUS)) { is_probe = TRUE; } if(node->details->unclean) { crm_debug_2("Node %s (where %s is running) is unclean." " Further action depends on the value of the stop's on-fail attribue", node->details->uname, rsc->id); } actual_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); CRM_CHECK(actual_rc != NULL, return FALSE); actual_rc_i = crm_parse_int(actual_rc, NULL); if(key) { int dummy = 0; char *dummy_string = NULL; decode_transition_key(key, &dummy_string, &dummy, &dummy, &target_rc); crm_free(dummy_string); } if(task_status_i == LRM_OP_DONE && target_rc >= 0) { if(target_rc == actual_rc_i) { task_status_i = LRM_OP_DONE; } else { task_status_i = LRM_OP_ERROR; crm_debug("%s on %s returned %d (%s) instead of the expected value: %d (%s)", id, node->details->uname, actual_rc_i, execra_code2string(actual_rc_i), target_rc, execra_code2string(target_rc)); } } else if(task_status_i == LRM_OP_ERROR) { /* let us decide that */ task_status_i = LRM_OP_DONE; } if(task_status_i == LRM_OP_NOTSUPPORTED) { actual_rc_i = EXECRA_UNIMPLEMENT_FEATURE; } if(task_status_i != actual_rc_i && rsc->failure_timeout > 0 && get_failcount(node, rsc, NULL, data_set) == 0) { action_t *clear_op = NULL; clear_op = custom_action( rsc, crm_concat(rsc->id, CRM_OP_CLEAR_FAILCOUNT, '_'), CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set); add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); crm_notice("Clearing expired failcount for %s on %s", rsc->id, node->details->uname); } if(expired && actual_rc_i != EXECRA_NOT_RUNNING && actual_rc_i != EXECRA_RUNNING_MASTER && actual_rc_i != EXECRA_OK) { crm_notice("Ignoring expired failure %s (rc=%d, magic=%s) on %s", id, actual_rc_i, magic, node->details->uname); goto done; } /* we could clean this up significantly except for old LRMs and CRMs that * didnt include target_rc and liked to remap status */ switch(actual_rc_i) { case EXECRA_NOT_RUNNING: if(is_probe || target_rc == actual_rc_i) { task_status_i = LRM_OP_DONE; rsc->role = RSC_ROLE_STOPPED; /* clear any previous failure actions */ *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } else if(safe_str_neq(task, CRMD_ACTION_STOP)) { task_status_i = LRM_OP_ERROR; } break; case EXECRA_RUNNING_MASTER: if(is_probe) { task_status_i = LRM_OP_DONE; crm_notice("Operation %s found resource %s active in master mode on %s", id, rsc->id, node->details->uname); } else if(target_rc == actual_rc_i) { /* nothing to do */ } else if(target_rc >= 0) { task_status_i = LRM_OP_ERROR; /* legacy code for pre-0.6.5 operations */ } else if(safe_str_neq(task, CRMD_ACTION_STATUS) || rsc->role != 
RSC_ROLE_MASTER) { task_status_i = LRM_OP_ERROR; if(rsc->role != RSC_ROLE_MASTER) { crm_err("%s reported %s in master mode on %s", id, rsc->id, node->details->uname); } } rsc->role = RSC_ROLE_MASTER; break; case EXECRA_FAILED_MASTER: rsc->role = RSC_ROLE_MASTER; task_status_i = LRM_OP_ERROR; break; case EXECRA_UNIMPLEMENT_FEATURE: if(interval > 0) { task_status_i = LRM_OP_NOTSUPPORTED; break; } /* else: fall through */ case EXECRA_INSUFFICIENT_PRIV: case EXECRA_NOT_INSTALLED: case EXECRA_INVALID_PARAM: effective_node = node; /* fall through */ case EXECRA_NOT_CONFIGURED: failed = rsc; if(is_not_set(rsc->flags, pe_rsc_unique)) { failed = uber_parent(failed); } do_crm_log(actual_rc_i==EXECRA_NOT_INSTALLED?LOG_NOTICE:LOG_ERR, "Hard error - %s failed with rc=%d: Preventing %s from re-starting %s %s", id, actual_rc_i, failed->id, effective_node?"on":"anywhere", effective_node?effective_node->details->uname:"in the cluster"); resource_location(failed, effective_node, -INFINITY, "hard-error", data_set); if(is_probe) { /* treat these like stops */ task = CRMD_ACTION_STOP; task_status_i = LRM_OP_DONE; crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); if(actual_rc_i != EXECRA_NOT_INSTALLED || is_set(data_set->flags, pe_flag_symmetric_cluster)) { add_node_copy(data_set->failed, xml_op); } } break; case EXECRA_OK: if(is_probe && target_rc == 7) { task_status_i = LRM_OP_DONE; crm_notice("Operation %s found resource %s active on %s", id, rsc->id, node->details->uname); /* legacy code for pre-0.6.5 operations */ } else if(target_rc < 0 && interval > 0 && rsc->role == RSC_ROLE_MASTER) { /* catch status ops that return 0 instead of 8 while they * are supposed to be in master mode */ task_status_i = LRM_OP_ERROR; } break; default: if(task_status_i == LRM_OP_DONE) { crm_info("Remapping %s (rc=%d) on %s to an ERROR", id, actual_rc_i, node->details->uname); task_status_i = LRM_OP_ERROR; } } if(task_status_i == LRM_OP_ERROR || task_status_i == LRM_OP_TIMEOUT || task_status_i == LRM_OP_NOTSUPPORTED) { action = custom_action(rsc, crm_strdup(id), task, NULL, TRUE, FALSE, data_set); if(expired) { crm_notice("Ignoring expired failure (calculated) %s (rc=%d, magic=%s) on %s", id, actual_rc_i, magic, node->details->uname); goto done; } else if(action->on_fail == action_fail_ignore) { crm_warn("Remapping %s (rc=%d) on %s to DONE: ignore", id, actual_rc_i, node->details->uname); task_status_i = LRM_OP_DONE; } } switch(task_status_i) { case LRM_OP_PENDING: if(safe_str_eq(task, CRMD_ACTION_START)) { set_bit(rsc->flags, pe_rsc_start_pending); set_active(rsc); } else if(safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } break; case LRM_OP_DONE: crm_debug_3("%s/%s completed on %s", rsc->id, task, node->details->uname); if(actual_rc_i == EXECRA_NOT_RUNNING) { /* nothing to do */ } else if(safe_str_eq(task, CRMD_ACTION_STOP)) { rsc->role = RSC_ROLE_STOPPED; /* clear any previous failure actions */ switch(*on_fail) { case action_fail_block: case action_fail_stop: case action_fail_fence: case action_fail_migrate: case action_fail_standby: crm_debug_2("%s.%s is not cleared by a completed stop", rsc->id, fail2text(*on_fail)); break; case action_fail_ignore: case action_fail_recover: case action_migrate_failure: *on_fail = action_fail_ignore; rsc->next_role = RSC_ROLE_UNKNOWN; } } else if(safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if(safe_str_eq(task, CRMD_ACTION_DEMOTE)) { rsc->role = RSC_ROLE_SLAVE; } else if(rsc->role < RSC_ROLE_STARTED) { crm_debug_3("%s active 
on %s", rsc->id, node->details->uname); set_active(rsc); } break; case LRM_OP_ERROR: case LRM_OP_TIMEOUT: case LRM_OP_NOTSUPPORTED: crm_warn("Processing failed op %s on %s: %s (%d)", id, node->details->uname, execra_code2string(actual_rc_i), actual_rc_i); crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname); add_node_copy(data_set->failed, xml_op); if(*on_fail < action->on_fail) { *on_fail = action->on_fail; } if(safe_str_eq(task, CRMD_ACTION_STOP)) { resource_location( rsc, node, -INFINITY, "__stop_fail__", data_set); } else if(safe_str_eq(task, CRMD_ACTION_PROMOTE)) { rsc->role = RSC_ROLE_MASTER; } else if(safe_str_eq(task, CRMD_ACTION_DEMOTE)) { /* * staying in role=master ends up putting the PE/TE into a loop * setting role=slave is not dangerous because no master will be * promoted until the failed resource has been fully stopped */ crm_warn("Forcing %s to stop after a failed demote action", rsc->id); rsc->next_role = RSC_ROLE_STOPPED; rsc->role = RSC_ROLE_SLAVE; } else if(compare_version("2.0", op_version) > 0 && safe_str_eq(task, CRMD_ACTION_START)) { crm_warn("Compatibility handling for failed op %s on %s", id, node->details->uname); resource_location( rsc, node, -INFINITY, "__legacy_start__", data_set); } if(rsc->role < RSC_ROLE_STARTED) { set_active(rsc); } crm_debug_2("Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s", rsc->id, role2text(rsc->role), node->details->unclean?"true":"false", fail2text(action->on_fail), role2text(action->fail_role)); if(action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) { rsc->next_role = action->fail_role; } if(action->fail_role == RSC_ROLE_STOPPED) { crm_err("Making sure %s doesn't come up again", rsc->id); /* make sure it doesnt come up again */ pe_free_shallow_adv(rsc->allowed_nodes, TRUE); rsc->allowed_nodes = node_list_dup( data_set->nodes, FALSE, FALSE); slist_iter( node, node_t, rsc->allowed_nodes, lpc, node->weight = -INFINITY; ); } pe_free_action(action); action = NULL; break; case LRM_OP_CANCELLED: /* do nothing?? 
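 * a cancelled operation never completed, so it tells us nothing about
 * the resource's current state; it is only reported for now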
*/ pe_err("Dont know what to do for cancelled ops yet"); break; } done: crm_debug_3("Resource %s after %s: role=%s", rsc->id, task, role2text(rsc->role)); pe_free_action(action); return TRUE; } gboolean add_node_attrs(xmlNode *xml_obj, node_t *node, gboolean overwrite, pe_working_set_t *data_set) { g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_UNAME), crm_strdup(node->details->uname)); g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_ID), crm_strdup(node->details->id)); if(safe_str_eq(node->details->id, data_set->dc_uuid)) { data_set->dc_node = node; node->details->is_dc = TRUE; g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_DC), crm_strdup(XML_BOOLEAN_TRUE)); } else { g_hash_table_insert(node->details->attrs, crm_strdup("#"XML_ATTR_DC), crm_strdup(XML_BOOLEAN_FALSE)); } unpack_instance_attributes( data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, node->details->attrs, NULL, overwrite, data_set->now); return TRUE; } static GListPtr extract_operations(const char *node, const char *rsc, xmlNode *rsc_entry, gboolean active_filter) { int stop_index = -1; int start_index = -1; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; xml_child_iter_filter( rsc_entry, rsc_op, XML_LRM_TAG_RSC_OP, crm_xml_add(rsc_op, "resource", rsc); crm_xml_add(rsc_op, XML_ATTR_UNAME, node); op_list = g_list_append(op_list, rsc_op); ); if(op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if(active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); slist_iter(rsc_op, xmlNode, sorted_op_list, lpc, if(start_index < stop_index) { crm_debug_4("Skipping %s: not active", ID(rsc_entry)); break; } else if(lpc < start_index) { crm_debug_4("Skipping %s: old", ID(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); ); g_list_free(sorted_op_list); return op_list; } GListPtr find_operations( const char *rsc, const char *node, gboolean active_filter, pe_working_set_t *data_set) { GListPtr output = NULL; GListPtr intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE); const char *uname = NULL; node_t *this_node = NULL; xml_child_iter_filter( status, node_state, XML_CIB_TAG_STATE, uname = crm_element_value(node_state, XML_ATTR_UNAME); if(node != NULL && safe_str_neq(uname, node)) { continue; } this_node = pe_find_node(data_set->nodes, uname); CRM_CHECK(this_node != NULL, continue); determine_online_status(node_state, this_node, data_set); if(this_node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) { /* offline nodes run no resources... 
* unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE); xml_child_iter_filter( tmp, lrm_rsc, XML_LRM_TAG_RESOURCE, const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID); if(rsc != NULL && safe_str_neq(rsc_id, rsc)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); ); } ); return output; } diff --git a/pengine/native.c b/pengine/native.c index 3f383afbcb..a5feee5d58 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,2310 +1,2310 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #define DELETE_THEN_REFRESH 1 /* The crmd will remove the resource from the CIB itself, making this redundant */ #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); void pe_post_notify( resource_t *rsc, node_t *node, action_t *op, notify_data_t *n_data, pe_working_set_t *data_set); void NoRoleChange (resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set); gboolean DeleteRsc (resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set); gboolean StopRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean StartRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean DemoteRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean RoleError (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean NullOp (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, 
RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, RoleError, NullOp, PromoteRsc, }, /* Master */ { RoleError, RoleError, RoleError, DemoteRsc, NullOp, }, }; struct capacity_data { node_t *node; resource_t *rsc; gboolean is_enough; }; static void check_capacity(gpointer key, gpointer value, gpointer user_data) { int required = 0; int remaining = 0; struct capacity_data *data = user_data; required = crm_parse_int(value, "0"); remaining = crm_parse_int(g_hash_table_lookup(data->node->details->utilization, key), "0"); if (required > remaining) { crm_debug("Node %s has no enough %s for resource %s: required=%d remaining=%d", data->node->details->uname, (char *)key, data->rsc->id, required, remaining); data->is_enough = FALSE; } } static gboolean have_enough_capacity(node_t *node, resource_t *rsc) { struct capacity_data data; data.node = node; data.rsc = rsc; data.is_enough = TRUE; g_hash_table_foreach(rsc->utilization, check_capacity, &data); return data.is_enough; } static gboolean native_choose_node(resource_t *rsc, pe_working_set_t *data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest wieght) with the fewest resources 3. remove color.chosen_node from all other colors */ int alloc_details = scores_log_level+1; GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; if (safe_str_neq(data_set->placement_strategy, "default")) { slist_iter( node, node_t, data_set->nodes, lpc, if (have_enough_capacity(node, rsc) == FALSE) { crm_debug("Resource %s cannot be allocated to node %s: none of enough capacity", rsc->id, node->details->uname); resource_location(rsc, node, -INFINITY, "__limit_utilization_", data_set); } ); dump_node_scores(alloc_details, rsc, "Post-utilization", rsc->allowed_nodes); } length = g_list_length(rsc->allowed_nodes); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to?TRUE:FALSE; } crm_debug_3("Choosing node for %s from %d candidates", rsc->id, length); if(rsc->allowed_nodes) { rsc->allowed_nodes = g_list_sort_with_data(rsc->allowed_nodes, sort_node_weight, data_set); nodes = rsc->allowed_nodes; chosen = g_list_nth_data(nodes, 0); if(chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if(can_run_resources(running) == FALSE) { running = NULL; } for(lpc = 1; lpc < length; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if(tmp->weight == chosen->weight) { multiple++; if(running && tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if(multiple > 1) { int log_level = LOG_INFO; char *score = score2char(chosen->weight); if(chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. 
Chose %s.", multiple, score, rsc->id, chosen->details->uname); crm_free(score); } return native_assign_node(rsc, nodes, chosen, FALSE); } int node_list_attr_score(GListPtr list, const char *attr, const char *value) { int best_score = -INFINITY; const char *best_node = NULL; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } slist_iter(node, node_t, list, lpc, int weight = node->weight; if(can_run_resources(node) == FALSE) { weight = -INFINITY; } if(weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if(safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } ); if(safe_str_neq(attr, "#"XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node?best_node:"", best_score); } return best_score; } static void node_list_update(GListPtr list1, GListPtr list2, const char *attr, int factor) { int score = 0; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } slist_iter( node, node_t, list1, lpc, CRM_CHECK(node != NULL, continue); score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); if(factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO: Decide if we want to filter only if weight == -INFINITY * */ continue; } crm_debug_2("%s: %d + %d*%d", node->details->uname, node->weight, factor, score); node->weight = merge_weights(factor*score, node->weight); ); } GListPtr native_merge_weights( resource_t *rsc, const char *rhs, GListPtr nodes, const char *attr, int factor, gboolean allow_rollback) { GListPtr work = NULL; int multiplier = 1; if(factor < 0) { multiplier = -1; } if(is_set(rsc->flags, pe_rsc_merging)) { crm_info("%s: Breaking dependency loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); crm_debug_2("%s: Combining scores from %s", rhs, rsc->id); work = node_list_dup(nodes, FALSE, FALSE); node_list_update(work, rsc->allowed_nodes, attr, factor); if(allow_rollback && can_run_any(work) == FALSE) { crm_info("%s: Rolling back scores from %s", rhs, rsc->id); slist_destroy(node_t, n, work, crm_free(n)); clear_bit(rsc->flags, pe_rsc_merging); return nodes; } if(can_run_any(work)) { slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, crm_debug_2("Applying %s", constraint->id); work = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rhs, work, constraint->node_attribute, multiplier*constraint->score/INFINITY, allow_rollback); ); } slist_destroy(node_t, n, nodes, crm_free(n)); clear_bit(rsc->flags, pe_rsc_merging); return work; } node_t * native_color(resource_t *rsc, pe_working_set_t *data_set) { int alloc_details = scores_log_level+1; if(rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ crm_debug("Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->color(rsc->parent, data_set); } if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependency loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-allloc", rsc->allowed_nodes); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, GListPtr archive = NULL; resource_t *rsc_rh = constraint->rsc_rh; crm_debug_2("%s: Pre-Processing %s (%s, %d, %s)", rsc->id, 
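/* A colocation that targets the master role, or that carries a finite
 * negative score, may have to be undone: allowed_nodes is archived before
 * the peer is allocated and the constraint applied, and restored if no
 * node remains runnable afterwards. */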
constraint->id, rsc_rh->id, constraint->score, role2text(constraint->role_lh)); if(constraint->role_lh >= RSC_ROLE_MASTER || (constraint->score < 0 && constraint->score > -INFINITY)) { archive = node_list_dup(rsc->allowed_nodes, FALSE, FALSE); } rsc_rh->cmds->color(rsc_rh, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); if(archive && can_run_any(rsc->allowed_nodes) == FALSE) { crm_info("%s: Rolling back scores from %s", rsc->id, rsc_rh->id); pe_free_shallow_adv(rsc->allowed_nodes, TRUE); rsc->allowed_nodes = archive; archive = NULL; } pe_free_shallow_adv(archive, TRUE); ); dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, TRUE); ); print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if(rsc->next_role == RSC_ROLE_STOPPED) { crm_debug_2("Making sure %s doesn't get allocated", rsc->id); /* make sure it doesnt come up again */ resource_location( rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } dump_node_scores(show_scores?0:scores_log_level, rsc, __PRETTY_FUNCTION__, rsc->allowed_nodes); if(is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if(is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; if(rsc->running_on == NULL) { reason = "inactive"; } else if(rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if(is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } crm_info("Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to?assign_to->details->uname:"'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if(is_set(data_set->flags, pe_flag_stop_everything)) { crm_debug("Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if(is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, data_set) ) { crm_debug_3("Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if(rsc->allocated_to == NULL) { if(is_not_set(rsc->flags, pe_rsc_orphan)) { crm_info("Resource %s cannot run anywhere", rsc->id); } else if(rsc->running_on != NULL) { crm_info("Stopping orphan resource %s", rsc->id); } } else { crm_debug("Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); return rsc->allocated_to; } static gboolean is_op_dup( resource_t *rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; const char *value = NULL; xml_child_iter_filter( rsc->ops_xml, operation, "op", value = crm_element_value(operation, "name"); if(safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if(value == NULL) { value = "0"; } if(safe_str_neq(value, interval)) { continue; } if(id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } ); return dup; } void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, 
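/* Create the recurring monitor described by one <op> element, relative to
 * the role the resource will have after this transition; an active monitor
 * for the wrong role is cancelled instead. */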
pe_working_set_t *data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; crm_debug_2("Creating recurring action %s for %s in role %s", ID(operation), rsc->id, role2text(rsc->next_role)); if(node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if(interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if(is_op_dup(rsc, name, interval)) { return; } key = generate_op_key(rsc->id, name, interval_ms); if(find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ return; } if(start != NULL) { crm_debug_3("Marking %s %s due to %s", key, start->optional?"optional":"manditory", start->uuid); is_optional = start->optional; } else { crm_debug_2("Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if(possible_matches == NULL) { is_optional = FALSE; crm_debug_3("Marking %s manditory: not active", key); } else { g_list_free(possible_matches); } value = crm_element_value(operation, "role"); if((rsc->next_role == RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if(is_optional) { char *local_key = crm_strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* its running : cancel it */ mon = custom_action( rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); crm_free(mon->task); mon->task = crm_strdup(RSC_CANCEL); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch(rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if(rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if(rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if(local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. 
%s)", result , key, value?value:role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); crm_free(key); key = NULL; return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if(is_optional) { crm_debug_2("%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if(start == NULL || start->runnable == FALSE) { crm_debug("%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); mon->runnable = FALSE; } else if(node == NULL || node->details->online == FALSE || node->details->unclean) { crm_debug("%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); mon->runnable = FALSE; } else if(mon->optional == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", mon->task, interval_ms/1000, rsc->id, crm_str(node_uname)); } if(rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(EXECRA_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); crm_free(running_master); } if(node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, crm_strdup(key), mon, pe_order_implies_right|pe_order_runnable_left, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { custom_action_order( rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } else if(rsc->role == RSC_ROLE_MASTER) { custom_action_order( rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } } } void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set) { if(is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xml_child_iter_filter( rsc->ops_xml, operation, "op", RecurringOp(rsc, start, node, operation, data_set); ); } } void native_create_actions(resource_t *rsc, pe_working_set_t *data_set) { action_t *start = NULL; node_t *chosen = NULL; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; crm_debug_2("Createing actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); chosen = rsc->allocated_to; if(chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; } else if(rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; } get_rsc_attributes(rsc->parameters, rsc, chosen, data_set); if(g_list_length(rsc->running_on) > 1) { if(rsc->recovery_type == recovery_stop_start) { pe_proc_warn("Attempting recovery of resource %s", rsc->id); if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, NULL, FALSE, data_set); } StopRsc(rsc, NULL, FALSE, data_set); rsc->role = RSC_ROLE_STOPPED; } } else if(rsc->running_on != NULL) { node_t *current = rsc->running_on->data; NoRoleChange(rsc, current, chosen, data_set); } else if(rsc->role == RSC_ROLE_STOPPED && rsc->next_role == RSC_ROLE_STOPPED) { char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, NULL); slist_iter( action, action_t, possible_matches, lpc, action->optional = TRUE; /* action->pseudo = TRUE; */ ); g_list_free(possible_matches); crm_debug_2("Stopping a stopped resource"); crm_free(key); goto do_recurring; } else if(rsc->role != RSC_ROLE_STOPPED) { /* A cheap trick to account for the fact that Master/Slave groups may not be * completely running when we set their role to Slave */ crm_debug_2("Resetting %s.role = %s (was %s)", rsc->id, role2text(RSC_ROLE_STOPPED), role2text(rsc->role)); rsc->role = RSC_ROLE_STOPPED; } role = rsc->role; while(role != rsc->next_role) { next_role = 
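/* one step through the role state machine: rsc_state_matrix gives the next
 * intermediate role between rsc->role and rsc->next_role, and
 * rsc_action_matrix supplies the action that performs that single step */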
rsc_state_matrix[role][rsc->next_role]; crm_debug_2("Executing: %s->%s (%s)", role2text(role), role2text(next_role), rsc->id); if(rsc_action_matrix[role][next_role]( rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } do_recurring: if(rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); } } void native_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { int type = pe_order_optional; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); if(rsc->variant == pe_native) { type |= pe_order_implies_right; } if(rsc->parent == NULL || rsc->parent->variant == pe_group) { type |= pe_order_restart; } new_rsc_order(rsc, RSC_STOP, rsc, RSC_START, type, data_set); new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_STOP, pe_order_demote_stop, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, pe_order_optional, data_set); if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(rsc->variant == pe_native && safe_str_neq(class, "stonith")) { custom_action_order( rsc, stop_key(rsc), NULL, NULL, crm_strdup(all_stopped->task), all_stopped, pe_order_implies_right|pe_order_runnable_left, data_set); } if (safe_str_neq(data_set->placement_strategy, "default") && g_hash_table_size(rsc->utilization) > 0) { slist_iter( current, node_t, rsc->running_on, lpc, char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); load_stopped->node = current; load_stopped->optional = FALSE; custom_action_order( rsc, stop_key(rsc), NULL, NULL, load_stopped_task, load_stopped, pe_order_optional, data_set); ); slist_iter( next, node_t, rsc->allowed_nodes, lpc, char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_'); action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set); load_stopped->node = next; load_stopped->optional = FALSE; custom_action_order( NULL, load_stopped_task, load_stopped, rsc, start_key(rsc), NULL, pe_order_optional, data_set); ); } } void native_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { if(rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if(constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } crm_debug_2("Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } static gboolean filter_colocation_constraint( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { int level = LOG_DEBUG_4; if(constraint->score == 0){ return FALSE; } if(constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == 
rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } return TRUE; } static void colocation_match( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { const char *tmp = NULL; const char *value = NULL; const char *attribute = "#id"; GListPtr archive = NULL; gboolean do_check = FALSE; if(constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if(rsc_rh->allocated_to) { value = g_hash_table_lookup( rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if(constraint->score < 0) { /* nothing to do: * anti-colocation with something thats not running */ return; } if(constraint->score > -INFINITY && constraint->score < INFINITY) { archive = node_list_dup(rsc_lh->allowed_nodes, FALSE, FALSE); } slist_iter( node, node_t, rsc_lh->allowed_nodes, lpc, tmp = g_hash_table_lookup(node->details->attrs, attribute); if(do_check && safe_str_eq(tmp, value)) { if(constraint->score < INFINITY) { crm_debug_2("%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights( constraint->score, node->weight); } } else if(do_check == FALSE || constraint->score >= INFINITY) { crm_debug_2("%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check?"failed":"unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } ); if(can_run_any(rsc_lh->allowed_nodes) == FALSE) { if(archive) { char *score = score2char(constraint->score); crm_info("%s: Rolling back scores from %s (%d, %s)", rsc_lh->id, rsc_rh->id, do_check, score); pe_free_shallow_adv(rsc_lh->allowed_nodes, TRUE); rsc_lh->allowed_nodes = archive; crm_free(score); } } } void native_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { crm_debug_2("%sColocating %s with %s (%s, weight=%d)", constraint->score >= 0?"":"Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); if(filter_colocation_constraint(rsc_lh, rsc_rh, constraint) == FALSE) { return; } if(is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if(is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return; } details_rh = rsc_rh->allocated_to?rsc_rh->allocated_to->details:NULL; details_lh = rsc_lh->allocated_to?rsc_lh->allocated_to->details:NULL; if(constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. 
%s", rsc_lh->id, rsc_rh->id, details_lh?details_lh->uname:"n/a", details_rh?details_rh->uname:"n/a"); } else if(constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh?details_rh->uname:"n/a"); } return; } else { colocation_match(rsc_lh, rsc_rh, constraint); } } static GListPtr find_actions_by_task(GListPtr actions, resource_t *rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if(list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *tmp = NULL; char *task = NULL; int interval = 0; if(parse_op_key(original_key, &tmp, &task, &interval)) { key = generate_op_key(rsc->id, task, interval); /* crm_err("looking up %s instead of %s", key, original_key); */ /* slist_iter(action, action_t, actions, lpc, */ /* crm_err(" - %s", action->uuid)); */ list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } crm_free(key); crm_free(tmp); crm_free(task); } return list; } void native_rsc_order_lh(resource_t *lh_rsc, order_constraint_t *order, pe_working_set_t *data_set) { GListPtr lh_actions = NULL; action_t *lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_debug_3("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if(lh_action != NULL) { lh_actions = g_list_append(NULL, lh_action); } else if(lh_action == NULL) { lh_actions = find_actions_by_task( lh_rsc->actions, lh_rsc, order->lh_action_task); } if(lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *rsc_id = NULL; char *op_type = NULL; int interval = 0; crm_debug_4("No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); parse_op_key( order->lh_action_task, &rsc_id, &op_type, &interval); key = generate_op_key(lh_rsc->id, op_type, interval); lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); if(lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { lh_action->pseudo = TRUE; lh_action->runnable = TRUE; } lh_actions = g_list_append(NULL, lh_action); crm_free(op_type); crm_free(rsc_id); } slist_iter( lh_action_iter, action_t, lh_actions, lpc, if(rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if(rh_rsc) { rh_rsc->cmds->rsc_order_rh( lh_action_iter, rh_rsc, order); } else if(order->rh_action) { order_actions( lh_action_iter, order->rh_action, order->type); } ); pe_free_shallow_adv(lh_actions, FALSE); } void native_rsc_order_rh( action_t *lh_action, resource_t *rsc, order_constraint_t *order) { GListPtr rh_actions = NULL; action_t *rh_action = NULL; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); rh_action = order->rh_action; crm_debug_3("Processing RH of ordering constraint %d", order->id); if(rh_action != NULL) { rh_actions = g_list_append(NULL, rh_action); } else if(rsc != NULL) { rh_actions = find_actions_by_task( rsc->actions, rsc, order->rh_action_task); } if(rh_actions == NULL) { crm_debug_4("No RH-Side (%s/%s) found for constraint..." 
" ignoring", rsc->id,order->rh_action_task); if(lh_action) { crm_debug_4("LH-Side was: %s", lh_action->uuid); } return; } slist_iter( rh_action_iter, action_t, rh_actions, lpc, if(lh_action) { order_actions(lh_action, rh_action_iter, order->type); } else if(order->type & pe_order_implies_right) { rh_action_iter->runnable = FALSE; crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, order->type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, order->type); } ); pe_free_shallow_adv(rh_actions, FALSE); } void native_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { GListPtr or_list; crm_debug_2("Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if(constraint == NULL) { pe_err("Constraint is NULL"); return; } else if(rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } else if(constraint->role_filter > 0 && constraint->role_filter != rsc->next_role) { crm_debug("Constraint (%s) is not active (role : %s)", constraint->id, role2text(constraint->role_filter)); return; } else if(is_active(constraint) == FALSE) { crm_debug_2("Constraint (%s) is not active", constraint->id); return; } if(constraint->node_list_rh == NULL) { crm_debug_2("RHS of constraint %s is NULL", constraint->id); return; } or_list = node_list_or( rsc->allowed_nodes, constraint->node_list_rh, FALSE); pe_free_shallow(rsc->allowed_nodes); rsc->allowed_nodes = or_list; slist_iter(node, node_t, or_list, lpc, crm_debug_3("%s + %s : %d", rsc->id, node->details->uname, node->weight); ); } void native_expand(resource_t *rsc, pe_working_set_t *data_set) { crm_debug_3("Processing actions from %s", rsc->id); slist_iter( action, action_t, rsc->actions, lpc, crm_debug_4("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); ); slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->expand(child_rsc, data_set); ); } void LogActions(resource_t *rsc, pe_working_set_t *data_set) { node_t *next = NULL; node_t *current = NULL; gboolean moving = FALSE; if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, LogActions(child_rsc, data_set); ); return; } next = rsc->allocated_to; if(rsc->running_on) { current = rsc->running_on->data; if(rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if(current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if(is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { crm_notice("Leave resource %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed)?" 
unmanaged":""); return; } if(current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } if(rsc->role == rsc->next_role) { action_t *start = NULL; char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, next); crm_free(key); if(possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = generate_op_key(rsc->id, CRMD_ACTION_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); crm_free(key); CRM_CHECK(next != NULL,); if(next == NULL) { } else if(possible_matches) { crm_notice("Migrate resource %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); g_list_free(possible_matches); } else if(start == NULL || start->optional) { crm_notice("Leave resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(moving && current) { crm_notice("Move resource %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Recover resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(start && start->runnable == FALSE) { crm_notice("Stop resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { crm_notice("Restart resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } return; } if(rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if(current != NULL) { crm_notice("Demote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname); } } if(rsc->next_role == RSC_ROLE_STOPPED || moving) { CRM_CHECK(current != NULL,); slist_iter(node, node_t, rsc->running_on, lpc, crm_notice("Stop resource %s\t(%s)", rsc->id, node->details->uname)); } if(rsc->role == RSC_ROLE_STOPPED || moving) { CRM_CHECK(next != NULL,); if(next != NULL) { crm_notice("Start %s\t(%s)", rsc->id, next->details->uname); } } if(rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { CRM_CHECK(next != NULL,); crm_notice("Promote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next->details->uname); } } void NoRoleChange(resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set) { action_t *stop = NULL; action_t *start = NULL; GListPtr possible_matches = NULL; crm_debug_2("Executing: %s (role=%s)", rsc->id, role2text(rsc->next_role)); if(current == NULL || next == NULL) { return; } if(is_set(rsc->flags, pe_rsc_failed) || safe_str_neq(current->details->id, next->details->id)) { if(rsc->next_role > RSC_ROLE_STARTED) { gboolean optional = TRUE; if(rsc->role == RSC_ROLE_MASTER) { optional = FALSE; } DemoteRsc(rsc, current, optional, data_set); } if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, current, FALSE, data_set); } StopRsc(rsc, current, FALSE, data_set); StartRsc(rsc, next, FALSE, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, FALSE, data_set); } possible_matches = find_recurring_actions(rsc->actions, next); slist_iter(match, action_t, possible_matches, lpc, if(match->optional == FALSE) { crm_debug("Fixing recurring action: %s", match->uuid); match->optional = TRUE; } ); g_list_free(possible_matches); } else if(is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, next, TRUE); if(start->runnable) { /* wait for StartRsc() to be called */ rsc->role = RSC_ROLE_STOPPED; } else { /* wait for StopRsc() 
to be called */ rsc->next_role = RSC_ROLE_STOPPED; } } else { stop = stop_action(rsc, current, TRUE); start = start_action(rsc, next, TRUE); stop->optional = start->optional; if(rsc->next_role > RSC_ROLE_STARTED) { DemoteRsc(rsc, current, start->optional, data_set); } StopRsc(rsc, current, start->optional, data_set); StartRsc(rsc, current, start->optional, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, start->optional, data_set); } if(start->runnable == FALSE) { rsc->next_role = RSC_ROLE_STOPPED; } } } gboolean StopRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *stop = NULL; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); crm_debug_2("Executing: %s", rsc->id); if(rsc->next_role == RSC_ROLE_STOPPED && rsc->variant == pe_native && safe_str_eq(class, "stonith")) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order( NULL, crm_strdup(all_stopped->task), all_stopped, rsc, stop_key(rsc), NULL, pe_order_implies_left|pe_order_stonith_stop, data_set); } slist_iter( current, node_t, rsc->running_on, lpc, stop = stop_action(rsc, current, optional); if(is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } ); return TRUE; } gboolean StartRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *start = NULL; crm_debug_2("Executing: %s", rsc->id); start = start_action(rsc, next, TRUE); if(start->runnable && optional == FALSE) { start->optional = FALSE; } return TRUE; } gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { char *key = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; crm_debug_2("Executing: %s", rsc->id); CRM_CHECK(rsc->next_role == RSC_ROLE_MASTER, crm_err("Next role: %s", role2text(rsc->next_role)); return FALSE); CRM_CHECK(next != NULL, return FALSE); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); slist_iter(start, action_t, action_list, lpc, if(start->runnable == FALSE) { runnable = FALSE; } ); g_list_free(action_list); if(runnable) { promote_action(rsc, next, optional); return TRUE; } crm_debug("%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); slist_iter(promote, action_t, action_list, lpc, promote->runnable = FALSE; ); g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ slist_iter( current, node_t, rsc->running_on, lpc, demote_action(rsc, current, optional); ); return TRUE; } gboolean RoleError(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug("Executing: %s", rsc->id); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set) { action_t *delete = NULL; #if DELETE_THEN_REFRESH action_t *refresh = NULL; #endif if(is_set(rsc->flags, pe_rsc_failed)) { crm_debug_2("Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if(node == NULL) { crm_debug_2("Resource %s not 
deleted: NULL node", rsc->id); return FALSE; } else if(node->details->unclean || node->details->online == FALSE) { crm_debug_2("Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete = delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional?pe_order_implies_right:pe_order_implies_left, data_set); #if DELETE_THEN_REFRESH refresh = custom_action( NULL, crm_strdup(CRM_OP_LRM_REFRESH), CRM_OP_LRM_REFRESH, node, FALSE, TRUE, data_set); add_hash_param(refresh->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); order_actions(delete, refresh, pe_order_optional); #endif return TRUE; } #include <../lib/pengine/unpack.h> gboolean native_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set) { char *key = NULL; action_t *probe = NULL; node_t *running = NULL; resource_t *top = uber_parent(rsc); static const char *rc_master = NULL; static const char *rc_inactive = NULL; if(rc_inactive == NULL) { rc_inactive = crm_itoa(EXECRA_NOT_RUNNING); rc_master = crm_itoa(EXECRA_RUNNING_MASTER); } CRM_CHECK(node != NULL, return FALSE); if(force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) { crm_debug_2("Skipping active resource detection for %s", rsc->id); return FALSE; } if(rsc->children) { gboolean any_created = FALSE; slist_iter( child_rsc, resource_t, rsc->children, lpc, any_created = child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set) || any_created; ); return any_created; } if(is_set(rsc->flags, pe_rsc_orphan)) { crm_debug_2("Skipping orphan: %s", rsc->id); return FALSE; } running = pe_find_node_id(rsc->known_on, node->details->id); if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active: %s", rsc->id); return FALSE; } if(running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. 
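 * (the loop below simply walks the clone's :0, :1, ... instances and
 *  stops as soon as any instance is already known on this node)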
* * An alternative would be to update known_on for every peer * during process_rsc_state() * * This code desperately needs optimization * ptest -x with 100 nodes, 100 clones and clone-max=10: * No probes O(25s) * Detection without clone loop O(3m) * Detection with clone loop O(8m) ptest[32211]: 2010/02/18_14:27:55 CRIT: stage5: Probing for unknown resources ptest[32211]: 2010/02/18_14:33:39 CRIT: stage5: Done ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Updating action states ptest[32211]: 2010/02/18_14:35:05 CRIT: stage7: Done */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while(peer && running == NULL) { running = pe_find_node_id(peer->known_on, node->details->id); if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active clone: %s", rsc->id); crm_free(clone_id); return FALSE; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } crm_free(clone_id); } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); probe->optional = FALSE; if(running == NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive); } else if(rsc->role == RSC_ROLE_MASTER) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master); } crm_debug("Probing %s on %s (%s)", rsc->id, node->details->uname, role2text(rsc->role)); order_actions(probe, complete, pe_order_implies_right); return TRUE; } static void native_start_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { node_t *target = stonith_op?stonith_op->node:NULL; if(is_stonith) { char *key = start_key(rsc); action_t *ready = get_pseudo_op(STONITH_UP, data_set); crm_debug_2("Ordering %s action before stonith events", key); custom_action_order( rsc, key, NULL, NULL, crm_strdup(ready->task), ready, pe_order_optional, data_set); } else { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); slist_iter(action, action_t, rsc->actions, lpc2, if(action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_implies_left); } else if(target != NULL && safe_str_eq(action->task, RSC_START) && NULL == pe_find_node_id( rsc->known_on, target->details->id)) { /* if known == NULL, then we dont know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * to with the resource * * its analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explaination is that the * DC died and took its status with it */ - crm_info("Ordering %s after %s recovery", - action->uuid, target->details->uname); + crm_debug("Ordering %s after %s recovery", + action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_implies_left|pe_order_runnable_left); } ); } } static void native_stop_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { char *key = NULL; GListPtr action_list = NULL; resource_t *top = uber_parent(rsc); key = stop_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); /* add the stonith OP as a stop pre-req and the mark the stop * as a pseudo op - since its now redundant */ slist_iter( action, action_t, 
action_list, lpc2, resource_t *parent = NULL; if(action->node->details->online && action->node->details->unclean == FALSE && is_set(rsc->flags, pe_rsc_failed)) { continue; } if(is_set(rsc->flags, pe_rsc_failed)) { crm_warn("Stop of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ action->pseudo = TRUE; action->runnable = TRUE; action->implied_by_stonith = TRUE; if(is_stonith == FALSE) { action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); order_actions(stonith_op, action, pe_order_optional); order_actions(stonith_op, parent_stop, pe_order_optional); } if(is_set(rsc->flags, pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ notify_data_t *n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); crm_info("Creating secondary notification for %s", action->uuid); collect_notification_data(rsc, TRUE, FALSE, n_data); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_resource"), crm_strdup(rsc->id)); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_uname"), crm_strdup(action->node->details->uname)); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } /* find the top-most resource */ parent = rsc->parent; while(parent != NULL && parent->parent != NULL) { parent = parent->parent; } if(parent) { crm_debug_2("Re-creating actions for %s", parent->id); parent->cmds->create_actions(parent, data_set); /* make sure we dont mess anything up in create_actions */ CRM_CHECK(action->pseudo, action->pseudo = TRUE); CRM_CHECK(action->runnable, action->runnable = TRUE); } /* From Bug #1601, successful fencing must be an input to a failed resources stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependency and therefore must (at least for now) be disabled. Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). 
TODO: Break the "A requires B" dependency in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,crm_strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ ); g_list_free(action_list); key = demote_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); slist_iter( action, action_t, action_list, lpc2, if(action->node->details->online == FALSE || is_set(rsc->flags, pe_rsc_failed)) { crm_info("Demote of failed resource %s is" " implict after %s is fenced", rsc->id, action->node->details->uname); /* the stop would never complete and is * now implied by the stonith operation */ action->pseudo = TRUE; action->runnable = TRUE; if(is_stonith == FALSE) { order_actions(stonith_op, action, pe_order_optional); } } ); g_list_free(action_list); } void complex_stonith_ordering( resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set) { gboolean is_stonith = FALSE; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->stonith_ordering( child_rsc, stonith_op, data_set); ); return; } if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(stonith_op != NULL && safe_str_eq(class, "stonith")) { is_stonith = TRUE; } /* Start constraints */ native_start_constraints(rsc, stonith_op, is_stonith, data_set); /* Stop constraints */ native_stop_constraints(rsc, stonith_op, is_stonith, data_set); } enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, stack_middle = 4, }; static enum stack_activity find_clone_activity_on(resource_t *rsc, resource_t *target, node_t *node, const char *type) { int mode = stack_stable; action_t *active = NULL; if(target->children) { slist_iter( child, resource_t, target->children, lpc, mode |= find_clone_activity_on(rsc, child, node, type); ); return mode; } active = find_first_action(target->actions, NULL, CRMD_ACTION_START, NULL); if(active && active->optional == FALSE && active->pseudo == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_starting; } active = find_first_action(target->actions, NULL, CRMD_ACTION_STOP, node); if(active && active->optional == FALSE && active->pseudo == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_stopping; } return mode; } static enum stack_activity check_stack_element(resource_t *rsc, resource_t *other_rsc, const char *type) { resource_t *other_p = uber_parent(other_rsc); if(other_rsc == NULL || other_rsc == rsc) { return stack_stable; } else if(other_p->variant == pe_native) { crm_notice("Cannot migrate %s due to dependency on %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } else if(other_rsc == rsc->parent) { int mode = 0; slist_iter(constraint, rsc_colocation_t, other_rsc->rsc_cons, lpc, if(constraint->score > 0) { mode |= check_stack_element(rsc, constraint->rsc_rh, type); } ); return mode; } else if(other_p->variant == pe_group) { crm_notice("Cannot migrate %s due to dependency on group %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } /* else: >= clone */ /* ## Assumption A depends on clone(B) ## Resource Activity During Move N1 
N2 N3 --- --- --- t0 A.stop t1 B.stop B.stop t2 B.start B.start t3 A.start ## Resource Activity During Migration N1 N2 N3 --- --- --- t0 B.start B.start t1 A.stop (1) t2 A.start (2) t3 B.stop B.stop Node 1: Rewritten to be a migrate-to operation Node 2: Rewritten to be a migrate-from operation # Constraints The following constraints already exist in the system. The 'ok' and 'fail' column refers to whether they still hold for migration. a) A.stop -> A.start - ok b) B.stop -> B.start - fail c) A.stop -> B.stop - ok d) B.start -> A.start - ok e) B.stop -> A.start - fail f) A.stop -> B.start - fail ## Scenarios B unchanged - ok B stopping only - fail - possible after fixing 'e' B starting only - fail - possible after fixing 'f' B stoping and starting - fail - constraint 'b' is unfixable B restarting only on N2 - fail - as-per previous only rarer */ /* Only allow migration when the clone is either stable, only starting or only stopping */ return find_clone_activity_on(rsc, other_rsc, NULL, type); } static gboolean at_stack_bottom(resource_t *rsc) { char *key = NULL; action_t *start = NULL; action_t *other = NULL; int mode = stack_stable; GListPtr action_list = NULL; key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); crm_debug_3("%s: processing", rsc->id); CRM_CHECK(action_list != NULL, return FALSE); start = action_list->data; g_list_free(action_list); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *target = constraint->rsc_rh; crm_debug_4("Checking %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { mode |= check_stack_element(rsc, target, "coloc"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to colocation activity (last was %s)", rsc->id, target->id); return FALSE; } } ); slist_iter( other_w, action_wrapper_t, start->actions_before, lpc, other = other_w->action; if(other_w->type & pe_order_serialize_only) { crm_debug_3("%s: depends on %s (serialize ordering)", rsc->id, other->uuid); continue; } crm_debug_2("%s: Checking %s ordering", rsc->id, other->uuid); if(other->optional == FALSE) { mode |= check_stack_element(rsc, other->rsc, "order"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to ordering activity (last was %s)", rsc->id, other->rsc->id); return FALSE; } } ); return TRUE; } void complex_migrate_reload(resource_t *rsc, pe_working_set_t *data_set) { char *key = NULL; int level = LOG_DEBUG; GListPtr action_list = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *other = NULL; action_t *action = NULL; const char *value = NULL; if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->migrate_reload(child_rsc, data_set); ); other = NULL; return; } else if(rsc->variant > pe_native) { return; } do_crm_log_unlikely(level+1, "Processing %s", rsc->id); if(is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || rsc->next_role < RSC_ROLE_STARTED || g_list_length(rsc->running_on) != 1) { do_crm_log_unlikely( level+1, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); return; } value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE); if(crm_is_true(value)) { set_bit(rsc->flags, pe_rsc_can_migrate); } if(rsc->next_role > RSC_ROLE_SLAVE) { clear_bit(rsc->flags, 
pe_rsc_can_migrate); do_crm_log_unlikely( level+1, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role)); } key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no start action", rsc->id); return; } start = action_list->data; g_list_free(action_list); if(is_not_set(rsc->flags, pe_rsc_can_migrate) && start->allow_reload_conversion == FALSE) { do_crm_log_unlikely(level+1, "%s: no need to continue", rsc->id); return; } key = stop_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no stop action", rsc->id); return; } stop = action_list->data; g_list_free(action_list); action = start; if(action->pseudo || action->optional || action->node == NULL || action->runnable == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } action = stop; if(action->pseudo || action->optional || action->node == NULL || action->runnable == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } if(is_set(rsc->flags, pe_rsc_can_migrate)) { if(start->node == NULL || stop->node == NULL || stop->node->details == start->node->details) { clear_bit(rsc->flags, pe_rsc_can_migrate); } else if(at_stack_bottom(rsc) == FALSE) { clear_bit(rsc->flags, pe_rsc_can_migrate); } } if(is_set(rsc->flags, pe_rsc_can_migrate)) { action_t *to = NULL; action_t *from = NULL; crm_info("Migrating %s from %s to %s", rsc->id, stop->node->details->uname, start->node->details->uname); crm_free(stop->uuid); crm_free(stop->task); stop->task = crm_strdup(RSC_MIGRATE); stop->uuid = generate_op_key(rsc->id, stop->task, 0); add_hash_param(stop->meta, "migrate_source", stop->node->details->uname); add_hash_param(stop->meta, "migrate_target", start->node->details->uname); /* Create the correct ordering ajustments based on find_clone_activity_on(); */ slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *target = constraint->rsc_rh; crm_info("Repairing %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { int mode = check_stack_element(rsc, target, "coloc"); action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); CRM_ASSERT(clone_stop != NULL); CRM_ASSERT(clone_start != NULL); CRM_ASSERT((mode & stack_middle) == 0); CRM_ASSERT(((mode & stack_stopping) && (mode & stack_starting)) == 0); if(mode & stack_stopping) { action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); crm_debug("Creating %s.start -> %s.stop ordering", rsc->id, target->id); order_actions(start, clone_stop, pe_order_optional); slist_iter( other_w, action_wrapper_t, start->actions_before, lpc2, /* Needed if the clone's started pseudo-action ever gets printed in the graph */ if(other_w->action == clone_start) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, start->uuid); other_w->type = pe_order_none; } ); } else if(mode & stack_starting) { crm_debug("Creating %s.started -> %s.stop ordering", target->id, rsc->id); order_actions(clone_start, stop, pe_order_optional); slist_iter( other_w, action_wrapper_t, clone_stop->actions_before, lpc2, /* Needed if the clone's stop pseudo-action ever gets printed in the graph */ if(other_w->action == stop) { 
crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, clone_stop->uuid); other_w->type = pe_order_none; } ); } } ); crm_free(start->uuid); crm_free(start->task); start->task = crm_strdup(RSC_MIGRATED); start->uuid = generate_op_key(rsc->id, start->task, 0); add_hash_param(start->meta, "migrate_source_uuid", stop->node->details->id); add_hash_param(start->meta, "migrate_source", stop->node->details->uname); add_hash_param(start->meta, "migrate_target", start->node->details->uname); /* Anything that needed stop to complete, now also needs start to have completed */ slist_iter( other_w, action_wrapper_t, stop->actions_after, lpc, other = other_w->action; if(other->optional || other->rsc != NULL) { continue; } crm_debug("Ordering %s before %s (stop)", start->uuid, other_w->action->uuid); order_actions(start, other, other_w->type); ); /* Stop also needs anything that the start needed to have completed too */ slist_iter( other_w, action_wrapper_t, start->actions_before, lpc, other = other_w->action; if(other->rsc == NULL) { /* nothing */ } else if(other->optional || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (start)", other_w->action->uuid, stop->uuid); order_actions(other, stop, other_w->type); ); /* Overwrite any op-specific params with those from the migrate ops */ from = custom_action( rsc, crm_strdup(start->uuid), start->task, start->node, FALSE, FALSE, data_set); g_hash_table_foreach(start->meta, append_hashtable, from->meta); g_hash_table_destroy(start->meta); start->meta = from->meta; from->meta = NULL; pe_free_action(from); to = custom_action( rsc, crm_strdup(stop->uuid), stop->task, stop->node, FALSE, FALSE, data_set); g_hash_table_foreach(stop->meta, append_hashtable, to->meta); g_hash_table_destroy(stop->meta); stop->meta = to->meta; to->meta = NULL; pe_free_action(to); } else if(start && stop && start->allow_reload_conversion && stop->node->details == start->node->details) { action_t *rewrite = NULL; start->pseudo = TRUE; /* easier than trying to delete it from the graph */ action = NULL; key = promote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } if(action && action->optional == FALSE) { action->pseudo = TRUE; } g_list_free(action_list); crm_free(key); action = NULL; key = demote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } g_list_free(action_list); crm_free(key); if(action && action->optional == FALSE) { rewrite = action; stop->pseudo = TRUE; } else { rewrite = stop; } crm_info("Rewriting %s of %s on %s as a reload", rewrite->task, rsc->id, stop->node->details->uname); crm_free(rewrite->uuid); crm_free(rewrite->task); rewrite->task = crm_strdup("reload"); rewrite->uuid = generate_op_key(rsc->id, rewrite->task, 0); } else { do_crm_log_unlikely(level+1, "%s nothing to do", rsc->id); } } void native_append_meta(resource_t *rsc, xmlNode *xml) { char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if(value) { char *name = NULL; name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); crm_free(name); } } diff --git a/pengine/test10/master-promotion-constraint.exp b/pengine/test10/master-promotion-constraint.exp index bc5b2c27f2..344764d205 100644 --- a/pengine/test10/master-promotion-constraint.exp +++ b/pengine/test10/master-promotion-constraint.exp @@ -1,64 +1,64 @@ - + - + - + - + - + diff --git a/shell/modules/cibconfig.py b/shell/modules/cibconfig.py 
index d447371ad2..de13f9932f 100644 --- a/shell/modules/cibconfig.py +++ b/shell/modules/cibconfig.py @@ -1,2296 +1,2306 @@ # Copyright (C) 2008 Dejan Muhamedagic # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import sys import subprocess import copy import xml.dom.minidom import re from singletonmixin import Singleton from userprefs import Options, UserPrefs from vars import Vars from cliformat import * from utils import * from xmlutil import * from msg import * from parse import CliParser from clidisplay import CliDisplay from cibstatus import CibStatus from idmgmt import IdMgmt from ra import RAInfo def show_unrecognized_elems(doc): try: conf = doc.getElementsByTagName("configuration")[0] except: common_warn("CIB has no configuration element") return for topnode in conf.childNodes: if not is_element(topnode): continue if is_defaults(topnode): continue if not topnode.tagName in cib_topnodes: common_warn("unrecognized CIB element %s" % c.tagName) continue for c in topnode.childNodes: if not is_element(c): continue if not c.tagName in cib_object_map: common_warn("unrecognized CIB element %s" % c.tagName) # # object sets (enables operations on sets of elements) # def mkset_obj(*args): if args and args[0] == "xml": obj = lambda: CibObjectSetRaw(*args[1:]) else: obj = lambda: CibObjectSetCli(*args) return obj() class CibObjectSet(object): ''' Edit or display a set of cib objects. repr() for objects representation and save() used to store objects into internal structures are defined in subclasses. ''' def __init__(self, *args): self.obj_list = [] def _open_url(self,src): import urllib try: return urllib.urlopen(src) except: pass if src == "-": return sys.stdin try: return open(src) except: pass common_err("could not open %s" % src) return False def init_aux_lists(self): ''' Before edit, initialize two auxiliary lists which will hold a list of objects to be removed and a list of objects which were created. Then, we can create a new object list which will match the current state of affairs, i.e. the object set after the last edit. ''' self.remove_objs = copy.copy(self.obj_list) self.add_objs = [] def recreate_obj_list(self): ''' Recreate obj_list: remove deleted objects and add created objects ''' for obj in self.remove_objs: self.obj_list.remove(obj) self.obj_list += self.add_objs rmlist = [] for obj in self.obj_list: if obj.invalid: rmlist.append(obj) for obj in rmlist: self.obj_list.remove(obj) def edit_save(self,s,erase = False): ''' Save string s to a tmp file. Invoke editor to edit it. Parse/save the resulting file. In case of syntax error, allow user to reedit. If erase is True, erase the CIB first. If no changes are done, return silently. 
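        Usage sketch (object ids are illustrative, not from any real
        configuration): the interactive "edit" command typically builds a
        set via mkset_obj() and lets this method do the round-trip through
        the user's editor:

            obj_set = mkset_obj("www", "www-clone")
            obj_set.edit()          # repr() -> edit_save() -> save()

        hash(s) is compared before and after editing, so a buffer saved
        unchanged returns success without re-parsing or touching the CIB.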
''' tmp = str2tmp(s) if not tmp: return False filehash = hash(s) rc = False while True: if edit_file(tmp) != 0: break try: f = open(tmp,'r') except IOError, msg: common_err(msg) break s = ''.join(f) f.close() if hash(s) == filehash: # file unchanged rc = True break if erase: cib_factory.erase() if not self.save(s): if ask("Do you want to edit again?"): continue rc = True break try: os.unlink(tmp) except: pass return rc def edit(self): if options.batch: common_info("edit not allowed in batch mode") return False cli_display.set_no_pretty() s = self.repr() cli_display.reset_no_pretty() return self.edit_save(s) def filter_save(self,filter,s): ''' Pipe string s through a filter. Parse/save the output. If no changes are done, return silently. ''' rc,outp = filter_string(filter,s) if rc != 0: return False if hash(outp) == hash(s): return True return self.save(outp) def filter(self,filter): cli_display.set_no_pretty() s = self.repr(format = -1) cli_display.reset_no_pretty() return self.filter_save(filter,s) def save_to_file(self,fname): if fname == "-": f = sys.stdout else: if not options.batch and os.access(fname,os.F_OK): if not ask("File %s exists. Do you want to overwrite it?"%fname): return False try: f = open(fname,"w") except IOError, msg: common_err(msg) return False rc = True cli_display.set_no_pretty() s = self.repr() cli_display.reset_no_pretty() if s: f.write(s) f.write('\n') elif self.obj_list: rc = False if f != sys.stdout: f.close() return rc def show(self): s = self.repr() if not s: if self.obj_list: # objects could not be displayed return False else: return True page_string(s) def import_file(self,method,fname): if not cib_factory.is_cib_sane(): return False if method == "replace": if options.interactive and cib_factory.has_cib_changed(): if not ask("This operation will erase all changes. Do you want to proceed?"): return False cib_factory.erase() f = self._open_url(fname) if not f: return False s = ''.join(f) if f != sys.stdin: f.close() return self.save(s, method == "update") def repr(self): ''' Return a string with objects's representations (either CLI or XML). ''' return '' def save(self, s, update = False): ''' For each object: - try to find a corresponding object in obj_list - if (update and not found) or found: replace the object in the obj_list with the new object - if not found: create new See below for specific implementations. ''' pass def semantic_check(self): ''' Test objects for sanity. This is about semantics. ''' rc = 0 for obj in self.obj_list: rc |= obj.check_sanity() return rc def lookup_cli(self,cli_list): for obj in self.obj_list: if obj.matchcli(cli_list): return obj def lookup(self,xml_obj_type,obj_id): for obj in self.obj_list: if obj.match(xml_obj_type,obj_id): return obj def drop_remaining(self): 'Any remaining objects in obj_list are deleted.' l = [x.obj_id for x in self.remove_objs] return cib_factory.delete(*l) def get_comments(cli_list): if not cli_list: return [] last = cli_list[len(cli_list)-1] try: if last[0] == "comments": cli_list.pop() return last[1] except: pass return [] class CibObjectSetCli(CibObjectSet): ''' Edit or display a set of cib objects (using cli notation). ''' def __init__(self, *args): CibObjectSet.__init__(self, *args) self.obj_list = cib_factory.mkobj_list("cli",*args) def repr(self, format = 1): "Return a string containing cli format of all objects." 
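        # Illustrative rendering only (ids made up): with a primitive and a
        # clone in obj_list the joined result looks roughly like
        #   primitive d0 ocf:pacemaker:Dummy
        #   clone c0 d0 meta clone-max="2"
        # each object's CLI text separated by newlines, in
        # processing_sort_cli() order.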
if not self.obj_list: return '' return '\n'.join(obj.repr_cli(format = format) \ for obj in processing_sort_cli(self.obj_list)) def process(self, cli_list, update = False): ''' Create new objects or update existing ones. ''' myobj = obj = self.lookup_cli(cli_list) if update and not obj: obj = cib_factory.find_object_for_cli(cli_list) if obj: rc = cib_factory.update_from_cli(obj,cli_list) != False if myobj: self.remove_objs.remove(myobj) else: obj = cib_factory.create_from_cli(cli_list) rc = obj != None if rc: self.add_objs.append(obj) return rc def save(self, s, update = False): ''' Save a user supplied cli format configuration. On errors user is typically asked to review the configuration (for instance on editting). On syntax error (return code 1), no changes are done, but on semantic errors (return code 2), some changes did take place so object list must be updated properly. Finally, once syntax check passed, there's no way back, all changes are applied to the current configuration. TODO: Implement undo configuration changes. ''' l = [] rc = True err_buf.start_tmp_lineno() cp = CliParser() for cli_text in lines2cli(s): err_buf.incr_lineno() cli_list = cp.parse(cli_text) if cli_list: l.append(cli_list) elif cli_list == False: rc = False err_buf.stop_tmp_lineno() # we can't proceed if there was a syntax error, but we # can ask the user to fix problems if not rc: return rc self.init_aux_lists() if l: for cli_list in processing_sort_cli(l): if self.process(cli_list,update) == False: rc = False if not self.drop_remaining(): # this is tricky, we don't know what was removed! # it could happen that the user dropped a resource # which was running and therefore couldn't be removed rc = False self.recreate_obj_list() return rc cib_verify = "crm_verify -V -p" class CibObjectSetRaw(CibObjectSet): ''' Edit or display one or more CIB objects (XML). ''' def __init__(self, *args): CibObjectSet.__init__(self, *args) self.obj_list = cib_factory.mkobj_list("xml",*args) def repr(self, format = "ignored"): "Return a string containing xml of all objects." doc = cib_factory.objlist2doc(self.obj_list) s = doc.toprettyxml(user_prefs.xmlindent) doc.unlink() return s def repr_configure(self): ''' Return a string containing xml of configure and its children. 
''' doc = cib_factory.objlist2doc(self.obj_list) conf_node = doc.getElementsByTagName("configuration")[0] s = conf_node.toprettyxml(user_prefs.xmlindent) doc.unlink() return s def process(self, node, update = False): if not cib_factory.is_cib_sane(): return False myobj = obj = self.lookup(node.tagName,node.getAttribute("id")) if update and not obj: obj = cib_factory.find_object_for_node(node) if obj: rc = cib_factory.update_from_node(obj,node) if myobj: self.remove_objs.remove(obj) else: new_obj = cib_factory.create_from_node(node) rc = new_obj != None if rc: self.add_objs.append(new_obj) return rc def save(self, s, update = False): try: doc = xml.dom.minidom.parseString(s) except xml.parsers.expat.ExpatError,msg: cib_parse_err(msg,s) return False rc = True sanitize_cib(doc) show_unrecognized_elems(doc) newnodes = get_interesting_nodes(doc,[]) self.init_aux_lists() if newnodes: for node in processing_sort(newnodes): if not self.process(node,update): rc = False if not self.drop_remaining(): rc = False doc.unlink() self.recreate_obj_list() return rc def verify(self): if not self.obj_list: return True cli_display.set_no_pretty() rc = pipe_string(cib_verify,self.repr(format = -1)) cli_display.reset_no_pretty() return rc in (0,1) def ptest(self, nograph, scores, utilization, verbosity): if not cib_factory.is_cib_sane(): return False if verbosity: ptest = "ptest -X -%s" % verbosity.upper() if scores: ptest = "%s -s" % ptest if utilization: ptest = "%s -U" % ptest if user_prefs.dotty and not nograph: fd,dotfile = mkstemp() ptest = "%s -D %s" % (ptest,dotfile) else: dotfile = None doc = cib_factory.objlist2doc(self.obj_list) cib = doc.childNodes[0] status = cib_status.get_status() if not status: common_err("no status section found") return False cib.appendChild(doc.importNode(status,1)) pipe_string(ptest,doc.toprettyxml()) doc.unlink() if dotfile: show_dot_graph(dotfile) vars.tmpfiles.append(dotfile) else: if not nograph: common_info("install graphviz to see a transition graph") return True # # XML generate utilities # def set_id(node,oldnode,id_hint,id_required = True): ''' Set the id attribute for the node. Procedure: - if the node already contains "id", keep it - if the old node contains "id", copy that - if neither is true, then create a new one using id_hint (exception: if not id_required, then no new id is generated) Finally, save the new id in id_store. ''' old_id = None new_id = node.getAttribute("id") if oldnode and oldnode.getAttribute("id"): old_id = oldnode.getAttribute("id") if not new_id: new_id = old_id if not new_id: if id_required: new_id = id_store.new(node,id_hint) else: id_store.save(new_id) if new_id: node.setAttribute("id",new_id) if oldnode and old_id == new_id: set_id_used_attr(oldnode) def mkxmlsimple(e,oldnode,id_hint): ''' Create an xml node from the (name,dict) pair. The name is the name of the element. The dict contains a set of attributes. ''' node = cib_factory.createElement(e[0]) for n,v in e[1]: if n == "$children": # this one's skipped continue if n == "operation": v = v.lower() if n.startswith('$'): n = n.lstrip('$') if (type(v) != type('') and type(v) != type(u'')) \ or v: # skip empty strings node.setAttribute(n,v) id_ref = node.getAttribute("id-ref") if id_ref: id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) node.setAttribute("id-ref",id_ref_2) else: set_id(node,lookup_node(node,oldnode),id_hint) return node def mkxmlnvpairs(e,oldnode,id_hint): ''' Create xml from the (name,dict) pair. The name is the name of the element. The dict contains a set of nvpairs. 
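    Illustrative mapping (the generated ids are approximate; real ones come
    from set_id()/id_store using the id_hint):

        mkxmlnvpairs(["instance_attributes",
                      [["ip", "10.0.0.1"], ["nic", "eth0"]]], None, "www")

    would produce roughly

        <instance_attributes id="www-instance_attributes">
          <nvpair id="www-instance_attributes-ip" name="ip" value="10.0.0.1"/>
          <nvpair id="www-instance_attributes-nic" name="nic" value="eth0"/>
        </instance_attributes>

    This is used for any nvpair-carrying element.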
Stuff such as instance_attributes. NB: Other tags not containing nvpairs are fine if the dict is empty. ''' node = cib_factory.createElement(e[0]) match_node = lookup_node(node,oldnode) #if match_node: #print "found nvpairs set:",match_node.tagName,match_node.getAttribute("id") id_ref = find_value(e[1],"$id-ref") if id_ref: id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) node.setAttribute("id-ref",id_ref_2) if e[0] != "operations": return node # id_ref is the only attribute (if not operations) e[1].remove(["$id-ref",id_ref]) v = find_value(e[1],"$id") if v: node.setAttribute("id",v) e[1].remove(["$id",v]) else: if e[0] == "operations": # operations don't need no id set_id(node,match_node,id_hint,id_required = False) else: set_id(node,match_node,id_hint) try: subpfx = vars.subpfx_list[e[0]] except: subpfx = '' subpfx = subpfx and "%s_%s" % (id_hint,subpfx) or id_hint nvpair_pfx = node.getAttribute("id") or subpfx for n,v in e[1]: nvpair = cib_factory.createElement("nvpair") node.appendChild(nvpair) nvpair.setAttribute("name",n) if v != None: nvpair.setAttribute("value",v) set_id(nvpair,lookup_node(nvpair,match_node),nvpair_pfx) return node def mkxmlop(e,oldnode,id_hint): ''' Create an operation xml from the (name,dict) pair. ''' node = cib_factory.createElement(e[0]) inst_attr = [] for n,v in e[1]: if n in olist(vars.req_op_attributes + vars.op_attributes): node.setAttribute(n,v) else: inst_attr.append([n,v]) tmp = cib_factory.createElement("operations") oldops = lookup_node(tmp,oldnode) # first find old operations oldop = lookup_node(node,oldops) set_id(node,oldop,id_hint) if inst_attr: e = ["instance_attributes",inst_attr] nia = mkxmlnvpairs(e,oldop,node.getAttribute("id")) node.appendChild(nia) return node def mkxmldate(e,oldnode,id_hint): ''' Create a date_expression xml from the (name,dict) pair. ''' node = cib_factory.createElement(e[0]) operation = find_value(e[1],"operation").lower() node.setAttribute("operation", operation) old_date = lookup_node(node,oldnode) # first find old date element set_id(node,old_date,id_hint) date_spec_attr = [] for n,v in e[1]: if n in olist(vars.date_ops) or n == "operation": continue elif n in vars.in_range_attrs: node.setAttribute(n,v) else: date_spec_attr.append([n,v]) if not date_spec_attr: return node elem = operation == "date_spec" and "date_spec" or "duration" tmp = cib_factory.createElement(elem) old_date_spec = lookup_node(tmp,old_date) # first find old date element set_id(tmp,old_date_spec,id_hint) for n,v in date_spec_attr: tmp.setAttribute(n,v) node.appendChild(tmp) return node def mkxmlrsc_set(e,oldnode,id_hint): ''' Create a resource_set xml from the (name,dict) pair. ''' node = cib_factory.createElement(e[0]) old_rsc_set = lookup_node(node,oldnode) # first find old date element set_id(node,old_rsc_set,id_hint) for ref in e[1]: if ref[0] == "resource_ref": ref_node = cib_factory.createElement(ref[0]) ref_node.setAttribute(ref[1][0],ref[1][1]) node.appendChild(ref_node) elif ref[0] in ("sequential", "action", "role"): node.setAttribute(ref[0], ref[1]) return node conv_list = { "params": "instance_attributes", "meta": "meta_attributes", "property": "cluster_property_set", "rsc_defaults": "meta_attributes", "op_defaults": "meta_attributes", "attributes": "instance_attributes", "utilization": "utilization", "operations": "operations", "op": "op", } def mkxmlnode(e,oldnode,id_hint): ''' Create xml from the (name,dict) pair. The name is the name of the element. The dict contains either a set of nvpairs or a set of attributes. 
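    Dispatch is by element name, after translating CLI keywords through
    conv_list (e.g. "params" -> "instance_attributes", "meta" ->
    "meta_attributes", "property" -> "cluster_property_set"): nvpair
    containers go to mkxmlnvpairs(), "op" to mkxmlop(), "date_expression"
    to mkxmldate(), "resource_set" to mkxmlrsc_set(), and anything else to
    mkxmlsimple().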
The id is either generated or copied if found in the provided xml. Stuff such as instance_attributes. ''' if e[0] in conv_list: e[0] = conv_list[e[0]] if e[0] in ("instance_attributes","meta_attributes","operations","cluster_property_set","utilization"): return mkxmlnvpairs(e,oldnode,id_hint) elif e[0] == "op": return mkxmlop(e,oldnode,id_hint) elif e[0] == "date_expression": return mkxmldate(e,oldnode,id_hint) elif e[0] == "resource_set": return mkxmlrsc_set(e,oldnode,id_hint) else: return mkxmlsimple(e,oldnode,id_hint) def set_nvpair(set_node,name,value): n_id = set_node.getAttribute("id") for c in set_node.childNodes: if is_element(c) and c.getAttribute("name") == name: c.setAttribute("value",value) return np = cib_factory.createElement("nvpair") np.setAttribute("name",name) np.setAttribute("value",value) new_id = id_store.new(np,n_id) np.setAttribute("id",new_id) set_node.appendChild(np) # # cib element classes (CibObject the parent class) # class CibObject(object): ''' The top level object of the CIB. Resources and constraints. ''' state_fmt = "%16s %-8s%-8s%-8s%-8s%-8s%-4s" set_names = {} def __init__(self,xml_obj_type,obj_id = None): if not xml_obj_type in cib_object_map: unsupported_err(xml_obj_type) return self.obj_type = cib_object_map[xml_obj_type][0] self.parent_type = cib_object_map[xml_obj_type][2] self.xml_obj_type = xml_obj_type self.origin = "" # where did it originally come from? self.nocli = False # we don't support this one self.nocli_warn = True # don't issue warnings all the time self.updated = False # was the object updated self.invalid = False # the object has been invalidated (removed) self.moved = False # the object has been moved (from/to a container) self.recreate = False # constraints to be recreated self.parent = None # object superior (group/clone/ms) self.children = [] # objects inferior if obj_id: if not self.mknode(obj_id): self = None # won't do :( else: self.obj_id = None self.node = None def dump_state(self): 'Print object status' print self.state_fmt % \ (self.obj_id,self.origin,self.updated,self.moved,self.invalid, \ self.parent and self.parent.obj_id or "", \ len(self.children)) def repr_cli_xml(self,node,format): h = cli_display.keyword("xml") l = node.toprettyxml('\t').split('\n') l = [x for x in l if x] # drop empty lines if format > 0: return "%s %s" % (h,' \\\n'.join(l)) else: return "%s %s" % (h,''.join(l)) def repr_cli(self,node = None,format = 1): ''' CLI representation for the node. repr_cli_head and repr_cli_child in subclasess. ''' if not node: node = self.node if self.nocli: return self.repr_cli_xml(node,format) l = [] head_s = self.repr_cli_head(node) if not head_s: # everybody must have a head return None comments = [] l.append(head_s) cli_add_description(node,l) for c in node.childNodes: if is_comment(c): comments.append(c.data) continue if not is_element(c): continue s = self.repr_cli_child(c,format) if s: l.append(s) return self.cli_format(l,comments,format) def repr_cli_child(self,c,format): if c.tagName in self.set_names: return "%s %s" % \ (cli_display.keyword(self.set_names[c.tagName]), \ cli_pairs(nvpairs2list(c))) def cli2node(self,cli,oldnode = None): ''' Convert CLI representation to a DOM node. Defined in subclasses. 
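        Illustrative example (ids made up): for a CibPrimitive,

            obj.cli2node('primitive d0 ocf:pacemaker:Dummy params fake="1"')

        returns a <primitive id="d0" class="ocf" provider="pacemaker"
        type="Dummy"> element with a nested instance_attributes set.
        cli_use_validate() relies on this to round-trip xml -> cli -> xml
        and compare the two with xml_cmp().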
''' cli_list = mk_cli_list(cli) if not cli_list: return None if not oldnode: if self.obj_type == "property": oldnode = cib_factory.topnode[cib_object_map[self.xml_obj_type][2]] else: oldnode = self.node comments = get_comments(cli_list) node = self.cli_list2node(cli_list,oldnode) if comments and node: stuff_comments(node,comments) return node def cli_format(self,l,comments,format): ''' Format and add comment (if any). ''' s = cli_format(l,format) cs = '\n'.join(comments) return (comments and format >=0) and '\n'.join([cs,s]) or s def move_comments(self): ''' Move comments to the top of the node. ''' l = [] firstelem = None for n in self.node.childNodes: if is_comment(n): if firstelem: l.append(n) else: if not firstelem and is_element(n): firstelem = n for comm_node in l: common_debug("move comm %s" % comm_node.toprettyxml()) self.node.insertBefore(comm_node, firstelem) common_debug("obj %s node: %s" % (self.obj_id,self.node.toprettyxml())) def mknode(self,obj_id): if not cib_factory.is_cib_sane(): return False if id_store.id_in_use(obj_id): return False if self.xml_obj_type in vars.defaults_tags: tag = "meta_attributes" else: tag = self.xml_obj_type self.node = cib_factory.createElement(tag) self.obj_id = obj_id self.node.setAttribute("id",self.obj_id) self.origin = "user" return True def can_be_renamed(self): ''' Return False if this object can't be renamed. ''' if is_rsc_running(self.obj_id): common_err("cannot rename a running resource (%s)" % self.obj_id) return False if not is_live_cib() and self.node.tagName == "node": common_err("cannot rename nodes") return False return True def attr_exists(self,attr): if not attr in self.node.attributes.keys(): no_attribute_err(attr,self.obj_id) return False return True def cli_use_validate(self): ''' Check validity of the object, as we know it. It may happen that we don't recognize a construct, but that the object is still valid for the CRM. In that case, the object is marked as "CLI read only", i.e. we will neither convert it to CLI nor try to edit it in that format. The validation procedure: we convert xml to cli and then back to xml. If the two xml representations match then we can understand the xml. ''' if not self.node: return True if not self.attr_exists("id"): return False cli_display.set_no_pretty() cli_text = self.repr_cli(format = 0) cli_display.reset_no_pretty() if not cli_text: return False common_debug("clitext: %s" % cli_text) xml2 = self.cli2node(cli_text) if not xml2: return False rc = xml_cmp(self.node, xml2, show = True) xml2.unlink() return rc def check_sanity(self): ''' Right now, this is only for primitives. And groups/clones/ms and cluster properties. 
''' return 0 def matchcli(self,cli_list): head = cli_list[0] return self.obj_type == head[0] \ and self.obj_id == find_value(head[1],"id") def match(self,xml_obj_type,obj_id): return self.xml_obj_type == xml_obj_type and self.obj_id == obj_id def obj_string(self): return "%s:%s" % (self.obj_type,self.obj_id) def reset_updated(self): self.updated = False self.moved = False self.recreate = False for child in self.children: child.reset_updated() def propagate_updated(self): if self.parent: self.parent.updated = self.updated self.parent.propagate_updated() def top_parent(self): '''Return the top parent or self''' if self.parent: return self.parent.top_parent() else: return self def find_child_in_node(self,child): for c in self.node.childNodes: if not is_element(c): continue if c.tagName == child.obj_type and \ c.getAttribute("id") == child.obj_id: return c return None def filter(self,*args): "Filter objects." if not args: return True if args[0] == "NOOBJ": return False if args[0] == "changed": return self.updated or self.origin == "user" + if args[0].startswith("type:"): + return self.obj_type == args[0][5:] return self.obj_id in args def mk_cli_list(cli): 'Sometimes we get a string and sometimes a list.' if type(cli) == type('') or type(cli) == type(u''): cp = CliParser() # what follows looks strange, but the last string actually matters # the previous ones may be comments and are collected by the parser for s in lines2cli(cli): cli_list = cp.parse(s) return cli_list else: return cli class CibNode(CibObject): ''' Node and node's attributes. ''' set_names = { "instance_attributes": "attributes", "utilization": "utilization", } def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") uname = node.getAttribute("uname") s = cli_display.keyword(obj_type) if node_id != uname: s = '%s $id="%s"' % (s, node_id) s = '%s %s' % (s, cli_display.id(uname)) type = node.getAttribute("type") if type != vars.node_default_type: s = '%s:%s' % (s, type) return s def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] obj_id = find_value(head[1],"$id") if not obj_id: obj_id = find_value(head[1],"uname") if not obj_id: return None type = find_value(head[1],"type") if not type: type = vars.node_default_type head[1].append(["type",type]) headnode = mkxmlsimple(head,cib_factory.topnode[cib_object_map[self.xml_obj_type][2]],'node') id_hint = headnode.getAttribute("id") for e in cli_list[1:]: n = mkxmlnode(e,oldnode,id_hint) headnode.appendChild(n) remove_id_used_attributes(cib_factory.topnode[cib_object_map[self.xml_obj_type][2]]) return headnode def get_ra(node): ra_type = node.getAttribute("type") ra_class = node.getAttribute("class") ra_provider = node.getAttribute("provider") return RAInfo(ra_class,ra_type,ra_provider) class CibPrimitive(CibObject): ''' Primitives. 
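    A hypothetical example of the CLI <-> XML mapping handled here:

        primitive www ocf:heartbeat:apache \
            params configfile="/etc/apache2/apache2.conf" \
            op monitor interval="30s" timeout="20s"

    corresponds to a <primitive id="www" class="ocf" provider="heartbeat"
    type="apache"> element with nested instance_attributes and
    operations/op children.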
''' set_names = { "instance_attributes": "params", "meta_attributes": "meta", "utilization": "utilization", } def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") ra_type = node.getAttribute("type") ra_class = node.getAttribute("class") ra_provider = node.getAttribute("provider") s1 = s2 = '' if ra_class: s1 = "%s:"%ra_class if ra_provider: s2 = "%s:"%ra_provider s = cli_display.keyword(obj_type) id = cli_display.id(node_id) return "%s %s %s" % (s, id, ''.join((s1,s2,ra_type))) def repr_cli_child(self,c,format): if c.tagName in self.set_names: return "%s %s" % \ (cli_display.keyword(self.set_names[c.tagName]), \ cli_pairs(nvpairs2list(c))) elif c.tagName == "operations": return cli_operations(c,format) def cli_list2node(self,cli_list,oldnode): ''' Convert a CLI description to DOM node. Try to preserve as many ids as possible in case there's an old XML version. ''' head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'rsc') id_hint = headnode.getAttribute("id") operations = None for e in cli_list[1:]: n = mkxmlnode(e,oldnode,id_hint) if keyword_cmp(e[0], "operations"): operations = n if not keyword_cmp(e[0], "op"): headnode.appendChild(n) else: if not operations: operations = mkxmlnode(["operations",{}],oldnode,id_hint) headnode.appendChild(operations) operations.appendChild(n) remove_id_used_attributes(oldnode) return headnode def add_operation(self,cli_list): # check if there is already an op with the same interval comments = get_comments(cli_list) head = copy.copy(cli_list[0]) name = find_value(head[1], "name") interval = find_value(head[1], "interval") if find_operation(self.node,name,interval): common_err("%s already has a %s op with interval %s" % \ (self.obj_id, name, interval)) return None # drop the rsc attribute head[1].remove(["rsc",self.obj_id]) # create an xml node mon_node = mkxmlsimple(head, None, self.obj_id) # get the place to append it to try: op_node = self.node.getElementsByTagName("operations")[0] except: op_node = cib_factory.createElement("operations") self.node.appendChild(op_node) op_node.appendChild(mon_node) if comments and self.node: stuff_comments(self.node,comments) # the resource is updated self.updated = True self.propagate_updated() return self def check_sanity(self): ''' Check operation timeouts and if all required parameters are defined. ''' if not self.node: # eh? common_err("%s: no xml (strange)" % self.obj_id) return user_prefs.get_check_rc() + rc3 = sanity_check_meta(self.obj_id,self.node,vars.rsc_meta_attributes) ra = get_ra(self.node) if not ra.mk_ra_node(): # no RA found? + if cib_factory.is_asymm_cluster(): + return rc3 ra.error("no such resource agent") return user_prefs.get_check_rc() params = [] for c in self.node.childNodes: if not is_element(c): continue if c.tagName == "instance_attributes": params += nvpairs2list(c) rc1 = ra.sanity_check_params(self.obj_id, params) actions = {} for c in self.node.childNodes: if not is_element(c): continue if c.tagName == "operations": for c2 in c.childNodes: if is_element(c2) and c2.tagName == "op": op,pl = op2list(c2) if op: actions[op] = pl default_timeout = get_default_timeout() rc2 = ra.sanity_check_ops(self.obj_id, actions, default_timeout) - rc3 = sanity_check_meta(self.obj_id,self.node,vars.rsc_meta_attributes) return rc1 | rc2 | rc3 class CibContainer(CibObject): ''' Groups and clones and ms. 
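    Hypothetical examples of the container syntax handled here:

        group g-web vip www
        clone c-ping p-ping meta clone-max="2"
        ms ms-drbd p-drbd meta master-max="1"

    The trailing ids name the child resources; cli_list2node() looks each
    one up in cib_factory and clones its node under the container element.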
''' set_names = { "instance_attributes": "params", "meta_attributes": "meta", } def repr_cli_head(self,node): try: obj_type = vars.cib_cli_map[node.tagName] except: unsupported_err(node.tagName) return None node_id = node.getAttribute("id") children = [] for c in node.childNodes: if not is_element(c): continue if (obj_type == "group" and is_primitive(c)) or \ is_child_rsc(c): children.append(cli_display.rscref(c.getAttribute("id"))) elif obj_type in vars.clonems_tags and is_child_rsc(c): children.append(cli_display.rscref(c.getAttribute("id"))) s = cli_display.keyword(obj_type) id = cli_display.id(node_id) return "%s %s %s" % (s, id, ' '.join(children)) def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'grp') id_hint = headnode.getAttribute("id") for e in cli_list[1:]: n = mkxmlnode(e,oldnode,id_hint) headnode.appendChild(n) v = find_value(head[1],"$children") if v: for child_id in v: obj = cib_factory.find_object(child_id) if obj: n = obj.node.cloneNode(1) headnode.appendChild(n) else: no_object_err(child_id) remove_id_used_attributes(oldnode) return headnode def check_sanity(self): ''' Check meta attributes. ''' if not self.node: # eh? common_err("%s: no xml (strange)" % self.obj_id) return user_prefs.get_check_rc() if self.obj_type == "group": l = vars.rsc_meta_attributes elif self.obj_type == "clone": l = vars.clone_meta_attributes elif self.obj_type == "ms": l = vars.clone_meta_attributes + vars.ms_meta_attributes rc = sanity_check_nvpairs(self.obj_id,self.node,l) return rc class CibLocation(CibObject): ''' Location constraint. ''' def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") rsc = cli_display.rscref(node.getAttribute("rsc")) s = cli_display.keyword(obj_type) id = cli_display.id(node_id) s = "%s %s %s"%(s,id,rsc) pref_node = node.getAttribute("node") score = cli_display.score(get_score(node)) if pref_node: return "%s %s %s" % (s,score,pref_node) else: return s def repr_cli_child(self,c,format): if c.tagName == "rule": return "%s %s" % \ (cli_display.keyword("rule"), cli_rule(c)) def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'location') id_hint = headnode.getAttribute("id") oldrule = None for e in cli_list[1:]: if e[0] in ("expression","date_expression"): n = mkxmlnode(e,oldrule,id_hint) else: n = mkxmlnode(e,oldnode,id_hint) if keyword_cmp(e[0], "rule"): add_missing_attr(n) rule = n headnode.appendChild(n) oldrule = lookup_node(rule,oldnode,location_only=True) else: rule.appendChild(n) remove_id_used_attributes(oldnode) return headnode class CibSimpleConstraint(CibObject): ''' Colocation and order constraints. 
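    Hypothetical examples of the syntax handled here:

        colocation web-with-ip inf: www vip
        order ip-before-web inf: vip www

    Constraints referencing more than two resources are rendered through
    rsc_set_constraint() as resource_set children rather than the simple
    two-resource form above.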
''' def repr_cli_head(self,node): obj_type = vars.cib_cli_map[node.tagName] node_id = node.getAttribute("id") s = cli_display.keyword(obj_type) id = cli_display.id(node_id) score = cli_display.score(get_score(node)) if node.getElementsByTagName("resource_set"): col = rsc_set_constraint(node,obj_type) else: col = two_rsc_constraint(node,obj_type) if not col: return None symm = node.getAttribute("symmetrical") if symm: col.append("symmetrical=%s"%symm) return "%s %s %s %s" % (s,id,score,' '.join(col)) def repr_cli_child(self,c,format): pass # no children here def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] headnode = mkxmlsimple(head,oldnode,'') id_hint = headnode.getAttribute("id") for e in cli_list[1:]: # if more than one element, it's a resource set n = mkxmlnode(e,oldnode,id_hint) headnode.appendChild(n) remove_id_used_attributes(oldnode) return headnode class CibProperty(CibObject): ''' Cluster properties. ''' def repr_cli_head(self,node): return '%s $id="%s"' % \ (cli_display.keyword(self.obj_type), node.getAttribute("id")) def repr_cli_child(self,c,format): name = c.getAttribute("name") if "value" in c.attributes.keys(): value = c.getAttribute("value") else: value = None return nvpair_format(name,value) def cli_list2node(self,cli_list,oldnode): head = copy.copy(cli_list[0]) head[0] = backtrans[head[0]] obj_id = find_value(head[1],"$id") if not obj_id: obj_id = cib_object_map[self.xml_obj_type][3] headnode = mkxmlnode(head,oldnode,obj_id) remove_id_used_attributes(oldnode) return headnode def matchcli(self,cli_list): head = cli_list[0] return self.obj_type == head[0] \ and self.obj_id == find_value(head[1],"$id") def check_sanity(self): ''' Match properties with PE metadata. ''' if not self.node: # eh? 
common_err("%s: no xml (strange)" % self.obj_id) return user_prefs.get_check_rc() l = [] if self.obj_type == "property": l = get_pe_property_list() + get_crmd_property_list() l += ("dc-version","cluster-infrastructure","last-lrm-refresh") elif self.obj_type == "op_defaults": l = vars.op_attributes elif self.obj_type == "rsc_defaults": l = vars.rsc_meta_attributes rc = sanity_check_nvpairs(self.obj_id,self.node,l) return rc # ################################################################ # # cib factory # cib_piped = "cibadmin -p" def get_default_timeout(): t = cib_factory.get_op_default("timeout") if t: return t t = cib_factory.get_property("default-action-timeout") if t: return t if not vars.pe_metadata: vars.pe_metadata = RAInfo("pengine","metadata") if not vars.pe_metadata: return 0 return vars.pe_metadata.param_default("default-action-timeout") def get_pe_property_list(): if not vars.pe_metadata: vars.pe_metadata = RAInfo("pengine","metadata") if not vars.pe_metadata: return [] return vars.pe_metadata.params().keys() def get_crmd_property_list(): if not vars.crmd_metadata: vars.crmd_metadata = RAInfo("crmd","metadata") if not vars.crmd_metadata: return [] return vars.crmd_metadata.params().keys() # xml -> cli translations (and classes) cib_object_map = { "node": ( "node", CibNode, "nodes" ), "primitive": ( "primitive", CibPrimitive, "resources" ), "group": ( "group", CibContainer, "resources" ), "clone": ( "clone", CibContainer, "resources" ), "master": ( "ms", CibContainer, "resources" ), "rsc_location": ( "location", CibLocation, "constraints" ), "rsc_colocation": ( "colocation", CibSimpleConstraint, "constraints" ), "rsc_order": ( "order", CibSimpleConstraint, "constraints" ), "cluster_property_set": ( "property", CibProperty, "crm_config", "cib-bootstrap-options" ), "rsc_defaults": ( "rsc_defaults", CibProperty, "rsc_defaults", "rsc-options" ), "op_defaults": ( "op_defaults", CibProperty, "op_defaults", "op-options" ), } backtrans = odict() # generate a translation cli -> tag for key in cib_object_map: backtrans[cib_object_map[key][0]] = key cib_topnodes = [] # get a list of parents for key in cib_object_map: if not cib_object_map[key][2] in cib_topnodes: cib_topnodes.append(cib_object_map[key][2]) def can_migrate(node): for c in node.childNodes: if not is_element(c) or c.tagName != "meta_attributes": continue pl = nvpairs2list(c) if find_value(pl,"allow-migrate") == "true": return True return False cib_upgrade = "cibadmin --upgrade --force" class CibFactory(Singleton): ''' Juggle with CIB objects. See check_structure below for details on the internal cib representation. ''' shadowcmd = ">/dev/null 1: common_warn("%s contains more than one %s, using first" % \ (obj.obj_id,attr_list_type)) id = node_l[0].getAttribute("id") if not id: common_err("%s reference not found" % id_ref) return id_ref # hope that user will fix that return id # verify if id_ref exists node_l = self.doc.getElementsByTagName(attr_list_type) for node in node_l: if node.getAttribute("id") == id_ref: return id_ref common_err("%s reference not found" % id_ref) return id_ref # hope that user will fix that def _get_attr_value(self,obj_type,attr): if not self.is_cib_sane(): return None for obj in self.cib_objects: if obj.obj_type == obj_type and obj.node: pl = nvpairs2list(obj.node) v = find_value(pl, attr) if v: return v return None def get_property(self,property): ''' Get the value of the given cluster property. 
''' return self._get_attr_value("property",property) def get_op_default(self,attr): ''' Get the value of the attribute from op_defaults. ''' return self._get_attr_value("op_defaults",attr) + def is_asymm_cluster(self): + symm = self.get_property("symmetric-cluster") + return symm and symm != "true" def new_object(self,obj_type,obj_id): "Create a new object of type obj_type." if id_store.id_in_use(obj_id): return None for xml_obj_type,v in cib_object_map.items(): if v[0] == obj_type: obj = v[1](xml_obj_type,obj_id) if obj.obj_id: return obj else: return None return None def mkobj_list(self,mode,*args): obj_list = [] for obj in self.cib_objects: f = lambda: obj.filter(*args) if not f(): continue if mode == "cli" and obj.nocli and obj.nocli_warn: obj.nocli_warn = False obj_cli_warn(obj.obj_id) obj_list.append(obj) return obj_list + def is_cib_empty(self): + return not self.mkobj_list("cli","type:primitive") def has_cib_changed(self): return self.mkobj_list("xml","changed") or self.remove_queue def verify_constraints(self,node): ''' Check if all resources referenced in a constraint exist ''' rc = True constraint_id = node.getAttribute("id") for obj_id in referenced_resources(node): if not self.find_object(obj_id): constraint_norefobj_err(constraint_id,obj_id) rc = False return rc def verify_rsc_children(self,node): ''' Check prerequisites: a) all children must exist b) no child may have other parent than me (or should we steal children?) c) there may not be duplicate children ''' obj_id = node.getAttribute("id") if not obj_id: common_err("element %s has no id" % node.tagName) return False try: obj_type = cib_object_map[node.tagName][0] except: common_err("element %s (%s) not recognized"%(node.tagName,obj_id)) return False c_ids = get_rsc_children_ids(node) if not c_ids: return True rc = True c_dict = {} for child_id in c_ids: if not self.verify_child(child_id,obj_type,obj_id): rc = False if child_id in c_dict: common_err("in group %s child %s listed more than once"%(obj_id,child_id)) rc = False c_dict[child_id] = 1 return rc def verify_child(self,child_id,obj_type,obj_id): 'Check if child exists and obj_id is (or may become) its parent.' child = self.find_object(child_id) if not child: no_object_err(child_id) return False if child.parent and child.parent.obj_id != obj_id: common_err("%s already in use at %s"%(child_id,child.parent.obj_id)) return False if obj_type == "group" and child.obj_type != "primitive": common_err("a group may contain only primitives; %s is %s"%(child_id,child.obj_type)) return False if not child.obj_type in vars.children_tags: common_err("%s may contain a primitive or a group; %s is %s"%(obj_type,child_id,child.obj_type)) return False return True def verify_element(self,node): ''' Can we create this object given its CLI representation. This is not about syntax, we're past that, but about semantics. Right now we check if the children, if any, are fit for the parent. And if this is a constraint, if all referenced resources are present. 
''' rc = True if not self.verify_rsc_children(node): rc = False if not self.verify_constraints(node): rc = False return rc def create_object(self,*args): return self.create_from_cli(CliParser().parse(list(args))) != None def set_property_cli(self,cli_list): comments = get_comments(cli_list) head_pl = cli_list[0] obj_type = head_pl[0].lower() pset_id = find_value(head_pl[1],"$id") if pset_id: head_pl[1].remove(["$id",pset_id]) else: pset_id = cib_object_map[backtrans[obj_type]][3] obj = self.find_object(pset_id) if not obj: if not is_id_valid(pset_id): invalid_id_err(pset_id) return None obj = self.new_object(obj_type,pset_id) if not obj: return None self.topnode[obj.parent_type].appendChild(obj.node) obj.origin = "user" self.cib_objects.append(obj) for n,v in head_pl[1]: set_nvpair(obj.node,n,v) if comments and obj.node: stuff_comments(obj.node,comments) obj.updated = True return obj def add_op(self,cli_list): '''Add an op to a primitive.''' head = cli_list[0] # does the referenced primitive exist rsc_id = find_value(head[1],"rsc") rsc_obj = self.find_object(rsc_id) if not rsc_obj: no_object_err(rsc_id) return None if rsc_obj.obj_type != "primitive": common_err("%s is not a primitive" % rsc_id) return None return rsc_obj.add_operation(cli_list) def create_from_cli(self,cli): 'Create a new cib object from the cli representation.' cli_list = mk_cli_list(cli) if not cli_list: return None head = cli_list[0] obj_type = head[0].lower() obj_id = find_value(head[1],"id") if obj_id and not is_id_valid(obj_id): invalid_id_err(obj_id) return None if len(cli_list) >= 2 and cli_list[1][0] == "raw": doc = xml.dom.minidom.parseString(cli_list[1][1]) return self.create_from_node(doc.childNodes[0]) if obj_type in olist(vars.nvset_cli_names): return self.set_property_cli(cli_list) if obj_type == "op": return self.add_op(cli_list) if obj_type == "node": obj = self.find_object(obj_id) # make an exception and allow updating nodes if obj: self.merge_from_cli(obj,cli_list) return obj obj = self.new_object(obj_type,obj_id) if not obj: return None node = obj.cli2node(cli_list) return self.add_element(obj, node) def update_from_cli(self,obj,cli_list): 'Update element from the cli intermediate.' id_store.remove_xml(obj.node) if len(cli_list) >= 2 and cli_list[1][0] == "raw": doc = xml.dom.minidom.parseString(cli_list[1][1]) id_store.store_xml(doc.childNodes[0]) return self.update_element(obj,doc.childNodes[0]) return self.update_element(obj,obj.cli2node(cli_list)) def update_from_node(self,obj,node): 'Update element from a doc node.' id_store.replace_xml(obj.node,node) return self.update_element(obj,node) def update_element(self,obj,newnode): 'Update element from a doc node.' if not newnode: return False if not self.is_cib_sane(): id_store.replace_xml(newnode,obj.node) return False oldnode = obj.node if xml_cmp(oldnode,newnode): newnode.unlink() return True # the new and the old versions are equal obj.node = newnode if not self.test_element(obj,newnode): id_store.replace_xml(newnode,oldnode) obj.node = oldnode newnode.unlink() return False obj.node = self.replaceNode(newnode,oldnode) obj.nocli = False # try again after update self.adjust_children(obj) if not obj.cli_use_validate(): obj.nocli_warn = True obj.nocli = True oldnode.unlink() obj.updated = True obj.propagate_updated() return True def merge_from_cli(self,obj,cli_list): node = obj.cli2node(cli_list) if not node: return rc = merge_nodes(obj.node, node) if rc: obj.updated = True obj.propagate_updated() def update_moved(self,obj): 'Updated the moved flag. 
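# Illustrative sketch, not part of the patch: the dispatch order of
# create_from_cli() above, as seen from a hypothetical caller.
#
#   cib_factory.create_object("primitive", "p0", "ocf:heartbeat:Dummy")
#
# parses the arguments with CliParser() and then:
#   1. a "raw" element short-circuits to create_from_node(),
#   2. property/rsc_defaults/op_defaults go to set_property_cli(),
#   3. "op" goes to add_op() on the referenced primitive,
#   4. "node" updates an existing node object in place (merge_from_cli),
#   5. everything else is new_object() + cli2node() + add_element().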
Mark affected constraints.' obj.moved = not obj.moved if obj.moved: for c_obj in self.related_constraints(obj): c_obj.recreate = True def adjust_children(self,obj): ''' All stuff children related: manage the nodes of children, update the list of children for the parent, update parents in the children. ''' new_children_ids = get_rsc_children_ids(obj.node) if not new_children_ids: return old_children = obj.children obj.children = [self.find_object(x) for x in new_children_ids] self._relink_orphans_to_top(old_children,obj.children) self._update_children(obj) def _relink_child_to_top(self,obj): 'Relink a child to the top node.' obj.node.parentNode.removeChild(obj.node) self.topnode[obj.parent_type].appendChild(obj.node) if obj.origin == "cib": self.update_moved(obj) obj.parent = None def _update_children(self,obj): '''For composite objects: update all children nodes. ''' # unlink all and find them in the new node for child in obj.children: oldnode = child.node child.node = obj.find_child_in_node(child) if child.children: # and children of children self._update_children(child) rmnode(oldnode) if not child.parent and child.origin == "cib": self.update_moved(child) if child.parent and child.parent != obj: child.parent.updated = True # the other parent updated child.parent = obj def _relink_orphans_to_top(self,old_children,new_children): "New orphans move to the top level for the object type." for child in old_children: if child not in new_children: self._relink_child_to_top(child) def test_element(self,obj,node): if not node.getAttribute("id"): return False if not self.verify_element(node): return False if user_prefs.is_check_always() \ and obj.check_sanity() > 1: return False return True def update_links(self,obj): ''' Update the structure links for the object (obj.children, obj.parent). Update also the dom nodes, if necessary. ''' obj.children = [] if obj.obj_type not in vars.container_tags: return for c in obj.node.childNodes: if is_child_rsc(c): child = self.find_object_for_node(c) if not child: missing_obj_err(c) continue child.parent = obj obj.children.append(child) if not c.isSameNode(child.node): rmnode(child.node) child.node = c def add_element(self,obj,node): obj.node = node obj.obj_id = node.getAttribute("id") if not self.test_element(obj, node): id_store.remove_xml(node) node.unlink() return None common_debug("append child %s to %s" % \ (obj.obj_id,self.topnode[obj.parent_type].tagName)) self.topnode[obj.parent_type].appendChild(node) self.adjust_children(obj) self.redirect_children_constraints(obj) if not obj.cli_use_validate(): self.nocli_warn = True obj.nocli = True self.update_links(obj) obj.origin = "user" self.cib_objects.append(obj) return obj def create_from_node(self,node): 'Create a new cib object from a document node.' if not node: return None try: obj_type = cib_object_map[node.tagName][0] except: return None if is_defaults(node): node = get_rscop_defaults_meta_node(node) if not node: return None if node.ownerDocument != self.doc: node = self.doc.importNode(node,1) obj = self.new_object(obj_type, node.getAttribute("id")) if not obj: return None if not id_store.store_xml(node): return None return self.add_element(obj, node) def cib_objects_string(self, obj_list = None): l = [] if not obj_list: obj_list = self.cib_objects for obj in obj_list: l.append(obj.obj_string()) return ' '.join(l) def _remove_obj(self,obj): "Remove a cib object and its children." # remove children first # can't remove them here from obj.children! 
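# Illustrative sketch, not part of the patch: effect of adjust_children() above
# when a container is redefined.  Assuming an existing "group g1 p1 p2 p3" is
# updated to "group g1 p1 p3":
#   - p1 and p3 are located again inside the new node (_update_children),
#   - p2 is an orphan, so _relink_orphans_to_top() moves its element back
#     under the top-level <resources> and clears its parent,
#   - a child that originated from the CIB is additionally marked as moved
#     (update_moved) so that related constraints can be recreated.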
common_debug("remove object %s" % obj.obj_string()) for child in obj.children: #self._remove_obj(child) # just relink, don't remove children self._relink_child_to_top(child) if obj.parent: # remove obj from its parent, if any obj.parent.children.remove(obj) id_store.remove_xml(obj.node) rmnode(obj.node) obj.invalid = True self.add_to_remove_queue(obj) self.cib_objects.remove(obj) for c_obj in self.related_constraints(obj): if is_simpleconstraint(c_obj.node) and obj.children: # the first child inherits constraints rename_rscref(c_obj,obj.obj_id,obj.children[0].obj_id) delete_rscref(c_obj,obj.obj_id) if silly_constraint(c_obj.node,obj.obj_id): # remove invalid constraints self._remove_obj(c_obj) if not self._no_constraint_rm_msg: err_buf.info("hanging %s deleted" % c_obj.obj_string()) def related_constraints(self,obj): if not is_resource(obj.node): return [] c_list = [] for obj2 in self.cib_objects: if not is_constraint(obj2.node): continue if rsc_constraint(obj.obj_id,obj2.node): c_list.append(obj2) return c_list def redirect_children_constraints(self,obj): ''' Redirect constraints to the new parent ''' for child in obj.children: for c_obj in self.related_constraints(child): rename_rscref(c_obj,child.obj_id,obj.obj_id) # drop useless constraints which may have been created above for c_obj in self.related_constraints(obj): if silly_constraint(c_obj.node,obj.obj_id): self._no_constraint_rm_msg = True self._remove_obj(c_obj) self._no_constraint_rm_msg = False def add_to_remove_queue(self,obj): if obj.origin == "cib": self.remove_queue.append(obj) #print self.cib_objects_string(self.remove_queue) def delete_1(self,obj): ''' Remove an object and its parent in case the object is the only child. ''' if obj.parent and len(obj.parent.children) == 1: self.delete_1(obj.parent) if obj in self.cib_objects: # don't remove parents twice self._remove_obj(obj) def delete(self,*args): 'Delete a cib object.' if not self.doc: empty_cib_err() return False rc = True l = [] for obj_id in args: obj = self.find_object(obj_id) if not obj: no_object_err(obj_id) rc = False continue if is_rsc_running(obj_id): common_warn("resource %s is running, can't delete it" % obj_id) else: l.append(obj) if l: l = processing_sort_cli(l) for obj in reversed(l): self.delete_1(obj) return rc def rename(self,old_id,new_id): ''' Rename a cib object. - check if the resource (if it's a resource) is stopped - check if the new id is not taken - find the object with old id - rename old id to new id in all related objects (constraints) - if the object came from the CIB, then it must be deleted and the one with the new name created - rename old id to new id in the object ''' if not self.doc: empty_cib_err() return False if id_store.id_in_use(new_id): return False obj = self.find_object(old_id) if not obj: no_object_err(old_id) return False if not obj.can_be_renamed(): return False for c_obj in self.related_constraints(obj): rename_rscref(c_obj,old_id,new_id) rename_id(obj.node,old_id,new_id) obj.obj_id = new_id id_store.rename(old_id,new_id) obj.updated = True obj.propagate_updated() def erase(self): "Remove all cib objects." 
# remove only bottom objects and no constraints # the rest will automatically follow if not self.doc: empty_cib_err() return False erase_ok = True l = [] for obj in [obj for obj in self.cib_objects \ if not obj.children and not is_constraint(obj.node) \ and obj.obj_type != "node" ]: if is_rsc_running(obj.obj_id): common_warn("resource %s is running, can't delete it" % obj.obj_id) erase_ok = False else: l.append(obj) if not erase_ok: common_err("CIB erase aborted (nothing was deleted)") return False self._no_constraint_rm_msg = True for obj in l: self.delete(obj.obj_id) self._no_constraint_rm_msg = False remaining = 0 for obj in self.cib_objects: if obj.obj_type != "node": remaining += 1 if remaining > 0: common_err("strange, but these objects remained:") for obj in self.cib_objects: if obj.obj_type != "node": print >> sys.stderr, obj.obj_string() self.cib_objects = [] return True def erase_nodes(self): "Remove nodes only." if not self.doc: empty_cib_err() return False l = [obj for obj in self.cib_objects if obj.obj_type == "node"] for obj in l: self.delete(obj.obj_id) def refresh(self): "Refresh from the CIB." self.reset() self.initialize() user_prefs = UserPrefs.getInstance() options = Options.getInstance() err_buf = ErrorBuffer.getInstance() vars = Vars.getInstance() cib_factory = CibFactory.getInstance() cli_display = CliDisplay.getInstance() cib_status = CibStatus.getInstance() id_store = IdMgmt.getInstance() # vim:ts=4:sw=4:et: diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in index f19e9593ff..2d06992ff6 100644 --- a/shell/modules/ui.py.in +++ b/shell/modules/ui.py.in @@ -1,1958 +1,1959 @@ # Copyright (C) 2008 Dejan Muhamedagic # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import sys import re import os import readline import shlex import time import bz2 from help import HelpSystem, cmd_help from vars import Vars from levels import Levels from cibconfig import mkset_obj, CibFactory from cibstatus import CibStatus from template import LoadTemplate from cliformat import nvpairs2list from ra import * from msg import * from utils import * from xmlutil import * def cmd_end(cmd,dir = ".."): "Go up one level." levels.droplevel() def cmd_exit(cmd): "Exit the crm program" cmd_end(cmd) if options.interactive and not options.batch: print "bye" try: readline.write_history_file(hist_file) except: pass for f in vars.tmpfiles: os.unlink(f) sys.exit() class UserInterface(object): ''' Stuff common to all user interface classes. 
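# Illustrative note, not part of the patch: the cmd_table entries built by
# UserInterface and its subclasses below all follow the same shape, e.g.
#
#   self.cmd_table["editor"] = (self.set_editor, (1,1), 0)
#   self.cmd_table["skill-level"] = (self.set_skill_level, (1,1), 0, (skills_list,))
#
# i.e. (handler, (min_args, max_args), flag[, completion helpers]); the exact
# meaning of the integer flag is not visible in this hunk.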
''' global_cmd_aliases = { "quit": ("bye","exit"), "end": ("cd","up"), } def __init__(self): self.cmd_table = odict() self.cmd_table["help"] = (self.help,(0,1),0) self.cmd_table["quit"] = (self.exit,(0,0),0) self.cmd_table["end"] = (self.end,(0,1),0) self.cmd_aliases = self.global_cmd_aliases.copy() def end_game(self, no_questions_asked = False): pass def help(self,cmd,topic = ''): "usage: help []" cmd_help(self.help_table,topic) def end(self,cmd,dir = ".."): "usage: end" self.end_game() cmd_end(cmd,dir) def exit(self,cmd): "usage: exit" self.end_game() cmd_exit(cmd) class CliOptions(UserInterface): ''' Manage user preferences ''' def __init__(self): UserInterface.__init__(self) self.help_table = help_sys.load_level("options") self.cmd_table["skill-level"] = (self.set_skill_level,(1,1),0,(skills_list,)) self.cmd_table["editor"] = (self.set_editor,(1,1),0) self.cmd_table["pager"] = (self.set_pager,(1,1),0) self.cmd_table["user"] = (self.set_crm_user,(0,1),0) self.cmd_table["output"] = (self.set_output,(1,1),0) self.cmd_table["colorscheme"] = (self.set_colors,(1,1),0) self.cmd_table["check-frequency"] = (self.set_check_frequency,(1,1),0) self.cmd_table["check-mode"] = (self.set_check_mode,(1,1),0) self.cmd_table["sort-elements"] = (self.set_sort_elements,(1,1),0) self.cmd_table["save"] = (self.save_options,(0,0),0) self.cmd_table["show"] = (self.show_options,(0,0),0) setup_aliases(self) def set_skill_level(self,cmd,skill_level): """usage: skill-level level: operator | administrator | expert""" return user_prefs.set_skill_level(skill_level) def set_editor(self,cmd,prog): "usage: editor " return user_prefs.set_editor(prog) def set_pager(self,cmd,prog): "usage: pager " return user_prefs.set_pager(prog) def set_crm_user(self,cmd,user = ''): "usage: user []" return user_prefs.set_crm_user(user) def set_output(self,cmd,otypes): "usage: output " return user_prefs.set_output(otypes) def set_colors(self,cmd,scheme): "usage: colorscheme " return user_prefs.set_colors(scheme) def set_check_frequency(self,cmd,freq): "usage: check-frequence " return user_prefs.set_check_freq(freq) def set_check_mode(self,cmd,mode): "usage: check-mode " return user_prefs.set_check_mode(mode) def set_sort_elements(self,cmd,opt): "usage: sort-elements {yes|no}" if not verify_boolean(opt): common_err("%s: bad boolean option"%opt) return True return user_prefs.set_sort_elems(opt) def show_options(self,cmd): "usage: show" return user_prefs.write_rc(sys.stdout) def save_options(self,cmd): "usage: save" return user_prefs.save_options(vars.rc_file) def end_game(self, no_questions_asked = False): if no_questions_asked and not options.interactive: self.save_options("save") def listshadows(): return stdout2list("ls %s | fgrep shadow. 
| sed 's/^shadow\.//'" % vars.crm_conf_dir) def shadowfile(name): return "%s/shadow.%s" % (vars.crm_conf_dir, name) def shadow2doc(name): return file2doc(shadowfile(name)) class CibShadow(UserInterface): ''' CIB shadow management class ''' extcmd = ">/dev/null &1" % self.extcmd) except os.error: no_prog_err(self.extcmd) return False return True def new(self,cmd,name,*args): "usage: new [withstatus] [force]" if not is_filename_sane(name): return False new_cmd = "%s -c '%s'" % (self.extcmd,name) for par in args: if not par in ("force","--force","withstatus"): syntax_err((cmd,name,par), context = 'new') return False if user_prefs.get_force() or "force" in args or "--force" in args: new_cmd = "%s --force" % new_cmd if ext_cmd(new_cmd) == 0: common_info("%s shadow CIB created"%name) self.use("use",name) if "withstatus" in args: cib_status.load("shadow:%s" % name) def _find_pe(self,infile): 'Find a pe input' for p in ("%s/%s", "%s/%s.bz2", "%s/pe-*-%s.bz2"): fl = glob.glob(p % (vars.pe_dir,infile)) if fl: break if not fl: common_err("no %s pe input file"%infile) return '' if len(fl) > 1: common_err("more than one %s pe input file: %s" % \ (infile,' '.join(fl))) return '' return fl[0] def pe_import(self,cmd,infile,name = None): "usage: import {|} []" if name and not is_filename_sane(name): return False # where's the input? if not os.access(infile,os.F_OK): if "/" in infile: common_err("%s: no such file"%infile) return False infile = self._find_pe(infile) if not infile: return False if not name: name = os.path.basename(infile) # read input try: f = open(infile) except IOError,msg: common_err("open: %s"%msg) return s = ''.join(f) f.close() # decompresed and rename shadow if it ends with .bz2 if infile.endswith(".bz2"): name = name.replace(".bz2","") s = bz2.decompress(s) # copy input to the shadow try: f = open(shadowfile(name), "w") except IOError,msg: common_err("open: %s"%msg) return f.write(s) f.close() # use the shadow and load the status from there return self.use("use",name,"withstatus") def delete(self,cmd,name): "usage: delete " if not is_filename_sane(name): return False if vars.cib_in_use == name: common_err("%s shadow CIB is in use"%name) return False if ext_cmd("%s -D '%s' --force" % (self.extcmd,name)) == 0: common_info("%s shadow CIB deleted"%name) else: common_err("failed to delete %s shadow CIB"%name) return False def reset(self,cmd,name): "usage: reset " if not is_filename_sane(name): return False if ext_cmd("%s -r '%s'" % (self.extcmd,name)) == 0: common_info("copied live CIB to %s"%name) else: common_err("failed to copy live CIB to %s"%name) return False def commit(self,cmd,name): "usage: commit " if not is_filename_sane(name): return False if ext_cmd("%s -C '%s' --force" % (self.extcmd,name)) == 0: common_info("commited '%s' shadow CIB to the cluster"%name) else: common_err("failed to commit the %s shadow CIB"%name) return False def diff(self,cmd): "usage: diff" s = get_stdout(add_sudo("%s -d" % self.extcmd_stdout)) page_string(s) def list(self,cmd): "usage: list" if options.regression_tests: for t in listshadows(): print t else: multicolumn(listshadows()) def _use(self,name,withstatus): # Choose a shadow cib for further changes. If the name # provided is empty, then choose the live (cluster) cib. 
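# Illustrative sketch, not part of the patch: the shadow CIB verbs above wrap
# the external helper held in self.extcmd (presumably crm_shadow; its
# definition is garbled in this hunk) roughly as:
#
#   new    ->  <extcmd> -c '<name>' [--force]
#   delete ->  <extcmd> -D '<name>' --force
#   reset  ->  <extcmd> -r '<name>'
#   commit ->  <extcmd> -C '<name>' --force
#   diff   ->  <extcmd> -d   (output captured and paged via page_string)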
# Don't allow ' in shadow names if not name or name == "live": os.unsetenv(vars.shadow_envvar) vars.cib_in_use = "" if withstatus: cib_status.load("live") else: os.putenv(vars.shadow_envvar,name) vars.cib_in_use = name if withstatus: cib_status.load("shadow:%s" % name) def use(self,cmd,name = '', withstatus = ''): "usage: use [] [withstatus]" # check the name argument if name and not is_filename_sane(name): return False if name and name != "live": if not os.access(shadowfile(name),os.F_OK): common_err("%s: no such shadow CIB"%name) return False if withstatus and withstatus != "withstatus": syntax_err((cmd,withstatus), context = 'use') return False # If invoked from configure # take special precautions try: prev_level = levels.previous().myname() except: prev_level = '' if prev_level != "cibconfig": self._use(name,withstatus) return True if not cib_factory.has_cib_changed(): self._use(name,withstatus) # new CIB: refresh the CIB factory cib_factory.refresh() return True saved_cib = vars.cib_in_use self._use(name,'') # don't load the status yet if not cib_factory.is_current_cib_equal(silent = True): # user made changes and now wants to switch to a # different and unequal CIB; we refuse to cooperate common_err("the requested CIB is different from the current one") if user_prefs.get_force(): common_info("CIB overwrite forced") elif not ask("All changes will be dropped. Do you want to proceed?"): self._use(saved_cib,'') # revert to the previous CIB return False self._use(name,withstatus) # now load the status too return True def listtemplates(): l = [] for f in os.listdir(vars.tmpl_dir): if os.path.isfile("%s/%s" % (vars.tmpl_dir,f)): l.append(f) return l def listconfigs(): l = [] for f in os.listdir(vars.tmpl_conf_dir): if os.path.isfile("%s/%s" % (vars.tmpl_conf_dir,f)): l.append(f) return l def check_transition(inp,state,possible_l): if not state in possible_l: common_err("input (%s) in wrong state %s" % (inp,state)) return False return True class Template(UserInterface): ''' Configuration templates. ''' def __init__(self): UserInterface.__init__(self) self.help_table = help_sys.load_level("template") self.cmd_table["new"] = (self.new,(2,),1,(null_list,templates_list,loop)) self.cmd_table["load"] = (self.load,(0,1),1,(config_list,)) self.cmd_table["edit"] = (self.edit,(0,1),1,(config_list,)) self.cmd_table["delete"] = (self.delete,(1,2),1,(config_list,)) self.cmd_table["show"] = (self.show,(0,1),0,(config_list,)) self.cmd_table["apply"] = (self.apply,(0,2),1,(config_list_method,config_list)) self.cmd_table["list"] = (self.list,(0,1),0) setup_aliases(self) self.init_dir() self.curr_conf = '' def init_dir(self): '''Create the conf directory, link to templates''' if not os.path.isdir(vars.tmpl_conf_dir): try: os.makedirs(vars.tmpl_conf_dir) except os.error,msg: common_err("makedirs: %s"%msg) return def get_depends(self,tmpl): '''return a list of required templates''' # Not used. May need it later. try: tf = open("%s/%s" % (vars.tmpl_dir, tmpl),"r") except IOError,msg: common_err("open: %s"%msg) return l = [] for s in tf: a = s.split() if len(a) >= 2 and a[0] == '%depends_on': l += a[1:] tf.close() return l def replace_params(self,s,user_data): change = False for i in range(len(s)): word = s[i] for p in user_data: # is parameter in the word? pos = word.find('%' + p) if pos < 0: continue endpos = pos + len('%' + p) # and it isn't part of another word? 
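# Illustrative sketch, not part of the patch: "use" above switches between CIBs
# by (un)setting vars.shadow_envvar (the shadow CIB environment variable,
# typically CIB_shadow) so that subsequent CIB calls operate on the chosen copy:
#
#   use test1            ->  os.putenv(shadow_envvar, "test1") and, with
#                            "withstatus", cib_status.load("shadow:test1")
#   use live withstatus  ->  os.unsetenv(shadow_envvar); cib_status.load("live")
#
# When called from the configure level with unsaved changes, the switch is
# refused (or must be forced) and the previously used CIB is restored.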
if re.match("[A-Za-z0-9]", word[endpos:endpos+1]): continue # if the value contains a space or # it is a value of an attribute # put quotes around it if user_data[p].find(' ') >= 0 or word[pos-1:pos] == '=': v = '"' + user_data[p] + '"' else: v = user_data[p] word = word.replace('%' + p, v) change = True # we did replace something if change: s[i] = word if 'opt' in s: if not change: s = [] else: s.remove('opt') return s def generate(self,l,user_data): '''replace parameters (user_data) and generate output ''' l2 = [] for piece in l: piece2 = [] for s in piece: s = self.replace_params(s,user_data) if s: piece2.append(' '.join(s)) if piece2: l2.append(' \\\n\t'.join(piece2)) return '\n'.join(l2) def process(self,config = ''): '''Create a cli configuration from the current config''' try: f = open("%s/%s" % (vars.tmpl_conf_dir, config or self.curr_conf),'r') except IOError,msg: common_err("open: %s"%msg) return '' l = [] piece = [] user_data = {} # states START = 0; PFX = 1; DATA = 2; GENERATE = 3 state = START err_buf.start_tmp_lineno() rc = True for inp in f: err_buf.incr_lineno() if inp.startswith('#'): continue if type(inp) == type(u''): inp = inp.encode('ascii') inp = inp.strip() try: s = shlex.split(inp) except ValueError, msg: common_err(msg) continue while '\n' in s: s.remove('\n') if not s: if state == GENERATE and piece: l.append(piece) piece = [] elif s[0] in ("%name","%depends_on","%suggests"): continue elif s[0] == "%pfx": if check_transition(inp,state,(START,DATA)) and len(s) == 2: pfx = s[1] state = PFX elif s[0] == "%required": if check_transition(inp,state,(PFX,)): state = DATA data_reqd = True elif s[0] == "%optional": if check_transition(inp,state,(PFX,DATA)): state = DATA data_reqd = False elif s[0] == "%%": if state != DATA: common_warn("user data in wrong state %s" % state) if len(s) < 2: common_warn("parameter name missing") elif len(s) == 2: if data_reqd: common_err("required parameter %s not set" % s[1]) rc = False elif len(s) == 3: user_data["%s:%s" % (pfx,s[1])] = s[2] else: common_err("%s: syntax error" % inp) elif s[0] == "%generate": if check_transition(inp,state,(DATA,)): state = GENERATE piece = [] elif state == GENERATE: if s: piece.append(s) else: common_err("<%s> unexpected" % inp) if piece: l.append(piece) err_buf.stop_tmp_lineno() f.close() if not rc: return '' return self.generate(l,user_data) def new(self,cmd,name,*args): "usage: new