diff --git a/crmd/election.c b/crmd/election.c index fa43486084..57b5820b11 100644 --- a/crmd/election.c +++ b/crmd/election.c @@ -1,543 +1,574 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include +#define STORM_INTERVAL 2 /* in seconds */ +#define STORM_MULTIPLIER 5 /* multiplied by the number of nodes */ + GHashTable *voted = NULL; uint highest_born_on = -1; static int current_election_id = 1; static int crm_uptime(struct timeval *output) { - struct rusage info; - int rc = getrusage(RUSAGE_SELF, &info); + static time_t expires = 0; + static struct rusage info; - output->tv_sec = 0; - output->tv_usec = 0; + time_t tm_now = time(NULL); - if (rc < 0) { - crm_perror(LOG_ERR, "Could not calculate the current uptime"); - return -1; + if(expires < tm_now) { + int rc = getrusage(RUSAGE_SELF, &info); + + output->tv_sec = 0; + output->tv_usec = 0; + + if (rc < 0) { + crm_perror(LOG_ERR, "Could not calculate the current uptime"); + expires = 0; + return -1; + } + + crm_debug("Current CPU usage is: %lds, %ldus", (long)info.ru_utime.tv_sec, + (long)info.ru_utime.tv_usec); } + + expires = tm_now + STORM_INTERVAL; /* N seconds after the last _access_ */ output->tv_sec = info.ru_utime.tv_sec; output->tv_usec = info.ru_utime.tv_usec; - crm_debug("Current CPU usage is: %lds, %ldus", (long)info.ru_utime.tv_sec, - (long)info.ru_utime.tv_usec); return 1; } static int crm_compare_age(struct timeval your_age) { struct timeval our_age; if (crm_uptime(&our_age) < 0) { return -1; } /* We want these times to be "significantly" different */ if (our_age.tv_sec > your_age.tv_sec) { crm_debug("Win: %ld vs %ld (seconds)", (long)our_age.tv_sec, (long)your_age.tv_sec); return 1; } else if (our_age.tv_sec < your_age.tv_sec) { crm_debug("Lose: %ld vs %ld (seconds)", (long)our_age.tv_sec, (long)your_age.tv_sec); return -1; } else if (our_age.tv_usec > your_age.tv_usec) { crm_debug("Win: %ld vs %ld (usec)", (long)our_age.tv_usec, (long)your_age.tv_usec); return 1; } else if (our_age.tv_usec < your_age.tv_usec) { crm_debug("Lose: %ld vs %ld (usec)", (long)our_age.tv_usec, (long)your_age.tv_usec); return -1; } return 0; } /* A_ELECTION_VOTE */ void do_election_vote(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { struct timeval age; xmlNode *vote = NULL; gboolean not_voting = FALSE; /* don't vote if we're in one of these states or wanting to shut down */ switch (cur_state) { case S_STARTING: case S_RECOVERY: case S_STOPPING: case S_TERMINATE: crm_warn("Not voting in election, we're in state %s", fsa_state2string(cur_state)); not_voting = TRUE; break; default: break; } if (not_voting == FALSE) { if (is_set(fsa_input_register, R_STARTING)) { not_voting = TRUE; } }
if (not_voting) { if (AM_I_DC) { register_fsa_input(C_FSA_INTERNAL, I_RELEASE_DC, NULL); } else { register_fsa_input(C_FSA_INTERNAL, I_PENDING, NULL); } return; } vote = create_request(CRM_OP_VOTE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); current_election_id++; crm_xml_add(vote, F_CRM_ELECTION_OWNER, fsa_our_uuid); crm_xml_add_int(vote, F_CRM_ELECTION_ID, current_election_id); crm_uptime(&age); crm_xml_add_int(vote, F_CRM_ELECTION_AGE_S, age.tv_sec); crm_xml_add_int(vote, F_CRM_ELECTION_AGE_US, age.tv_usec); send_cluster_message(NULL, crm_msg_crmd, vote, TRUE); free_xml(vote); crm_debug("Started election %d", current_election_id); if (voted) { g_hash_table_destroy(voted); } voted = NULL; if (cur_state == S_ELECTION || cur_state == S_RELEASE_DC) { crm_timer_start(election_timeout); } else if (cur_state != S_INTEGRATION) { crm_err("Broken? Voting in state %s", fsa_state2string(cur_state)); } return; } char *dc_hb_msg = NULL; int beat_num = 0; gboolean do_dc_heartbeat(gpointer data) { return TRUE; } struct election_data_s { const char *winning_uname; unsigned int winning_bornon; }; static void log_member_name(gpointer key, gpointer value, gpointer user_data) { const crm_node_t *node = value; if (crm_is_peer_active(node)) { crm_err("%s: %s proc=%.32x", (char *)user_data, (char *)key, node->processes); } } static void log_node(gpointer key, gpointer value, gpointer user_data) { crm_err("%s: %s", (char *)user_data, (char *)key); } void do_election_check(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int voted_size = 0; int num_members = crm_active_peers(); if (voted) { voted_size = g_hash_table_size(voted); } /* in the case of #voted > #members, it is better to * wait for the timeout and give the cluster time to * stabilize */ if (fsa_state != S_ELECTION) { crm_debug("Ignore election check: we're not in an election"); } else if (voted_size >= num_members) { /* we won and everyone has voted */ crm_timer_stop(election_timeout); register_fsa_input(C_FSA_INTERNAL, I_ELECTION_DC, NULL); if (voted_size > num_members) { char *data = NULL; data = strdup("member"); g_hash_table_foreach(crm_peer_cache, log_member_name, data); free(data); data = strdup("voted"); g_hash_table_foreach(voted, log_node, data); free(data); } crm_debug("Destroying voted hash"); g_hash_table_destroy(voted); voted = NULL; } else { crm_debug("Still waiting on %d non-votes (%d total)", num_members - voted_size, num_members); } return; } -#define win_dampen 1 /* in seconds */ #define loss_dampen 2 /* in seconds */ /* A_ELECTION_COUNT */ void do_election_count_vote(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { struct timeval your_age; - int age; + int age = 0; int election_id = -1; int log_level = LOG_INFO; gboolean use_born_on = FALSE; gboolean done = FALSE; gboolean we_loose = FALSE; const char *op = NULL; const char *vote_from = NULL; const char *your_version = NULL; const char *election_owner = NULL; const char *reason = "unknown"; crm_node_t *our_node = NULL, *your_node = NULL; ha_msg_input_t *vote = fsa_typed_data(fsa_dt_ha_msg); + static int election_count = 0; + + time_t tm_now = time(NULL); + static time_t expires = 0; static time_t last_election_loss = 0; /* if the membership copy is NULL we REALLY shouldn't be voting * the question is how we managed to get here.
*/ CRM_CHECK(msg_data != NULL, return); CRM_CHECK(crm_peer_cache != NULL, return); CRM_CHECK(vote != NULL, crm_err("Bogus data from %s", msg_data->origin); return); CRM_CHECK(vote->msg != NULL, crm_err("Bogus data from %s", msg_data->origin); return); your_age.tv_sec = 0; your_age.tv_usec = 0; op = crm_element_value(vote->msg, F_CRM_TASK); vote_from = crm_element_value(vote->msg, F_CRM_HOST_FROM); your_version = crm_element_value(vote->msg, F_CRM_VERSION); election_owner = crm_element_value(vote->msg, F_CRM_ELECTION_OWNER); crm_element_value_int(vote->msg, F_CRM_ELECTION_ID, &election_id); crm_element_value_int(vote->msg, F_CRM_ELECTION_AGE_S, (int *)&(your_age.tv_sec)); crm_element_value_int(vote->msg, F_CRM_ELECTION_AGE_US, (int *)&(your_age.tv_usec)); CRM_CHECK(vote_from != NULL, vote_from = fsa_our_uname); your_node = crm_get_peer(0, vote_from); our_node = crm_get_peer(0, fsa_our_uname); if (voted == NULL) { crm_debug("Created voted hash"); voted = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); } if (is_heartbeat_cluster()) { use_born_on = TRUE; } else if (is_classic_ais_cluster()) { use_born_on = TRUE; } + if(expires < tm_now) { + election_count = 1; + expires = tm_now + STORM_INTERVAL; + + } else if (FALSE == crm_str_eq(op, CRM_OP_NOVOTE, TRUE)) { + int peers = 1 + g_hash_table_size(crm_peer_cache); + election_count++; + + /* If every node has to vote down every other node, that's N*(N-1) total elections + * Allow some leeway before _really_ complaining + */ + if(election_count > (peers * peers)) { + crm_err("Election storm detected: %d elections in %d seconds", election_count, STORM_INTERVAL); + } + } + age = crm_compare_age(your_age); if (cur_state == S_STARTING) { reason = "Still starting"; we_loose = TRUE; } else if (our_node == NULL || crm_is_peer_active(our_node) == FALSE) { reason = "We are not part of the cluster"; log_level = LOG_ERR; we_loose = TRUE; } else if (election_id != current_election_id && crm_str_eq(fsa_our_uuid, election_owner, TRUE)) { log_level = LOG_DEBUG_2; reason = "Superseded"; done = TRUE; } else if (your_node == NULL || crm_is_peer_active(your_node) == FALSE) { /* Possibly we cached the message in the FSA queue at a point when the peer was still an active member */ reason = "Peer is not part of our cluster"; log_level = LOG_WARNING; done = TRUE; } else if (crm_str_eq(op, CRM_OP_NOVOTE, TRUE)) { char *op_copy = strdup(op); char *uname_copy = strdup(vote_from); CRM_ASSERT(crm_str_eq(fsa_our_uuid, election_owner, TRUE)); /* update the list of nodes that have voted */ g_hash_table_replace(voted, uname_copy, op_copy); reason = "Recorded"; done = TRUE; } else if (crm_str_eq(vote_from, fsa_our_uname, TRUE)) { char *op_copy = strdup(op); char *uname_copy = strdup(vote_from); CRM_ASSERT(crm_str_eq(fsa_our_uuid, election_owner, TRUE)); /* update ourselves in the list of nodes that have voted */ g_hash_table_replace(voted, uname_copy, op_copy); reason = "Recorded"; done = TRUE; } else if (compare_version(your_version, CRM_FEATURE_SET) < 0) { reason = "Version"; we_loose = TRUE; } else if (compare_version(your_version, CRM_FEATURE_SET) > 0) { reason = "Version"; } else if (age < 0) { reason = "Uptime"; we_loose = TRUE; } else if (age > 0) { reason = "Uptime"; /* TODO: Check for y(our) born < 0 */ } else if (use_born_on && your_node->born < our_node->born) { reason = "Born"; we_loose = TRUE; } else if (use_born_on && your_node->born > our_node->born) { reason = "Born"; } else if (fsa_our_uname == NULL) { reason = "Unknown host name"; we_loose = TRUE; }
else if (strcasecmp(fsa_our_uname, vote_from) > 0) { reason = "Host name"; we_loose = TRUE; } else { reason = "Host name"; CRM_ASSERT(strcmp(fsa_our_uname, vote_from) != 0); /* can't happen... * } else if(strcasecmp(fsa_our_uname, vote_from) == 0) { * * default... * } else { // strcasecmp(fsa_our_uname, vote_from) < 0 * we win */ } if (done) { do_crm_log(log_level + 1, "Election %d (current: %d, owner: %s): Processed %s from %s (%s)", election_id, current_election_id, election_owner, op, vote_from, reason); } else if (we_loose) { xmlNode *novote = create_request(CRM_OP_NOVOTE, NULL, vote_from, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL); do_crm_log(log_level, "Election %d (owner: %s) lost: %s from %s (%s)", election_id, election_owner, op, vote_from, reason); update_dc(NULL); crm_timer_stop(election_timeout); if (fsa_input_register & R_THE_DC) { crm_trace("Give up the DC to %s", vote_from); register_fsa_input(C_FSA_INTERNAL, I_RELEASE_DC, NULL); } else if (cur_state != S_STARTING) { crm_trace("We weren't the DC anyway"); register_fsa_input(C_FSA_INTERNAL, I_PENDING, NULL); } crm_xml_add(novote, F_CRM_ELECTION_OWNER, election_owner); crm_xml_add_int(novote, F_CRM_ELECTION_ID, election_id); send_cluster_message(crm_get_peer(0, vote_from), crm_msg_crmd, novote, TRUE); free_xml(novote); fsa_cib_conn->cmds->set_slave(fsa_cib_conn, cib_scope_local); - last_election_loss = time(NULL); + last_election_loss = tm_now; } else { do_crm_log(log_level, "Election %d (owner: %s) pass: %s from %s (%s)", election_id, election_owner, op, vote_from, reason); if (last_election_loss) { - time_t tm_now = time(NULL); if (tm_now - last_election_loss < (time_t) loss_dampen) { crm_info("Election %d ignore: We already lost an election less than %ds ago (%s)", election_id, loss_dampen, ctime(&last_election_loss)); update_dc(NULL); return; } last_election_loss = 0; } register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); g_hash_table_destroy(voted); voted = NULL; } } /* A_ELECT_TIMER_START, A_ELECTION_TIMEOUT */ /* we won */ void do_election_timer_ctrl(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { } static void feature_update_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } /* A_DC_TAKEOVER */ void do_dc_takeover(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int rc = pcmk_ok; xmlNode *cib = NULL; GListPtr gIter = NULL; const char *cluster_type = name_for_cluster_type(get_cluster_type()); crm_info("Taking over DC status for this partition"); set_bit(fsa_input_register, R_THE_DC); for (gIter = stonith_cleanup_list; gIter != NULL; gIter = gIter->next) { char *target = gIter->data; const char *uuid = get_uuid(target); crm_notice("Marking %s, target of a previous stonith action, as clean", target); send_stonith_update(NULL, target, uuid); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; #if SUPPORT_COROSYNC if (is_classic_ais_cluster()) { send_ais_text(crm_class_quorum, NULL, TRUE, NULL, crm_msg_ais); } #endif if (voted != NULL) { crm_trace("Destroying voted hash"); g_hash_table_destroy(voted); voted = NULL; } set_bit(fsa_input_register, R_JOIN_OK); set_bit(fsa_input_register, R_INVOKE_PE); fsa_cib_conn->cmds->set_master(fsa_cib_conn, cib_scope_local); cib = create_xml_node(NULL,
XML_TAG_CIB); crm_xml_add(cib, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); fsa_cib_update(XML_TAG_CIB, cib, cib_quorum_override, rc, NULL); add_cib_op_callback(fsa_cib_conn, rc, FALSE, NULL, feature_update_callback); update_attr_delegate( fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "dc-version", VERSION "-" BUILD_VERSION, FALSE, NULL); update_attr_delegate( fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "cluster-infrastructure", cluster_type, FALSE, NULL); mainloop_set_trigger(config_read); free_xml(cib); } /* A_DC_RELEASE */ void do_dc_release(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { if (action & A_DC_RELEASE) { crm_debug("Releasing the role of DC"); clear_bit(fsa_input_register, R_THE_DC); } else if (action & A_DC_RELEASED) { crm_info("DC role released"); #if 0 if (are there errors) { /* we can't stay up if not healthy */ /* or perhaps I_ERROR and go to S_RECOVER? */ result = I_SHUTDOWN; } #endif register_fsa_input(C_FSA_INTERNAL, I_RELEASE_SUCCESS, NULL); } else { crm_err("Unknown action %s", fsa_action2string(action)); } crm_trace("Am I still the DC? %s", AM_I_DC ? XML_BOOLEAN_YES : XML_BOOLEAN_NO); }
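The two election.c additions above share one mechanism: a static expiry timestamp that limits work to one pass per STORM_INTERVAL window. Below is a minimal standalone sketch of that pattern, assuming the same 2-second window and the peers*peers leeway used by do_election_count_vote; cached_cpu_time and storm_detected are illustrative names invented for this example, not Pacemaker API.

#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>

#define STORM_INTERVAL 2        /* seconds, as in the patch */

/* Re-sample getrusage() at most once per STORM_INTERVAL; callers inside
 * the window get the cached value, so a vote flood cannot hammer the kernel. */
static int cached_cpu_time(struct timeval *out)
{
    static time_t expires = 0;
    static struct rusage info;
    time_t now = time(NULL);

    if (expires < now) {
        if (getrusage(RUSAGE_SELF, &info) < 0) {
            expires = 0;
            return -1;
        }
    }
    expires = now + STORM_INTERVAL;  /* window slides on every access */
    *out = info.ru_utime;
    return 0;
}

/* Count events per STORM_INTERVAL window; flag a storm once the count
 * exceeds the peers*peers leeway, mirroring do_election_count_vote. */
static int storm_detected(int peers)
{
    static time_t expires = 0;
    static int count = 0;
    time_t now = time(NULL);

    if (expires < now) {
        count = 1;
        expires = now + STORM_INTERVAL;
    } else {
        count++;
    }
    return count > peers * peers;
}

int main(void)
{
    struct timeval cpu;
    int i;

    if (cached_cpu_time(&cpu) == 0) {
        printf("CPU time: %lds %ldus\n", (long)cpu.tv_sec, (long)cpu.tv_usec);
    }
    for (i = 1; i <= 20; i++) {
        if (storm_detected(4)) {   /* 4 peers: threshold of 16 */
            printf("storm detected on event %d\n", i);
            break;
        }
    }
    return 0;
}

One detail the patch is careful about: crm_uptime() pushes its expiry forward on every access (a sliding window), while the vote counter resets only once a window has lapsed, so a sustained flood keeps accumulating until the threshold trips.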
"BadRegexes" : ( r" trace:", r"error:", r"crit:", r"ERROR:", r"CRIT:", r"Shutting down...NOW", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r", exiting\.", r"(WARN|warn).*Ignoring HA message.*vote.*not in our membership list", r"pengine.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r":global_timer_callback", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", r"Child process .* terminated with signal", r"LogActions: Recover", r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting", r"Peer is not part of our cluster", r"We appear to be in an election loop", r"Unknown node -> we will not deliver message", r"crm_write_blackbox", r"pacemakerd.*Could not connect to Cluster Configuration Database API", #r"crm_ipc_send:.*Request .* failed", #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is recieved", # Not inherently bad, but worth tracking #r"No need to invoke the TE", #r"ping.*: DEBUG: Updated connected = 0", #r"Digest mis-match:", r"te_graph_trigger: Transition failed: terminated", #r"Executing .* fencing operation", #r"fence_pcmk.* Call to fence", #r"fence_pcmk", r"cman killed by node", + r"Election storm", ), }) def errorstoignore(self): # At some point implement a more elegant solution that # also produces a report at the end '''Return list of errors which are known and very noisey should be ignored''' if 1: return [ r"crm_mon:", r"crmadmin:", r"update_trace_data", r"async_notify: strange, client not found", r"Parse error: Ignoring unknown option .*nodename", r"error: log_operation: Operation 'reboot' .* with device 'FencingFail' returned:", r"Child process .* terminated with signal 9", ] return [] def NodeUUID(self, node): return node def ais_components(self): fullcomplist = {} self.complist = [] self.common_ignore = [ "Pending action:", "error: crm_log_message_adv:", "error: MSG: No message to dump", "pending LRM operations at shutdown", "Lost connection to the CIB service", "Connection to the CIB terminated...", "Sending message to CIB service FAILED", "apply_xml_diff: Diff application failed!", "crmd.*Action A_RECOVER .* not supported", "unconfirmed_actions: Waiting on .* unconfirmed actions", "cib_native_msgready: Message pending on command channel", "crmd.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd", "verify_stopped: Resource .* was active at shutdown. 
You may ignore this error if it is unmanaged.", "error: attrd_connection_destroy: Lost connection to attrd", "info: te_fence_node: Executing .* fencing operation", # "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery", # "error: process_pe_message: Transition .* ERRORs found during PE processing", ] cib_ignore = [ "lrmd.*error: crm_ipc_read: Connection to stonith-ng failed", "lrmd.*error: mainloop_gio_callback: Connection to stonith-ng.* closed", "lrmd.*error: stonith_connection_destroy_cb: LRMD lost STONITH connection", "lrmd.*error: stonith_connection_failed: STONITH connection failed, finalizing .* pending operations", ] fullcomplist["cib"] = Process(self, "cib", pats = [ "State transition .* S_RECOVERY", "Respawning .* crmd", "Respawning .* attrd", "error: crm_ipc_read: Connection to cib_.* failed", "error: mainloop_gio_callback: Connection to cib_.* closed", "Connection to the CIB terminated...", "Child process crmd exited .* rc=2", "Child process attrd exited .* rc=1", "crmd.*Input I_TERMINATE from do_recover", "crmd.*I_ERROR.*crmd_cib_connection_destroy", "crmd.*do_exit: Could not recover from internal error", ], badnews_ignore = cib_ignore, common_ignore = self.common_ignore) fullcomplist["lrmd"] = Process(self, "lrmd", pats = [ "State transition .* S_RECOVERY", "LRM Connection failed", "Respawning .* crmd", "error: crm_ipc_read: Connection to lrmd failed", "error: mainloop_gio_callback: Connection to lrmd.* closed", "crmd.*I_ERROR.*lrm_connection_destroy", "Child process crmd exited .* rc=2", "crmd.*Input I_TERMINATE from do_recover", "crmd.*do_exit: Could not recover from internal error", ], badnews_ignore = self.common_ignore) fullcomplist["crmd"] = Process(self, "crmd", pats = [ # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # "Executing .* fencing operation", # Only if the node wasn't the DC: "State transition S_IDLE", "State transition .* -> S_IDLE", ], badnews_ignore = self.common_ignore) fullcomplist["attrd"] = Process(self, "attrd", pats = [ ], badnews_ignore = self.common_ignore) fullcomplist["pengine"] = Process(self, "pengine", dc_pats = [ "State transition .* S_RECOVERY", "Respawning .* crmd", "Child process crmd exited .* rc=2", "crm_ipc_read: Connection to pengine failed", "error: mainloop_gio_callback: Connection to pengine.* closed", "crit: pe_ipc_destroy: Connection to the Policy Engine failed", "crmd.*I_ERROR.*save_cib_contents", "crmd.*Input I_TERMINATE from do_recover", "crmd.*do_exit: Could not recover from internal error", ], badnews_ignore = self.common_ignore) stonith_ignore = [ "LogActions: Recover Fencing", "update_failcount: Updating failcount for Fencing", "error: te_connect_stonith: Sign-in failed: triggered a retry", "stonith_connection_failed: STONITH connection failed, finalizing .* pending operations.", "process_lrm_event: LRM operation Fencing.* Error" ] stonith_ignore.extend(self.common_ignore) fullcomplist["stonith-ng"] = Process(self, "stonith-ng", process="stonithd", pats = [ "crm_ipc_read: Connection to stonith-ng failed", "stonith_connection_destroy_cb: LRMD lost STONITH connection", "mainloop_gio_callback: Connection to stonith-ng.* closed", "tengine_stonith_connection_destroy: Fencing daemon connection failed", "crmd.*stonith_api_add_notification: Callback already present", ], badnews_ignore = stonith_ignore) vgrind = self.Env["valgrind-procs"].split() for key in fullcomplist.keys(): if self.Env["valgrind-tests"]: if key in vgrind: # Processes running 
under valgrind can't be shot with "killall -9 processname" self.log("Filtering %s from the component list as it is being profiled by valgrind" % key) continue if key == "stonith-ng" and not self.Env["DoFencing"]: continue self.complist.append(fullcomplist[key]) #self.complist = [ fullcomplist["pengine"] ] return self.complist class crm_whitetank(crm_ais): ''' The crm version 3 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running on top of openais ''' def __init__(self, Environment, randseed=None): crm_ais.__init__(self, Environment, randseed=randseed) self.update({ "Name" : "crm-whitetank", "StartCmd" : "service openais start", "StopCmd" : "service openais stop", "Pat:We_stopped" : "%s.*openais.*pcmk_shutdown: Shutdown complete", "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "openais:.*Node %s is now: lost", "Pat:ChildKilled" : "%s openais.*Child process %s terminated with signal 9", "Pat:ChildRespawn" : "%s openais.*Respawning failed child process: %s", "Pat:ChildExit" : "Child process .* exited", }) def Components(self): self.ais_components() aisexec_ignore = [ "error: ais_dispatch: Receiving message .* failed", "crmd.*I_ERROR.*crmd_cib_connection_destroy", "cib.*error: cib_ais_destroy: AIS connection terminated", #"crmd.*error: crm_ais_destroy: AIS connection terminated", "crmd.*do_exit: Could not recover from internal error", "crmd.*I_TERMINATE.*do_recover", "attrd.*attrd_ais_destroy: Lost connection to OpenAIS service!", "stonithd.*error: AIS connection terminated", ] aisexec_ignore.extend(self.common_ignore) self.complist.append(Process(self, "aisexec", pats = [ "error: ais_dispatch: AIS connection failed", "crmd.*error: do_exit: Could not recover from internal error", "pengine.*Scheduling Node .* for STONITH", "stonithd.*requests a STONITH operation RESET on node", "stonithd.*Succeeded to STONITH the node", ], badnews_ignore = aisexec_ignore)) class crm_cs_v0(crm_ais): ''' The crm version 3 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running against version 0 of our plugin ''' def __init__(self, Environment, randseed=None): crm_ais.__init__(self, Environment, randseed=randseed) self.update({ "Name" : "crm-plugin-v0", "StartCmd" : "service corosync start", "StopCmd" : "service corosync stop", # The next pattern is too early # "Pat:We_stopped" : "%s.*Service engine unloaded: Pacemaker Cluster Manager", # The next pattern would be preferred, but it doesn't always come out # "Pat:We_stopped" : "%s.*Corosync Cluster Engine exiting with status", "Pat:We_stopped" : "%s.*Service engine unloaded: corosync cluster quorum service", "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "corosync:.*Node %s is now: lost", "Pat:ChildKilled" : "%s corosync.*Child process %s terminated with signal 9", "Pat:ChildRespawn" : "%s corosync.*Respawning failed child process: %s", }) def Components(self): self.ais_components() corosync_ignore = [ r"pacemakerd.*error: pcmk_child_exit: Child process .* exited", r"error: send_cpg_message: Sending message via cpg FAILED", r"crmd.*error: verify_stopped: Resource .* was active at shutdown. 
You may ignore this error if it is unmanaged.", r"error: pcmk_cpg_dispatch: Connection to the CPG API failed:", r"error: crm_ipc_read: Connection to cib_shm failed", r"error: mainloop_gio_callback: Connection to .* closed", r"crmd_cib_connection_destroys:", r"crmd.*error: do_recover: Action A_RECOVER .* not supported", r"error: do_log: FSA: Input I_TERMINATE.*do_recover", r"error: cib_ais_destroy: Corosync connection lost! Exiting.", r"attrd.*error: attrd_cib_connection_destroy: Connection to the CIB terminated...", r"error: send_ais_text: Sending message .* via cpg: FAILED", r"error: crm_ipc_read: Connection to stonith-ng failed", r"crit: tengine_stonith_connection_destroy: Fencing daemon connection failed", r"error: stonith_connection_destroy_cb: LRMD lost STONITH connection", r"error: stonith_connection_failed: STONITH connection failed", r"error: te_connect_stonith: Sign-in failed: triggered a retry", r"error: process_lrm_event: LRM operation Fencing.*", r"error: do_log: FSA: Input I_ERROR from crmd_cib_connection_destroy.* received in state", r"error: do_log: FSA: Input I_ERROR from do_shutdown_req.* received in state", r"warning: do_state_transition: State transition .* S_RECOVERY .*origin=crmd_cib_connection_destroy", r"warning: do_state_transition: State transition .* S_RECOVERY .*origin=do_shutdown_req", r"crmd.*error: cib_native_perform_op_delegate: Couldn't perform cib_slave operation.*: Transport endpoint is not connected", r"crmd.*error: cib_native_perform_op_delegate: CIB disconnected", ] self.complist.append(Process(self, "corosync", pats = [ r"pacemakerd.*error: cfg_connection_destroy: Connection destroyed", r"pacemakerd.*error: cpg_connection_destroy: Connection destroyed", r"attrd_ais_destroy: Lost connection to Corosync service!", r"stonith_peer_ais_destroy: AIS connection terminated", r"cib_ais_destroy: Corosync connection lost! Exiting.", r"crmd.*do_exit: Could not recover from internal error", r"pengine.*Scheduling Node .* for STONITH", r"log_operation: Operation .* for host .* with device .* returned: 0", r"tengine_stonith_notify: Peer .* was terminated .*: OK", ], badnews_ignore = corosync_ignore, common_ignore = self.common_ignore)) return self.complist class crm_cs_v1(crm_cs_v0): ''' The crm version 3 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running on top of version 1 of our plugin ''' def __init__(self, Environment, randseed=None): crm_cs_v0.__init__(self, Environment, randseed=randseed) self.update({ "Name" : "crm-plugin-v1", "StartCmd" : "service corosync start && service pacemaker start", "StopCmd" : "service pacemaker stop; service corosync stop", "EpocheCmd" : "crm_node -e", "QuorumCmd" : "crm_node -q", "ParitionCmd" : "crm_node -p", "Pat:We_stopped" : "%s.*Service engine unloaded: corosync cluster quorum service", "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "crmd.*Node %s\[.*state is now lost", "Pat:ChildKilled" : "%s pacemakerd.*Child process %s terminated with signal 9", "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s", }) class crm_mcp(crm_cs_v0): ''' The crm version 4 cluster manager class. 
It implements the things we need to talk to and manipulate crm clusters running on top of native corosync (no plugins) ''' def __init__(self, Environment, randseed=None): crm_cs_v0.__init__(self, Environment, randseed=randseed) self.update({ "Name" : "crm-mcp", "StartCmd" : "service corosync start && service pacemaker start", "StopCmd" : "service pacemaker stop; service corosync stop", "EpocheCmd" : "crm_node -e", "QuorumCmd" : "crm_node -q", "ParitionCmd" : "crm_node -p", # Close enough... "Corosync Cluster Engine exiting normally" isn't printed # reliably and there's little interest in doing anything about it "Pat:We_stopped" : "%s.*Unloading all Corosync service engines", "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "crmd.*Node %s\[.*state is now lost", "Pat:ChildKilled" : "%s pacemakerd.*Child process %s terminated with signal 9", "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s", "Pat:InfraUp" : "%s corosync.*Initializing transport", "Pat:PacemakerUp" : "%s pacemakerd.*Starting Pacemaker", }) class crm_cman(crm_cs_v0): ''' The crm version 3 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running on top of cman ''' def __init__(self, Environment, randseed=None): crm_cs_v0.__init__(self, Environment, randseed=randseed) self.update({ "Name" : "crm-cman", "StartCmd" : "service cman start && service pacemaker start", "StopCmd" : "service pacemaker stop; service cman stop;", "EpocheCmd" : "crm_node -e --cman", "QuorumCmd" : "crm_node -q --cman", "ParitionCmd" : "crm_node -p --cman", "Pat:We_stopped" : "%s.*Unloading all Corosync service engines", "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "crmd.*Node %s\[.*state is now lost", "Pat:ChildKilled" : "%s pacemakerd.*Child process %s terminated with signal 9", "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s", }) diff --git a/include/crm/crm.h b/include/crm/crm.h index d1fe13d20a..5d69231823 100644 --- a/include/crm/crm.h +++ b/include/crm/crm.h @@ -1,196 +1,196 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details.
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM__H # define CRM__H /** * \file * \brief A dumping ground * \ingroup core */ # include # include # include # include # undef MIN # undef MAX # include # include -# define CRM_FEATURE_SET "3.0.6" +# define CRM_FEATURE_SET "3.0.7" # define MINIMUM_SCHEMA_VERSION "pacemaker-1.0" # define LATEST_SCHEMA_VERSION "pacemaker-"CRM_DTD_VERSION # define EOS '\0' # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) # ifndef MAX_NAME # define MAX_NAME 256 # endif # ifndef __GNUC__ # define __builtin_expect(expr, result) (expr) # endif /* Some handy macros used by the Linux kernel */ # define __likely(expr) __builtin_expect(expr, 1) # define __unlikely(expr) __builtin_expect(expr, 0) # define CRM_META "CRM_meta" extern const char *crm_system_name; /* *INDENT-OFF* */ /* Clean these up at some point, some probably should be runtime options */ # define SOCKET_LEN 1024 # define APPNAME_LEN 256 # define MAX_IPC_FAIL 5 # define MAX_IPC_DELAY 120 # define MSG_LOG 1 # define DOT_FSA_ACTIONS 1 # define DOT_ALL_FSA_INPUTS 1 /* #define FSA_TRACE 1 */ # define INFINITY_S "INFINITY" # define MINUS_INFINITY_S "-INFINITY" # define INFINITY 1000000 /* Sub-systems */ # define CRM_SYSTEM_DC "dc" # define CRM_SYSTEM_DCIB "dcib" /* The master CIB */ # define CRM_SYSTEM_CIB "cib" # define CRM_SYSTEM_CRMD "crmd" # define CRM_SYSTEM_LRMD "lrmd" # define CRM_SYSTEM_PENGINE "pengine" # define CRM_SYSTEM_TENGINE "tengine" # define CRM_SYSTEM_STONITHD "stonithd" # define CRM_SYSTEM_MCP "pacemakerd" /* Valid operations */ # define CRM_OP_NOOP "noop" # define CRM_OP_JOIN_ANNOUNCE "join_announce" # define CRM_OP_JOIN_OFFER "join_offer" # define CRM_OP_JOIN_REQUEST "join_request" # define CRM_OP_JOIN_ACKNAK "join_ack_nack" # define CRM_OP_JOIN_CONFIRM "join_confirm" # define CRM_OP_DIE "die_no_respawn" # define CRM_OP_RETRIVE_CIB "retrieve_cib" # define CRM_OP_PING "ping" # define CRM_OP_VOTE "vote" # define CRM_OP_NOVOTE "no-vote" # define CRM_OP_HELLO "hello" # define CRM_OP_HBEAT "dc_beat" # define CRM_OP_PECALC "pe_calc" # define CRM_OP_ABORT "abort" # define CRM_OP_QUIT "quit" # define CRM_OP_LOCAL_SHUTDOWN "start_shutdown" # define CRM_OP_SHUTDOWN_REQ "req_shutdown" # define CRM_OP_SHUTDOWN "do_shutdown" # define CRM_OP_FENCE "stonith" # define CRM_OP_EVENTCC "event_cc" # define CRM_OP_TEABORT "te_abort" # define CRM_OP_TEABORTED "te_abort_confirmed" /* we asked */ # define CRM_OP_TE_HALT "te_halt" # define CRM_OP_TECOMPLETE "te_complete" # define CRM_OP_TETIMEOUT "te_timeout" # define CRM_OP_TRANSITION "transition" # define CRM_OP_REGISTER "register" # define CRM_OP_DEBUG_UP "debug_inc" # define CRM_OP_DEBUG_DOWN "debug_dec" # define CRM_OP_INVOKE_LRM "lrm_invoke" # define CRM_OP_LRM_REFRESH "lrm_refresh" # define CRM_OP_LRM_QUERY "lrm_query" # define CRM_OP_LRM_DELETE "lrm_delete" # define CRM_OP_LRM_FAIL "lrm_fail" # define CRM_OP_PROBED "probe_complete" # define CRM_OP_REPROBE "probe_again" # define CRM_OP_CLEAR_FAILCOUNT "clear_failcount" # define CRM_OP_RELAXED_SET "one-or-more" # define CRM_OP_RM_NODE_CACHE "rm_node_cache" # define CRMD_JOINSTATE_DOWN "down" # define CRMD_JOINSTATE_PENDING "pending" # define CRMD_JOINSTATE_MEMBER "member" # define CRMD_JOINSTATE_NACK "banned" # define CRMD_ACTION_DELETE "delete" # define CRMD_ACTION_CANCEL "cancel" # define CRMD_ACTION_MIGRATE "migrate_to" # define 
CRMD_ACTION_MIGRATED "migrate_from" # define CRMD_ACTION_START "start" # define CRMD_ACTION_STARTED "running" # define CRMD_ACTION_STOP "stop" # define CRMD_ACTION_STOPPED "stopped" # define CRMD_ACTION_PROMOTE "promote" # define CRMD_ACTION_PROMOTED "promoted" # define CRMD_ACTION_DEMOTE "demote" # define CRMD_ACTION_DEMOTED "demoted" # define CRMD_ACTION_NOTIFY "notify" # define CRMD_ACTION_NOTIFIED "notified" # define CRMD_ACTION_STATUS "monitor" /* short names */ # define RSC_DELETE CRMD_ACTION_DELETE # define RSC_CANCEL CRMD_ACTION_CANCEL # define RSC_MIGRATE CRMD_ACTION_MIGRATE # define RSC_MIGRATED CRMD_ACTION_MIGRATED # define RSC_START CRMD_ACTION_START # define RSC_STARTED CRMD_ACTION_STARTED # define RSC_STOP CRMD_ACTION_STOP # define RSC_STOPPED CRMD_ACTION_STOPPED # define RSC_PROMOTE CRMD_ACTION_PROMOTE # define RSC_PROMOTED CRMD_ACTION_PROMOTED # define RSC_DEMOTE CRMD_ACTION_DEMOTE # define RSC_DEMOTED CRMD_ACTION_DEMOTED # define RSC_NOTIFY CRMD_ACTION_NOTIFY # define RSC_NOTIFIED CRMD_ACTION_NOTIFIED # define RSC_STATUS CRMD_ACTION_STATUS /* *INDENT-ON* */ typedef GList *GListPtr; # include # include # include # define crm_str_hash g_str_hash_traditional guint g_str_hash_traditional(gconstpointer v); #endif
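The CRM_FEATURE_SET bump from 3.0.6 to 3.0.7 interacts with the election logic above: in do_election_count_vote, a peer whose version satisfies compare_version(your_version, CRM_FEATURE_SET) < 0 loses outright, so un-upgraded 3.0.6 nodes defer to patched 3.0.7 nodes during a rolling upgrade. A rough sketch of how a dotted-decimal comparison of this kind can work (an illustration only, not Pacemaker's actual compare_version implementation):

#include <stdio.h>
#include <stdlib.h>

/* Compare dotted-decimal version strings component by component;
 * returns <0, 0, or >0 like strcmp. Missing components count as 0. */
static int version_cmp(const char *a, const char *b)
{
    while (*a != '\0' || *b != '\0') {
        char *end;
        long va = strtol(a, &end, 10);
        a = (*end == '.') ? end + 1 : end;
        long vb = strtol(b, &end, 10);
        b = (*end == '.') ? end + 1 : end;
        if (va != vb) {
            return (va < vb) ? -1 : 1;
        }
    }
    return 0;
}

int main(void)
{
    /* A 3.0.6 peer loses the "Version" comparison against 3.0.7 */
    printf("%d\n", version_cmp("3.0.6", "3.0.7"));   /* prints -1 */
    printf("%d\n", version_cmp("3.0.7", "3.0.7"));   /* prints 0 */
    return 0;
}

Compiled and run, this prints -1 then 0, matching the "older peer loses the Version check" behaviour the feature-set bump relies on.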