diff --git a/README.markdown b/README.markdown
index 586c60d440..c0fbd7d6ab 100644
--- a/README.markdown
+++ b/README.markdown
@@ -1,94 +1,98 @@
# Pacemaker

## What is Pacemaker?

Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or Corosync.

It supports "n-node" clusters with significant capabilities for managing
resources and dependencies.

It runs scripts at initialization, when machines go up or down, and when
related resources fail, and it can be configured to periodically check
resource health.

## For more information, see:

* [Website](http://www.clusterlabs.org)
* [Issues/Bugs](http://bugs.clusterlabs.org)
* Mailing lists per audience: [users](http://oss.clusterlabs.org/mailman/listinfo/users), [developers](http://oss.clusterlabs.org/mailman/listinfo/developers), or possibly [the original one](http://oss.clusterlabs.org/mailman/listinfo/pacemaker) ([deprecated](http://oss.clusterlabs.org/pipermail/pacemaker/2015-February/023521.html))
* [Documentation](http://www.clusterlabs.org/doc)

## User interfaces / shells

There are multiple user interfaces for Pacemaker, including command-line
tools, graphical user interfaces, and web frontends. The _crm shell_ used
to be included in the Pacemaker source tree but is now maintained as a
separate project.

This is not meant to be an exhaustive list:

* _crmsh_: https://crmsh.github.io/
* _pcs_: https://github.com/feist/pcs/
* _LCMC_: http://lcmc.sourceforge.net/
* _hawk_: https://github.com/ClusterLabs/hawk

## Build Dependencies

* automake
* autoconf
* libtool-ltdl-devel
* libuuid-devel
* pkgconfig
* python
* glib2-devel
* libxml2-devel
* libxslt-devel
* python-devel
* gcc-c++
* bzip2-devel
* gnutls-devel
* pam-devel
* libqb-devel

## Cluster Stack Dependencies (Pick at least one)

* clusterlib-devel (CMAN)
* corosynclib-devel (Corosync)
* heartbeat-devel (Heartbeat)

## Optional Build Dependencies

* ncurses-devel
* openssl-devel
* libselinux-devel
* systemd-devel
* dbus-devel
* cluster-glue-libs-devel (LHA style fencing agents)
* libesmtp-devel (Email alerts)
* lm_sensors-devel (SNMP alerts)
* net-snmp-devel (SNMP alerts)
* asciidoc (documentation)
* help2man (documentation)
* publican (documentation)
* inkscape (documentation)
* docbook-style-xsl (documentation)

+## Optional Testing Dependencies
+* valgrind (if running CTS valgrind tests)
+* systemd-python (if using CTS on cluster nodes running systemd)
+
## Source Control (GIT)

    git clone git://github.com/ClusterLabs/pacemaker.git

[See Github](https://github.com/ClusterLabs/pacemaker)

## Installing from source

    $ ./autogen.sh
    $ ./configure
    $ make
    $ sudo make install

## How you can help

If you find this project useful, you may want to consider supporting its
future development. There are a number of ways to support the project:

* Test and report issues.
* Tick something off our [todo list](https://github.com/ClusterLabs/pacemaker/blob/master/TODO.markdown).
* Help others on the [mailing list](http://oss.clusterlabs.org/mailman/listinfo/users).
* Contribute documentation, examples and test cases.
* Contribute patches.
* Spread the word.
diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c index 68d77bed40..97236c8e1d 100644 --- a/crmd/lrm_state.c +++ b/crmd/lrm_state.c @@ -1,749 +1,748 @@ /* * Copyright (C) 2012 David Vossel * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include GHashTable *lrm_state_table = NULL; extern GHashTable *proxy_table; int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); static void free_rsc_info(gpointer value) { lrmd_rsc_info_t *rsc_info = value; lrmd_free_rsc_info(rsc_info); } static void free_deletion_op(gpointer value) { struct pending_deletion_op_s *op = value; free(op->rsc); delete_ha_msg_input(op->input); free(op); } static void free_recurring_op(gpointer value) { struct recurring_op_s *op = (struct recurring_op_s *)value; free(op->user_data); free(op->rsc_id); free(op->op_type); free(op->op_key); if (op->params) { g_hash_table_destroy(op->params); } free(op); } static gboolean fail_pending_op(gpointer key, gpointer value, gpointer user_data) { lrmd_event_data_t event = { 0, }; lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; crm_trace("Pre-emptively failing %s_%s_%d on %s (call=%s, %s)", op->rsc_id, op->op_type, op->interval, lrm_state->node_name, key, op->user_data); event.type = lrmd_event_exec_complete; event.rsc_id = op->rsc_id; event.op_type = op->op_type; event.user_data = op->user_data; event.timeout = 0; event.interval = op->interval; event.rc = PCMK_OCF_CONNECTION_DIED; event.op_status = PCMK_LRM_OP_ERROR; event.t_run = op->start_time; event.t_rcchange = op->start_time; event.t_rcchange = op->start_time; event.call_id = op->call_id; event.remote_nodename = lrm_state->node_name; event.params = op->params; process_lrm_event(lrm_state, &event, op); return TRUE; } gboolean lrm_state_is_local(lrm_state_t *lrm_state) { if (lrm_state == NULL || fsa_our_uname == NULL) { return FALSE; } if (strcmp(lrm_state->node_name, fsa_our_uname) != 0) { return FALSE; } return TRUE; } lrm_state_t * lrm_state_create(const char *node_name) { lrm_state_t *state = NULL; if (!node_name) { crm_err("No node name given for lrm state object"); return NULL; } state = calloc(1, sizeof(lrm_state_t)); if (!state) { return NULL; } state->node_name = strdup(node_name); state->rsc_info_cache = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_rsc_info); state->deletion_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, free_deletion_op); state->pending_ops = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, free_recurring_op); state->resource_history = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, history_free); g_hash_table_insert(lrm_state_table, (char 
*)state->node_name, state); return state; } void lrm_state_destroy(const char *node_name) { g_hash_table_remove(lrm_state_table, node_name); } static gboolean remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data) { remote_proxy_t *proxy = value; const char *node_name = user_data; if (safe_str_eq(node_name, proxy->node_name)) { return TRUE; } return FALSE; } static void internal_lrm_state_destroy(gpointer data) { lrm_state_t *lrm_state = data; if (!lrm_state) { return; } crm_trace("Destroying proxy table %s with %d members", lrm_state->node_name, g_hash_table_size(proxy_table)); g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node, (char *) lrm_state->node_name); remote_ra_cleanup(lrm_state); lrmd_api_delete(lrm_state->conn); if (lrm_state->rsc_info_cache) { crm_trace("Destroying rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_destroy(lrm_state->rsc_info_cache); } if (lrm_state->resource_history) { crm_trace("Destroying history op cache with %d members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_destroy(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Destroying deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_destroy(lrm_state->deletion_ops); } if (lrm_state->pending_ops) { crm_trace("Destroying pending op cache with %d members", g_hash_table_size(lrm_state->pending_ops)); g_hash_table_destroy(lrm_state->pending_ops); } free((char *)lrm_state->node_name); free(lrm_state); } void lrm_state_reset_tables(lrm_state_t * lrm_state) { if (lrm_state->resource_history) { crm_trace("Re-setting history op cache with %d members", g_hash_table_size(lrm_state->resource_history)); g_hash_table_remove_all(lrm_state->resource_history); } if (lrm_state->deletion_ops) { crm_trace("Re-setting deletion op cache with %d members", g_hash_table_size(lrm_state->deletion_ops)); g_hash_table_remove_all(lrm_state->deletion_ops); } if (lrm_state->pending_ops) { crm_trace("Re-setting pending op cache with %d members", g_hash_table_size(lrm_state->pending_ops)); g_hash_table_remove_all(lrm_state->pending_ops); } if (lrm_state->rsc_info_cache) { crm_trace("Re-setting rsc info cache with %d members", g_hash_table_size(lrm_state->rsc_info_cache)); g_hash_table_remove_all(lrm_state->rsc_info_cache); } } gboolean lrm_state_init_local(void) { if (lrm_state_table) { return TRUE; } lrm_state_table = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, internal_lrm_state_destroy); if (!lrm_state_table) { return FALSE; } proxy_table = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, remote_proxy_free); if (!proxy_table) { g_hash_table_destroy(lrm_state_table); return FALSE; } return TRUE; } void lrm_state_destroy_all(void) { if (lrm_state_table) { crm_trace("Destroying state table with %d members", g_hash_table_size(lrm_state_table)); g_hash_table_destroy(lrm_state_table); lrm_state_table = NULL; } if(proxy_table) { crm_trace("Destroying proxy table with %d members", g_hash_table_size(proxy_table)); g_hash_table_destroy(proxy_table); proxy_table = NULL; } } lrm_state_t * lrm_state_find(const char *node_name) { if (!node_name) { return NULL; } return g_hash_table_lookup(lrm_state_table, node_name); } lrm_state_t * lrm_state_find_or_create(const char *node_name) { lrm_state_t *lrm_state; lrm_state = g_hash_table_lookup(lrm_state_table, node_name); if (!lrm_state) { lrm_state = lrm_state_create(node_name); } return lrm_state; } GList 
* lrm_state_get_list(void) { return g_hash_table_get_values(lrm_state_table); } void lrm_state_disconnect(lrm_state_t * lrm_state) { int removed = 0; if (!lrm_state->conn) { return; } crm_trace("Disconnecting %s", lrm_state->node_name); ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn); if (is_not_set(fsa_input_register, R_SHUTDOWN)) { removed = g_hash_table_foreach_remove(lrm_state->pending_ops, fail_pending_op, lrm_state); crm_trace("Synthesized %d operation failures for %s", removed, lrm_state->node_name); } lrmd_api_delete(lrm_state->conn); lrm_state->conn = NULL; } int lrm_state_is_connected(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return FALSE; } return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn); } int lrm_state_poke_connection(lrm_state_t * lrm_state) { if (!lrm_state->conn) { return -1; } return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn); } int lrm_state_ipc_connect(lrm_state_t * lrm_state) { int ret; if (!lrm_state->conn) { lrm_state->conn = lrmd_api_new(); ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, lrm_op_callback); } ret = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn, CRM_SYSTEM_CRMD, NULL); if (ret != pcmk_ok) { lrm_state->num_lrm_register_fails++; } else { lrm_state->num_lrm_register_fails = 0; } return ret; } static int remote_proxy_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { /* Async responses from cib and friends back to clients via pacemaker_remoted */ xmlNode *xml = NULL; remote_proxy_t *proxy = userdata; lrm_state_t *lrm_state = lrm_state_find(proxy->node_name); uint32_t flags; if (lrm_state == NULL) { return 0; } xml = string2xml(buffer); if (xml == NULL) { crm_warn("Received a NULL msg from IPC service."); return 1; } flags = crm_ipc_buffer_flags(proxy->ipc); if (flags & crm_ipc_proxied_relay_response) { crm_trace("Passing response back to %.8s on %s: %.200s - request id: %d", proxy->session_id, proxy->node_name, buffer, proxy->last_request_id); remote_proxy_relay_response(lrm_state->conn, proxy->session_id, xml, proxy->last_request_id); proxy->last_request_id = 0; } else { crm_trace("Passing event back to %.8s on %s: %.200s", proxy->session_id, proxy->node_name, buffer); remote_proxy_relay_event(lrm_state->conn, proxy->session_id, xml); } free_xml(xml); return 1; } static void remote_proxy_disconnected(void *userdata) { remote_proxy_t *proxy = userdata; lrm_state_t *lrm_state = lrm_state_find(proxy->node_name); crm_trace("Destroying %s (%p)", lrm_state->node_name, userdata); proxy->source = NULL; proxy->ipc = NULL; if (lrm_state && lrm_state->conn) { remote_proxy_notify_destroy(lrm_state->conn, proxy->session_id); } g_hash_table_remove(proxy_table, proxy->session_id); } static remote_proxy_t * remote_proxy_new(const char *node_name, const char *session_id, const char *channel) { static struct ipc_client_callbacks proxy_callbacks = { .dispatch = remote_proxy_dispatch_internal, .destroy = remote_proxy_disconnected }; remote_proxy_t *proxy = calloc(1, sizeof(remote_proxy_t)); proxy->node_name = strdup(node_name); proxy->session_id = strdup(session_id); if (safe_str_eq(channel, CRM_SYSTEM_CRMD)) { proxy->is_local = TRUE; } else { proxy->source = mainloop_add_ipc_client(channel, G_PRIORITY_LOW, 0, proxy, &proxy_callbacks); proxy->ipc = mainloop_get_ipc_client(proxy->source); if (proxy->source == NULL) { remote_proxy_free(proxy); return NULL; } } crm_trace("created proxy session ID %s", proxy->session_id); 
g_hash_table_insert(proxy_table, proxy->session_id, proxy); return proxy; } gboolean crmd_is_proxy_session(const char *session) { return g_hash_table_lookup(proxy_table, session) ? TRUE : FALSE; } void crmd_proxy_send(const char *session, xmlNode *msg) { remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); lrm_state_t *lrm_state = NULL; if (!proxy) { return; } crm_log_xml_trace(msg, "to-proxy"); lrm_state = lrm_state_find(proxy->node_name); if (lrm_state) { crm_trace("Sending event to %.8s on %s", proxy->session_id, proxy->node_name); remote_proxy_relay_event(lrm_state->conn, session, msg); } } static void crmd_proxy_dispatch(const char *session, xmlNode *msg) { crm_log_xml_trace(msg, "CRMd-PROXY[inbound]"); crm_xml_add(msg, F_CRM_SYS_FROM, session); if (crmd_authorize_message(msg, NULL, session)) { route_message(C_IPC_MESSAGE, msg); } trigger_fsa(fsa_source); } static void remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) { lrm_state_t *lrm_state = userdata; const char *op = crm_element_value(msg, F_LRMD_IPC_OP); const char *session = crm_element_value(msg, F_LRMD_IPC_SESSION); int msg_id = 0; /* sessions are raw ipc connections to IPC, * all we do is proxy requests/responses exactly * like they are given to us at the ipc level. */ CRM_CHECK(op != NULL, return); CRM_CHECK(session != NULL, return); crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); /* This is msg from remote ipc client going to real ipc server */ if (safe_str_eq(op, "new")) { const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); CRM_CHECK(channel != NULL, return); if (remote_proxy_new(lrm_state->node_name, session, channel) == NULL) { remote_proxy_notify_destroy(lrmd, session); } crm_trace("new remote proxy client established to %s, session id %s", channel, session); } else if (safe_str_eq(op, "destroy")) { remote_proxy_end_session(session); } else if (safe_str_eq(op, "request")) { int flags = 0; xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); const char *name = crm_element_value(msg, F_LRMD_IPC_CLIENT); remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session); CRM_CHECK(request != NULL, return); if (proxy == NULL) { /* proxy connection no longer exists */ remote_proxy_notify_destroy(lrmd, session); return; } else if ((proxy->is_local == FALSE) && (crm_ipc_connected(proxy->ipc) == FALSE)) { remote_proxy_end_session(session); return; } proxy->last_request_id = 0; crm_element_value_int(msg, F_LRMD_IPC_MSG_FLAGS, &flags); crm_xml_add(request, XML_ACL_TAG_ROLE, "pacemaker-remote"); #if ENABLE_ACL CRM_ASSERT(lrm_state->node_name); crm_acl_get_set_user(request, F_LRMD_IPC_USER, lrm_state->node_name); #endif if (proxy->is_local) { /* this is for the crmd, which we are, so don't try * and connect/send to ourselves over ipc. instead * do it directly. 
*/ crmd_proxy_dispatch(session, request); if (flags & crm_ipc_client_response) { xmlNode *op_reply = create_xml_node(NULL, "ack"); crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } } else if(is_set(flags, crm_ipc_proxied)) { const char *type = crm_element_value(request, F_TYPE); int rc = 0; if (safe_str_eq(type, T_ATTRD) && crm_element_value(request, F_ATTRD_HOST) == NULL) { crm_xml_add(request, F_ATTRD_HOST, proxy->node_name); - crm_xml_add_int(request, F_ATTRD_HOST_ID, get_local_nodeid(0)); } rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); if(rc < 0) { xmlNode *op_reply = create_xml_node(NULL, "nack"); crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); /* Send a n'ack so the caller doesn't block */ crm_xml_add(op_reply, "function", __FUNCTION__); crm_xml_add_int(op_reply, "line", __LINE__); crm_xml_add_int(op_reply, "rc", rc); remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); proxy->last_request_id = msg_id; } } else { int rc = pcmk_ok; xmlNode *op_reply = NULL; /* For backwards compatibility with pacemaker_remoted <= 1.1.10 */ crm_trace("Relaying %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); rc = crm_ipc_send(proxy->ipc, request, flags, 10000, &op_reply); if(rc < 0) { crm_err("Could not relay %s request %d from %s to %s for %s: %s (%d)", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name, pcmk_strerror(rc), rc); } else { crm_trace("Relayed %s request %d from %s to %s for %s", op, msg_id, proxy->node_name, crm_ipc_name(proxy->ipc), name); } if(op_reply) { remote_proxy_relay_response(lrmd, session, op_reply, msg_id); free_xml(op_reply); } } } else { crm_err("Unknown proxy operation: %s", op); } } int lrm_state_remote_connect_async(lrm_state_t * lrm_state, const char *server, int port, int timeout_ms) { int ret; if (!lrm_state->conn) { lrm_state->conn = lrmd_remote_api_new(lrm_state->node_name, server, port); if (!lrm_state->conn) { return -1; } ((lrmd_t *) lrm_state->conn)->cmds->set_callback(lrm_state->conn, remote_lrm_op_callback); lrmd_internal_set_proxy_callback(lrm_state->conn, lrm_state, remote_proxy_cb); } crm_trace("initiating remote connection to %s at %d with timeout %d", server, port, timeout_ms); ret = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn, lrm_state->node_name, timeout_ms); if (ret != pcmk_ok) { lrm_state->num_lrm_register_fails++; } else { lrm_state->num_lrm_register_fails = 0; } return ret; } int lrm_state_get_metadata(lrm_state_t * lrm_state, const char *class, const char *provider, const char *agent, char **output, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* Optimize this... only retrieve metadata from local lrmd connection. Perhaps consider * caching result. */ return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata(lrm_state->conn, class, provider, agent, output, options); } int lrm_state_cancel(lrm_state_t * lrm_state, const char *rsc_id, const char *action, int interval) { if (!lrm_state->conn) { return -ENOTCONN; } /* Optimize this, cancel requires a synced request/response to the server. * Figure out a way to make this async. 
*/ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_cancel(lrm_state, rsc_id, action, interval); } return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id, action, interval); } lrmd_rsc_info_t * lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc = NULL; if (!lrm_state->conn) { return NULL; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_get_rsc_info(lrm_state, rsc_id); } rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id); if (rsc == NULL) { /* only contact the lrmd if we don't already have a cached rsc info */ rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options); if (rsc == NULL) { return NULL; } /* cache the result */ g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc); } return lrmd_copy_rsc_info(rsc); } int lrm_state_exec(lrm_state_t * lrm_state, const char *rsc_id, const char *action, const char *userdata, int interval, /* ms */ int timeout, /* ms */ int start_delay, /* ms */ lrmd_key_value_t * params) { if (!lrm_state->conn) { lrmd_key_value_freeall(params); return -ENOTCONN; } if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { return remote_ra_exec(lrm_state, rsc_id, action, userdata, interval, timeout, start_delay, params); } return ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id, action, userdata, interval, timeout, start_delay, lrmd_opt_notify_changes_only, params); } int lrm_state_register_rsc(lrm_state_t * lrm_state, const char *rsc_id, const char *class, const char *provider, const char *agent, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* optimize this... this function is a synced round trip from client to daemon. * The crmd/lrm.c code path should be re-factored to allow the register of resources * to be performed async. The lrmd client api needs to make an async version * of register available. */ if (is_remote_lrmd_ra(agent, provider, NULL)) { return lrm_state_find_or_create(rsc_id) ? pcmk_ok : -1; } return ((lrmd_t *) lrm_state->conn)->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider, agent, options); } int lrm_state_unregister_rsc(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options) { if (!lrm_state->conn) { return -ENOTCONN; } /* optimize this... this function is a synced round trip from client to daemon. * The crmd/lrm.c code path that uses this function should always treat it as an * async operation. The lrmd client api needs to make an async version unreg available. 
*/ if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) { lrm_state_destroy(rsc_id); return pcmk_ok; } g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id); return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options); } diff --git a/cts/CIB.py b/cts/CIB.py index b53bdd0825..a9dbcae745 100644 --- a/cts/CIB.py +++ b/cts/CIB.py @@ -1,531 +1,550 @@ '''CTS: Cluster Testing System: CIB generator ''' __copyright__ = ''' Author: Andrew Beekhof Copyright (C) 2008 Andrew Beekhof ''' import os, string, warnings from cts.CTSvars import * class CibBase: def __init__(self, Factory, tag, _id, **kwargs): self.tag = tag self.name = _id self.kwargs = kwargs self.children = [] self.Factory = Factory def __repr__(self): return "%s-%s" % (self.tag, self.name) def add_child(self, child): self.children.append(child) def __setitem__(self, key, value): if value: self.kwargs[key] = value else: self.kwargs.pop(key, None) from cib_xml import * class ConfigBase: cts_cib = None version = "unknown" feature_set = "unknown" Factory = None def __init__(self, CM, factory, tmpfile=None): self.CM = CM self.Factory = factory if not tmpfile: warnings.filterwarnings("ignore") tmpfile = os.tmpnam() warnings.resetwarnings() self.Factory.tmpfile = tmpfile def version(self): return self.version def NextIP(self): ip = self.CM.Env["IPBase"] if ":" in ip: (prefix, sep, suffix) = ip.rpartition(":") suffix = str(hex(int(suffix, 16)+1)).lstrip("0x") else: (prefix, sep, suffix) = ip.rpartition(".") suffix = str(int(suffix)+1) ip = prefix + sep + suffix self.CM.Env["IPBase"] = ip return ip.strip() class CIB11(ConfigBase): feature_set = "3.0" version = "pacemaker-1.1" counter = 1 def _show(self, command=""): output = "" (rc, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, None, ) for line in result: output += line self.Factory.debug("Generated Config: "+line) return output def NewIP(self, name=None, standard="ocf"): if self.CM.Env["IPagent"] == "IPaddr2": ip = self.NextIP() if not name: if ":" in ip: (prefix, sep, suffix) = ip.rpartition(":") name = "r"+suffix else: name = "r"+ip r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) r["ip"] = ip if ":" in ip: r["cidr_netmask"] = "64" r["nic"] = "eth0" else: r["cidr_netmask"] = "32" else: if not name: name = "r%s%d" % (self.CM.Env["IPagent"], self.counter) self.counter = self.counter + 1 r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) r.add_op("monitor", "5s") return r def get_node_id(self, node_name): """ Check the cluster configuration for a node ID. """ # We can't account for every possible configuration, # so we only return a node ID if: # * The node is specified in /etc/corosync/corosync.conf # with "ring0_addr:" equal to node_name and "nodeid:" # explicitly specified. # * Or, the node is specified in /etc/cluster/cluster.conf # with name="node_name" nodeid="X" # In all other cases, we return 0. 
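# The awk one-liners below are dense, so here is a minimal pure-Python
# sketch of the same corosync.conf nodeid lookup (illustrative only, not
# part of CTS; it assumes a locally readable file and no nested "node"
# blocks inside other objects):
import re

def corosync_node_id(node_name, conf="/etc/corosync/corosync.conf"):
    """Return the nodeid configured for node_name, or 0 if not found."""
    try:
        with open(conf) as f:
            text = f.read()
    except EnvironmentError:
        return 0
    # Mirror the awk trick below: treat each "}"-terminated block as a record
    for record in text.split("}"):
        if re.search(r"node\s*{.*ring0_addr:\s*%s(\s|$)" % re.escape(node_name),
                     record, re.S):
            m = re.search(r"nodeid:\s*(\d+)", record)
            if m:
                return int(m.group(1))
    return 0

# e.g. corosync_node_id("pcmk-1") -> 1 if "nodeid: 1" is set, otherwise 0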
node_id = 0 # awkward command: use } as record separator # so each corosync.conf "object" is one record; # match the "node {" record that has "ring0_addr: node_name"; # then print the substring of that record after "nodeid:" (rc, output) = self.Factory.rsh(self.Factory.target, r"""awk -v RS="}" """ r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*ring0_addr:\s*%s(\s+|$)/""" r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}'""" r""" /etc/corosync/corosync.conf""" % node_name, None) if rc == 0 and len(output) == 1: try: node_id = int(output[0]) except ValueError: node_id = 0 # another awkward command: use < or > as record separator # so each cluster.conf XML tag is one record; # match the clusternode record that has name="node_name"; # then print the substring of that record for nodeid="X" if node_id == 0: (rc, output) = self.Factory.rsh(self.Factory.target, r"""awk -v RS="[<>]" """ r"""'/^clusternode\s+.*name="%s".*/""" r"""{gsub(/.*nodeid="/,"");gsub(/".*/,"");print}'""" r""" /etc/cluster/cluster.conf""" % node_name, None) if rc == 0 and len(output) == 1: try: node_id = int(output[0]) except ValueError: node_id = 0 return node_id def install(self, target): old = self.Factory.tmpfile # Force a rebuild self.cts_cib = None self.Factory.tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" self.contents(target) self.Factory.rsh(self.Factory.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.Factory.tmpfile) self.Factory.tmpfile = old def contents(self, target=None): # fencing resource if self.cts_cib: return self.cts_cib if target: self.Factory.target = target self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile)) #cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''') self.num_nodes = len(self.CM.Env["nodes"]) no_quorum = "stop" if self.num_nodes < 3: no_quorum = "ignore" self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes) # We don't need a nodes section unless we add attributes stn = None # Fencing resource # Define first so that the shell doesn't reject every update if self.CM.Env["DoFencing"]: + + # Define the "real" fencing device st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith") + # Set a threshold for unreliable stonith devices such as the vmware one st.add_meta("migration-threshold", "5") st.add_op("monitor", "120s", timeout="120s") st.add_op("stop", "0", timeout="60s") st.add_op("start", "0", timeout="60s") + # For remote node tests, a cluster node is stopped and brought back up + # as a remote node with the name "remote_OLDNAME". To allow fencing + # devices to fence these nodes, create a list of all possible node names. 
+ all_node_names = [ prefix+n for n in self.CM.Env["nodes"] for prefix in ('', 'remote_') ] + + # Add all parameters specified by user entries = string.split(self.CM.Env["stonith-params"], ',') for entry in entries: (name, value) = string.split(entry, '=') - if name == "hostlist" and value == "all": - value = string.join(self.CM.Env["nodes"], " ") - elif name == "pcmk_host_list" and value == "all": - value = string.join(self.CM.Env["nodes"], " ") + + # Allow user to specify "all" as the node list, and expand it here + if name in [ "hostlist", "pcmk_host_list" ] and value == "all": + value = string.join(all_node_names, " ") st[name] = value st.commit() # Test advanced fencing logic if True: stf_nodes = [] stt_nodes = [] attr_nodes = {} # Create the levels stl = FencingTopology(self.Factory) for node in self.CM.Env["nodes"]: + # Remote node tests will rename the node + remote_node = "remote_" + node + # Randomly assign node to a fencing method ftype = self.CM.Env.RandomGen.choice(["levels-and", "levels-or ", "broadcast "]) # For levels-and, randomly choose targeting by node name or attribute by = "" if ftype == "levels-and": if self.CM.Env.RandomGen.choice([True, False]): node_id = self.get_node_id(node) if node_id == 0: # We couldn't find a node ID, so revert to targeting by name by = " (by name)" else: attr_nodes[node] = node_id by = " (by attribute)" else: by = " (by name)" self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by)) - # for baremetal remote node tests (is this really necessary?) - stt_nodes.append("remote_%s" % node) - if ftype == "levels-and": + # If targeting by name, add a topology level for this node if node not in attr_nodes: stl.level(1, node, "FencingPass,Fencing") - stt_nodes.append(node) + + # Always target remote nodes by name, otherwise we would need to add + # an attribute to the remote node only during remote tests (we don't + # want nonexistent remote nodes showing up in the non-remote tests). + # That complexity is not worth the effort. + stl.level(1, remote_node, "FencingPass,Fencing") + + # Add the node (and its remote equivalent) to the list of levels-and nodes. + stt_nodes.extend([node, remote_node]) elif ftype == "levels-or ": - stl.level(1, node, "FencingFail") - stl.level(2, node, "Fencing") - stf_nodes.append(node) + for n in [ node, remote_node ]: + stl.level(1, n, "FencingFail") + stl.level(2, n, "Fencing") + stf_nodes.extend([node, remote_node]) # If any levels-and nodes were targeted by attribute, # create the attributes and a level for the attribute. 
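# The "all" expansion above is easy to exercise in isolation. A standalone
# sketch (the parameter string and node names are made-up examples):
def expand_stonith_params(params, nodes):
    """Parse "name=value,..." stonith params, expanding the special value
       "all" for hostlist/pcmk_host_list to every node plus its remote_ twin."""
    all_node_names = [prefix + n for n in nodes for prefix in ('', 'remote_')]
    result = {}
    for entry in params.split(','):
        (name, value) = entry.split('=')
        if name in ["hostlist", "pcmk_host_list"] and value == "all":
            value = " ".join(all_node_names)
        result[name] = value
    return result

# expand_stonith_params("pcmk_host_list=all,mode=pass", ["n1", "n2"])
# -> {'pcmk_host_list': 'n1 remote_n1 n2 remote_n2', 'mode': 'pass'}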
if attr_nodes: stn = Nodes(self.Factory) for (node_name, node_id) in attr_nodes.items(): stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" }) stl.level(1, "cts-fencing=levels-and", "FencingPass,Fencing") # Create a Dummy agent that always passes for levels-and if len(stt_nodes): self.CM.install_helper("fence_dummy", destdir="/usr/sbin", sourcedir=CTSvars.Fencing_home) stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith") stt["pcmk_host_list"] = string.join(stt_nodes, " ") # Wait this many seconds before doing anything, handy for letting disks get flushed too stt["random_sleep_range"] = "30" stt["mode"] = "pass" stt.commit() # Create a Dummy agent that always fails for levels-or if len(stf_nodes): self.CM.install_helper("fence_dummy", destdir="/usr/sbin", sourcedir=CTSvars.Fencing_home) stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith") stf["pcmk_host_list"] = string.join(stf_nodes, " ") # Wait this many seconds before doing anything, handy for letting disks get flushed too stf["random_sleep_range"] = "30" stf["mode"] = "fail" stf.commit() # Now commit the levels themselves stl.commit() o = Option(self.Factory, "stonith-enabled", self.CM.Env["DoFencing"]) o["start-failure-is-fatal"] = "false" o["pe-input-series-max"] = "5000" o["default-action-timeout"] = "90s" o["shutdown-escalation"] = "5min" o["batch-limit"] = "10" o["dc-deadtime"] = "5s" o["no-quorum-policy"] = no_quorum o["expected-quorum-votes"] = self.num_nodes o["notification-agent"] = "/var/lib/pacemaker/notify.sh" o["notification-recipient"] = "/var/lib/pacemaker/notify.log" if self.CM.Env["DoBSC"] == 1: o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!" o.commit() # Commit the nodes section if we defined one if stn is not None: stn.commit() # Add resources? 
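# The Option block above ends up as nvpairs inside a cluster_property_set
# in the generated CIB. A tiny, hypothetical renderer to visualize that
# mapping (CTS's real XML generation lives in cib_xml.py):
def render_cluster_options(opts, set_id="cib-bootstrap-options"):
    lines = ['<cluster_property_set id="%s">' % set_id]
    for name, value in sorted(opts.items()):
        lines.append('  <nvpair id="%s-%s" name="%s" value="%s"/>'
                     % (set_id, name, name, value))
    lines.append('</cluster_property_set>')
    return "\n".join(lines)

# render_cluster_options({"no-quorum-policy": "ignore", "batch-limit": "10"})
# gives the same shape cibadmin would show for the options committed above.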
        if self.CM.Env["CIBResource"] == 1:
            self.add_resources()

        if self.CM.cluster_monitor == 1:
            mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker")
            mon.add_op("start", "0", requires="nothing")
            mon.add_op("monitor", "5s", requires="nothing")
            mon["update"] = "10"
            mon["extra_options"] = "-r -n"
            mon["user"] = "abeekhof"
            mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html"
            mon.commit()

            #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')

        # generate cib
        self.cts_cib = self._show()

        if self.Factory.tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml":
            self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile)

        return self.cts_cib

    def add_resources(self):
        # Per-node resources
        for node in self.CM.Env["nodes"]:
            name = "rsc_"+node
            r = self.NewIP(name)
            r.prefer(node, "100")
            r.commit()

        # Migrator
        # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
        m = Resource(self.Factory, "migrator", "Dummy", "ocf", "pacemaker")
        m["passwd"] = "whatever"
        m.add_meta("resource-stickiness", "1")
        m.add_meta("allow-migrate", "1")
        m.add_op("monitor", "P10S")
        m.commit()

        # Ping the test master
        p = Resource(self.Factory, "ping-1", "ping", "ocf", "pacemaker")
        p.add_op("monitor", "60s")
        p["host_list"] = self.CM.Env["cts-master"]
        p["name"] = "connected"
        p["debug"] = "true"

        c = Clone(self.Factory, "Connectivity", p)
        c["globally-unique"] = "false"
        c.commit()

        # Master/slave resource
        s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker")
        s.add_op("monitor", "15s", timeout="60s")
        s.add_op("monitor", "16s", timeout="60s", role="Master")

        ms = Master(self.Factory, "master-1", s)
        ms["clone-max"] = self.num_nodes
        ms["master-max"] = 1
        ms["clone-node-max"] = 1
        ms["master-node-max"] = 1

        # Require connectivity to run the master
        r = Rule(self.Factory, "connected", "-INFINITY", op="or")
        r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1"))
        r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None))
        ms.prefer("connected", rule=r)
        ms.commit()

        # Group Resource
        g = Group(self.Factory, "group-1")
        g.add_child(self.NewIP())

        if self.CM.Env["have_systemd"]:
            # It would be better to put the python in a separate file, so we
            # could loop "while True" rather than sleep for 24 hours. We can't
            # put a loop in a single-line python command; only simple commands
            # may be separated by semicolon in python.
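# As the comment above notes, a separate helper script could notify
# systemd and then loop forever, instead of the inline one-liner that
# sleeps for 24 hours. A sketch of what such a file might contain
# (hypothetical; it needs the systemd-python package from the README,
# and the unit's ExecStart would point at this script instead of the
# single-line "python -c" command defined inline below):
import time
import systemd.daemon

def main():
    time.sleep(10)                    # simulate a slow-starting service
    systemd.daemon.notify("READY=1")  # Type=notify handshake with systemd
    while True:                       # stay "running" until systemd stops us
        time.sleep(60)

if __name__ == "__main__":
    main()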
dummy_service_file = """ [Unit] Description=Dummy resource that takes a while to start [Service] Type=notify ExecStart=/usr/bin/python -c 'import time, systemd.daemon; time.sleep(10); systemd.daemon.notify("READY=1"); time.sleep(86400)' ExecStop=/bin/sleep 10 ExecStop=/bin/kill -s KILL \$MAINPID """ os.system("cat <<-END >/tmp/DummySD.service\n%s\nEND" % (dummy_service_file)) self.CM.install_helper("DummySD.service", destdir="/usr/lib/systemd/system/", sourcedir="/tmp") sysd = Resource(self.Factory, "petulant", "DummySD", "service") sysd.add_op("monitor", "P10S") g.add_child(sysd) else: g.add_child(self.NewIP()) g.add_child(self.NewIP()) # Group with the master g.after("master-1", first="promote", then="start") g.colocate("master-1", "INFINITY", withrole="Master") g.commit() # LSB resource lsb_agent = self.CM.install_helper("LSBDummy") lsb = Resource(self.Factory, "lsb-dummy",lsb_agent, "lsb") lsb.add_op("monitor", "5s") # LSB with group lsb.after("group-1") lsb.colocate("group-1") lsb.commit() class CIB12(CIB11): feature_set = "3.0" version = "pacemaker-1.2" class CIB20(CIB11): feature_set = "3.0" version = "pacemaker-2.0" #class HASI(CIB10): # def add_resources(self): # # DLM resource # self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''') # self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''') # O2CB resource # self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''') # self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''') # self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''') # self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''') class ConfigFactory: def __init__(self, CM): self.CM = CM self.rsh = self.CM.rsh self.register("pacemaker11", CIB11, CM, self) self.register("pacemaker12", CIB12, CM, self) self.register("pacemaker20", CIB20, CM, self) # self.register("hae", HASI, CM, self) self.target = self.CM.Env["nodes"][0] self.tmpfile = None def log(self, args): self.CM.log("cib: %s" % args) def debug(self, args): self.CM.debug("cib: %s" % args) def register(self, methodName, constructor, *args, **kargs): """register a constructor""" _args = [constructor] _args.extend(args) setattr(self, methodName, ConfigFactoryItem(*_args, **kargs)) def unregister(self, methodName): """unregister a constructor""" delattr(self, methodName) def createConfig(self, name="pacemaker-1.0"): if name == "pacemaker-1.0": name = "pacemaker10"; elif name == "pacemaker-1.1": name = "pacemaker11"; elif name == "pacemaker-1.2": name = "pacemaker12"; elif name == "pacemaker-2.0": name = "pacemaker20"; elif name == "hasi": name = "hae"; if hasattr(self, name): return getattr(self, name)() else: self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name) return self.pacemaker12() class ConfigFactoryItem: def __init__(self, function, *args, **kargs): self._function = function self._args = args self._kargs = kargs def __call__(self, *args, **kargs): """call function""" _args = list(self._args) _args.extend(args) _kargs = self._kargs.copy() _kargs.update(kargs) return self._function(*_args,**_kargs) if __name__ == '__main__': """ Unit test (pass cluster node names as command line arguments) """ import CTS import CM_ais import sys if len(sys.argv) < 2: print("Usage: %s ..." 
% sys.argv[0]) sys.exit(1) args = [ "--nodes", " ".join(sys.argv[1:]), "--clobber-cib", "--populate-resources", "--stack", "corosync", "--test-ip-base", "fe80::1234:56:7890:1000", "--stonith", "rhcs", "--stonith-args", "pcmk_arg_map=domain:uname" ] env = CTS.CtsLab(args) cm = CM_ais.crm_mcp(env) CibFactory = ConfigFactory(cm) cib = CibFactory.createConfig("pacemaker-1.1") print(cib.contents()) diff --git a/cts/CTStests.py b/cts/CTStests.py index 00fcd13058..ff4e60fc44 100644 --- a/cts/CTStests.py +++ b/cts/CTStests.py @@ -1,3230 +1,3234 @@ '''CTS: Cluster Testing System: Tests module There are a few things we want to do here: ''' __copyright__ = ''' Copyright (C) 2000, 2001 Alan Robertson Licensed under the GNU GPL. Add RecourceRecover testcase Zhao Kai ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. # import time, os, re, string, tempfile from stat import * from cts import CTS from cts.CTSaudits import * from cts.CTSvars import * from cts.patterns import PatternSelector from cts.logging import LogFactory from cts.remote import RemoteFactory from cts.watcher import LogWatcher from cts.environment import EnvFactory AllTestClasses = [ ] class CTSTest: ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. 
''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Env = EnvFactory().getInstance() self.rsh = RemoteFactory().getInstance() self.logger = LogFactory() self.templates = PatternSelector(cm["Name"]) self.Audits = [] self.timeout = 120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_docker_unsafe = 0 self.is_experimental = 0 self.is_container = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def log(self, args): self.logger.log(args) def debug(self, args): self.logger.debug(args) def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") if key in self.Stats: return self.Stats[key] return None def log_mark(self, msg): self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" % (self.name,key) self.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.logger.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"]) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match = watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.logger.log(prefix + " " + match) errcount = errcount + 1 else: break else: self.logger.log("Too many errors!") watch.end() return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.Env["experimental-tests"]: return 0 elif self.is_docker_unsafe and self.Env["docker"]: return 0 elif self.is_container and not self.Env["container-tests"]: return 0 elif self.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] class StopTest(CTSTest): '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stop" def __call__(self, node): '''Perform the 'stop' test. 
        '''
        self.incr("calls")
        if self.CM.ShouldBeStatus[node] != "up":
            return self.skipped()

        patterns = []
        # Technically we should always be able to notice ourselves stopping
        patterns.append(self.templates["Pat:We_stopped"] % node)

        #if self.Env["use_logd"]:
        #    patterns.append(self.templates["Pat:Logd_stopped"] % node)

        # Any active node needs to notice this one left
        # NOTE: This won't work if we have multiple partitions
        for other in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[other] == "up" and other != node:
                patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
                #self.debug("Checking %s will notice %s left"%(other, node))

        watch = self.create_watch(patterns, self.Env["DeadTime"])
        watch.setwatch()

        if node == self.CM.OurNode:
            self.incr("us")
        else:
            if self.CM.upcount() <= 1:
                self.incr("all")
            else:
                self.incr("them")

        self.CM.StopaCM(node)
        watch_result = watch.lookforall()

        failreason = None
        UnmatchedList = "||"
        if watch.unmatched:
            (rc, output) = self.rsh(node, "/bin/ps axf", None)
            for line in output:
                self.debug(line)

            (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump", None)
            for line in output:
                self.debug(line)

            for regex in watch.unmatched:
                self.logger.log("ERROR: Shutdown pattern not found: %s" % (regex))
                UnmatchedList += regex + "||"
                failreason = "Missing shutdown pattern"

        self.CM.cluster_stable(self.Env["DeadTime"])

        if not watch.unmatched or self.CM.upcount() == 0:
            return self.success()

        if len(watch.unmatched) >= self.CM.upcount():
            return self.failure("no match against (%s)" % UnmatchedList)

        if failreason == None:
            return self.success()
        else:
            return self.failure(failreason)

#
# We don't register StopTest because it's better when called by
# another test...
#

class StartTest(CTSTest):
    '''Start (activate) the cluster manager on a node'''
    def __init__(self, cm, debug=None):
        CTSTest.__init__(self,cm)
        self.name = "start"
        self.debug = debug

    def __call__(self, node):
        '''Perform the 'start' test.
        '''
        self.incr("calls")

        if self.CM.upcount() == 0:
            self.incr("us")
        else:
            self.incr("them")

        if self.CM.ShouldBeStatus[node] != "down":
            return self.skipped()
        elif self.CM.StartaCM(node):
            return self.success()
        else:
            return self.failure("Startup %s on node %s failed"
                                % (self.Env["Name"], node))

#
# We don't register StartTest because it's better when called by
# another test...
#

class FlipTest(CTSTest):
    '''If it's running, stop it. If it's stopped, start it.
       Overthrow the status quo...
    '''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "Flip"
        self.start = StartTest(cm)
        self.stop = StopTest(cm)

    def __call__(self, node):
        '''Perform the 'Flip' test.
        '''
        self.incr("calls")
        if self.CM.ShouldBeStatus[node] == "up":
            self.incr("stopped")
            ret = self.stop(node)
            type = "up->down"
            # Give the cluster time to recognize it's gone...
            time.sleep(self.Env["StableTime"])
        elif self.CM.ShouldBeStatus[node] == "down":
            self.incr("started")
            ret = self.start(node)
            type = "down->up"
        else:
            return self.skipped()

        self.incr(type)
        if ret:
            return self.success()
        else:
            return self.failure("%s failure" % type)

# Register FlipTest as a good test to run
AllTestClasses.append(FlipTest)

class RestartTest(CTSTest):
    '''Stop and restart a node'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "Restart"
        self.start = StartTest(cm)
        self.stop = StopTest(cm)
        self.benchmark = 1

    def __call__(self, node):
        '''Perform the 'restart' test.
''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) class StonithdTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") is_dc = self.CM.is_node_dc(node) watchpats = [] watchpats.append(self.templates["Pat:FenceOpOK"] % node) watchpats.append(self.templates["Pat:NodeFenced"] % node) if self.Env["at-boot"] == 0: self.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node] = "down" else: self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) watchpats.append("%s.* S_STARTING -> S_PENDING" % node) watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() origin = self.Env.RandomGen.choice(self.Env["nodes"]) rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) if rc == 194: # 194 - 256 = -62 = Timer expired # # Look for the patterns, usually this means the required # device was running on the node to be fenced - or that # the required devices were in the process of being loaded # and/or moved # # Effectively the node committed suicide so there will be # no confirmation, but pacemaker should be watching and # fence the node again self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) elif origin != node and rc != 0: self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) elif origin == node and rc != 255: # 255 == broken pipe, ie. 
the node was fenced as expected self.logger.log("Locally originated fencing returned %d" % rc) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ self.templates["Pat:Fencing_start"] % ".*", self.templates["Pat:Fencing_ok"] % ".*", r"error.*: Resource .*stonith::.* is active on 2 nodes attempting recovery", r"error.*: Operation reboot of .*by .* for stonith_admin.*: Timer expired", ] def is_applicable(self): if not self.is_applicable_common(): return 0 if "DoFencing" in self.Env.keys(): return self.Env["DoFencing"] return 1 AllTestClasses.append(StonithdTest) class StartOnebyOne(CTSTest): '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StartOnebyOne" self.stopall = SimulStopLite(cm) self.start = StartTest(cm) self.ns = CTS.NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) class SimulStart(CTSTest): '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStart" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") self.CM.clear_all_caches() if not self.startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) class SimulStop(CTSTest): '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStop" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) class StopOnebyOne(CTSTest): '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StopOnebyOne" self.startall = SimulStartLite(cm) self.stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... 
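# StonithdTest above special-cases rc == 194 from stonith_admin: a remote
# shell returns only the low 8 bits of the exit status, so a command that
# dies with -ETIME (-62 on Linux, "Timer expired") surfaces as 256 - 62 = 194.
# A small sketch of that decoding (the helper name is ours, not CTS's):
def decode_rsh_rc(rc):
    """Map an unsigned 8-bit remote exit code back to a signed value."""
    return rc - 256 if rc > 127 else rc

assert decode_rsh_rc(194) == -62  # Timer expired: fencing command timed out
assert decode_rsh_rc(255) == -1   # read by StonithdTest as a broken pipe
assert decode_rsh_rc(0) == 0      # clean success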
        # Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        failed = []
        self.set_timer()
        for node in self.Env["nodes"]:
            if not self.stop(node):
                failed.append(node)

        if len(failed) > 0:
            return self.failure("Some node failed to stop: " + repr(failed))

        self.CM.clear_all_caches()
        return self.success()

# Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)

class RestartOnebyOne(CTSTest):
    '''Restart all the nodes in order'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "RestartOnebyOne"
        self.startall = SimulStartLite(cm)

    def __call__(self, dummy):
        '''Perform the 'RestartOnebyOne' test.
        '''
        self.incr("calls")

        # We ignore the "node" parameter...

        # Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        did_fail = []
        self.set_timer()
        self.restart = RestartTest(self.CM)
        for node in self.Env["nodes"]:
            if not self.restart(node):
                did_fail.append(node)

        if did_fail:
            return self.failure("Could not restart %d nodes: %s"
                                % (len(did_fail), repr(did_fail)))
        return self.success()

# Register RestartOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)

class PartialStart(CTSTest):
    '''Start a node - but tell it to stop before it finishes starting up'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "PartialStart"
        self.startall = SimulStartLite(cm)
        self.stopall = SimulStopLite(cm)
        self.stop = StopTest(cm)
        #self.is_unsafe = 1

    def __call__(self, node):
        '''Perform the 'PartialStart' test.
        '''
        self.incr("calls")

        ret = self.stopall(None)
        if not ret:
            return self.failure("Setup failed")

        # FIXME!  This should use the CM class to get the pattern
        #         then it would be applicable in general
        watchpats = []
        watchpats.append("crmd.*Connecting to cluster infrastructure")
        watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
        watch.setwatch()

        self.CM.StartaCMnoBlock(node)
        ret = watch.lookforall()
        if not ret:
            self.logger.log("Patterns not found: " + repr(watch.unmatched))
            return self.failure("Setup of %s failed" % node)

        ret = self.stop(node)
        if not ret:
            return self.failure("%s did not stop in time" % node)
        return self.success()

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''

        # We might do some fencing in the 2-node case if we make it up far enough
        return [
            """Executing reboot fencing operation""",
        ]

# Register PartialStart as a good test to run
AllTestClasses.append(PartialStart)

class StandbyTest(CTSTest):
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "Standby"
        self.benchmark = 1

        self.start = StartTest(cm)
        self.startall = SimulStartLite(cm)

    # make sure the node is active
    # set the node to standby mode
    # check resources, no resources should be running on the node
    # set the node to active mode
    # check resources, resources should have been migrated back (SHOULD THEY?)
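# The outline above maps onto a simple drive-and-verify sequence. A compact
# sketch of that flow, assuming an object with the ClusterManager-style
# methods used in this file (SetStandbyMode, StandbyStatus, active_resources):
def cycle_standby(cm, node):
    """Put node in standby, verify nothing runs on it, then reactivate it.
       Returns an error string, or None on success."""
    if not cm.SetStandbyMode(node, "on"):
        return "can't set node %s to standby mode" % node
    if cm.StandbyStatus(node) != "on":
        return "standby status of %s never became [on]" % node
    if cm.active_resources(node):       # expect an empty list in standby
        cm.SetStandbyMode(node, "off")  # put the node back before failing
        return "resources still active on standby node %s" % node
    if not cm.SetStandbyMode(node, "off"):
        return "can't set node %s back to active mode" % node
    return None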
def __call__(self, node): self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") self.debug("Make sure node %s is active" % node) if self.CM.StandbyStatus(node) != "off": if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.debug("Getting resources running on node %s" % node) rsc_on_node = self.CM.active_resources(node) watchpats = [] watchpats.append(r"State transition .* -> S_POLICY_ENGINE") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.debug("Setting node %s to standby mode" % node) if not self.CM.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.CM.SetStandbyMode(node, "off") return self.failure("cluster didn't react to standby change on %s" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.debug("Checking resources") bad_run = self.CM.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.debug("Setting node %s to active mode" % node) self.CM.SetStandbyMode(node, "off") return rc self.debug("Setting node %s to active mode" % node) if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) class ValgrindTest(CTSTest): '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # Enable valgrind self.logger.logPat = "/tmp/%s-*.valgrind" % self.name self.Env["valgrind-prefix"] = self.name self.rsh(node, "rm -f %s" % self.logger.logPat, None) ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") for node in self.Env["nodes"]: (rc, output) = self.rsh(node, "ps u --ppid `pidofproc aisexec`", None) for line in output: self.debug(line) return self.success() def teardown(self, node): # Disable valgrind self.Env["valgrind-prefix"] = None # Return all nodes to normal ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks leaked = [] self.stop = StopTest(self.CM) for node in self.Env["nodes"]: (rc, ps_out) = self.rsh(node, "ps u --ppid `pidofproc aisexec`", None) rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) for line in 
ps_out: self.logger.log(line) (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None) for line in output: self.logger.log(line) (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None) for line in output: self.debug(line) self.rsh(node, "rm -f %s" % self.logger.logPat, None) return leaked def __call__(self, node): leaked = self.find_leaks() if len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"cib.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", r"cib.*: .* avoid confusing Valgrind", r"HA_VALGRIND_ENABLED", ] class StandbyLoopTest(ValgrindTest): '''Check for memory leaks by putting a node in and out of standby for an hour''' def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name = "StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done = time.time() + self.Env["loop-minutes"] * 60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() AllTestClasses.append(StandbyLoopTest) class BandwidthTest(CTSTest): # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. '''Test the bandwidth which heartbeat uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) self.tempfile = tempfile.mktemp(".cts") self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount() < 1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.logger.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.logger.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" 
% rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count + 1 linesplit = string.split(line," ") for j in range(len(linesplit)-1): if linesplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = string.split(T1,":") time2split = string.split(timesplit[2],".") time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = string.split(line," ") for j in range(len(linessplit)-1): if linessplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = int(linessplit[j+1]) + sum except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = string.split(T2,":") time2split = string.split(timesplit[2],".") time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return (sum*8)/time def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class MaintenanceMode(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "MaintenanceMode" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 #self.is_unsafe = 1 self.benchmark = 1 self.action = "asyncmon" self.interval = 0 self.rid = "maintenanceDummy" def toggleMaintenanceMode(self, node, action): pats = [] pats.append(self.templates["Pat:DC_IDLE"]) # fail the resource right after turning Maintenance mode on # verify it is not recovered until maintenance mode is turned off if action == "On": pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for %s on" % (self.action, self.rid)) else: pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0")) pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0")) watch = self.create_watch(pats, 60) watch.setwatch() self.debug("Turning maintenance mode %s" % action) self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) if (action == "On"): self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover%s" % (action)) watch.lookforall() self.log_timer("recover%s" % (action)) if watch.unmatched: self.debug("Failed to find patterns when turning maintenance mode %s" % action) return repr(watch.unmatched) return "" def insertMaintenanceDummy(self, node): pats = [] pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % (self.rid, "start_0"))) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.AddDummyRsc(node, self.rid) self.set_timer("addDummy") watch.lookforall() self.log_timer("addDummy") if watch.unmatched: self.debug("Failed to find patterns when adding maintenance dummy resource") return repr(watch.unmatched) return "" def removeMaintenanceDummy(self, node): pats = [] pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0")) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.RemoveDummyRsc(node, self.rid) self.set_timer("removeDummy") watch.lookforall() 
self.log_timer("removeDummy") if watch.unmatched: self.debug("Failed to find patterns when removing maintenance dummy resource") return repr(watch.unmatched) return "" def managedRscList(self, node): rscList = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.managed(): rscList.append(tmp.id) return rscList def verifyResources(self, node, rscList, managed): managedList = list(rscList) managed_str = "managed" if not managed: managed_str = "unmanaged" (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if managed and not tmp.managed(): continue elif not managed and tmp.managed(): continue elif managedList.count(tmp.id): managedList.remove(tmp.id) if len(managedList) == 0: self.debug("Found all %s resources on %s" % (managed_str, node)) return True self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) return False def __call__(self, node): '''Perform the 'MaintenanceMode' test. ''' self.incr("calls") verify_managed = False verify_unmanaged = False failPat = "" ret = self.startall(None) if not ret: return self.failure("Setup failed") # get a list of all the managed resources. We use this list # after enabling maintenance mode to verify all managed resources # become un-managed. After maintenance mode is turned off, we use # this list to verify all the resources become managed again. managedResources = self.managedRscList(node) if len(managedResources) == 0: self.logger.log("No managed resources on %s" % node) return self.skipped() # insert a fake resource we can fail during maintenance mode # so we can verify recovery does not take place until after maintenance # mode is disabled. failPat = failPat + self.insertMaintenanceDummy(node) # toggle maintenance mode ON, then fail dummy resource. failPat = failPat + self.toggleMaintenanceMode(node, "On") # verify all the resources are now unmanaged if self.verifyResources(node, managedResources, False): verify_unmanaged = True # Toggle maintenance mode OFF, verify dummy is recovered. failPat = failPat + self.toggleMaintenanceMode(node, "Off") # verify all the resources are now managed again if self.verifyResources(node, managedResources, True): verify_managed = True # Remove our maintenance dummy resource. 
failPat = failPat + self.removeMaintenanceDummy(node) self.CM.cluster_stable() if failPat != "": return self.failure("Unmatched patterns: %s" % (failPat)) elif verify_unmanaged is False: return self.failure("Failed to verify resources became unmanaged during maintenance mode") elif verify_managed is False: return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"pengine.*: Recover %s\s*\(.*\)" % self.rid, r"Unknown operation: fail", r"(ERROR|error): sending stonithRA op to stonithd failed.", self.templates["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(MaintenanceMode) class ResourceRecover(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 self.rid = None self.rid_alt = None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") resourcelist = self.CM.active_resources(node) # if there are no resourcelist, return directly if len(resourcelist) == 0: self.logger.log("No active resources on %s" % node) return self.skipped() self.rid = self.Env.RandomGen.choice(resourcelist) self.rid_alt = self.rid rsc = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.id == self.rid: rsc = tmp # Handle anonymous clones that get renamed self.rid = rsc.clone_id break if not rsc: return self.failure("Could not find %s in the resource list" % self.rid) self.debug("Shooting %s aka. 
%s" % (rsc.clone_id, rsc.id)) pats = [] pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for (%s|%s) on" % (self.action, rsc.id, rsc.clone_id)) if rsc.managed(): pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0")) if rsc.unique(): pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0")) else: # Anonymous clones may get restarted with a different clone number pats.append(self.templates["Pat:RscOpOK"] % (".*", "start_0")) watch = self.create_watch(pats, 60) watch.setwatch() self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover") watch.lookforall() self.log_timer("recover") self.CM.cluster_stable() recovered = self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.debug("%s is running on: %s" % (self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"pengine.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt), r"Unknown operation: fail", r"(ERROR|error): sending stonithRA op to stonithd failed.", self.templates["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) class ComponentFail(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ComponentFail" # TODO make this work correctly in docker. self.is_docker_unsafe = 1 self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. ''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.Env["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.Env.RandomGen.choice(self.complist) self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "aisexec" and chosen.name != "corosync": if self.Env["Name"] != "crm-lha" or chosen.name != "pengine": self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # In an ideal world, this next stuff should be in the "chosen" object as a member function if self.Env["Name"] == "crm-lha" and chosen.triggersreboot: # Make sure the node goes down and then comes back up if it should reboot... 
for other in self.Env["nodes"]: if other != node: self.patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) self.patterns.append(self.templates["Pat:Slave_started"] % node) self.patterns.append(self.templates["Pat:Local_started"] % node) if chosen.dc_only: # Sometimes these will be in the log, and sometimes they won't... self.okerrpatterns.append("%s .*Process %s:.* exited" % (node, chosen.name)) self.okerrpatterns.append("%s .*I_ERROR.*crmdManagedChildDied" % node) self.okerrpatterns.append("%s .*The %s subsystem terminated unexpectedly" % (node, chosen.name)) self.okerrpatterns.append("(ERROR|error): Client .* exited with return code") else: # Sometimes this won't be in the log... self.okerrpatterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.okerrpatterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.okerrpatterns.append(self.templates["Pat:ChildExit"]) if chosen.name == "stonith": # Ignore actions for STONITH resources (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) # supply a copy so self.patterns doesnt end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append(self.templates["Pat:Fencing_ok"] % node) stonith = self.create_watch(stonithPats, 0) stonith.setwatch() # set the watch for stable watch = self.create_watch( tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for any STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") self.CM.cluster_stable(self.Env["StartTime"]) self.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.debug("Found: " + repr(shot)) self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) if self.Env["at-boot"] == 0: self.CM.ShouldBeStatus[node] = "down" # If fencing occurred, chances are many (if not all) the expected logs # will not be sent - or will be lost when the node reboots return self.success() # check for logs indicating a graceful recovery matched = watch.lookforall(allow_multiple_matches=1) if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected %s patterns" % chosen.name) elif not is_stable: return self.failure("Cluster did not become stable after killing %s" % chosen.name) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) class SplitBrainTest(CTSTest): '''It is used to test split-brain. 
    When the path between the two nodes breaks, check whether both nodes
    take over the resource.'''

    def __init__(self,cm):
        CTSTest.__init__(self,cm)
        self.name = "SplitBrain"
        self.start = StartTest(cm)
        self.startall = SimulStartLite(cm)
        self.is_experimental = 1

    def isolate_partition(self, partition):
        other_nodes = []
        other_nodes.extend(self.Env["nodes"])

        for node in partition:
            try:
                other_nodes.remove(node)
            except ValueError:
                self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition))

        if len(other_nodes) == 0:
            return 1

        self.debug("Creating partition: " + repr(partition))
        self.debug("Everyone else: " + repr(other_nodes))

        for node in partition:
            if not self.CM.isolate_node(node, other_nodes):
                self.logger.log("Could not isolate %s" % node)
                return 0

        return 1

    def heal_partition(self, partition):
        other_nodes = []
        other_nodes.extend(self.Env["nodes"])

        for node in partition:
            try:
                other_nodes.remove(node)
            except ValueError:
                self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]))

        if len(other_nodes) == 0:
            return 1

        self.debug("Healing partition: " + repr(partition))
        self.debug("Everyone else: " + repr(other_nodes))

        for node in partition:
            self.CM.unisolate_node(node, other_nodes)

    def __call__(self, node):
        '''Perform split-brain test'''
        self.incr("calls")
        self.passed = 1
        partitions = {}

        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        while 1:
            # Retry until we get multiple partitions
            partitions = {}
            p_max = len(self.Env["nodes"])
            for node in self.Env["nodes"]:
                p = self.Env.RandomGen.randint(1, p_max)
                if not p in partitions:
                    partitions[p] = []
                partitions[p].append(node)
            p_max = len(partitions.keys())
            if p_max > 1:
                break
            # else, try again

        self.debug("Created %d partitions" % p_max)
        for key in list(partitions.keys()):
            self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))

        # Disabling STONITH to reduce test complexity for now
        self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")

        for key in list(partitions.keys()):
            self.isolate_partition(partitions[key])

        count = 30
        while count > 0:
            if len(self.CM.find_partitions()) != p_max:
                time.sleep(10)
                count -= 1
            else:
                break
        else:
            self.failure("Expected partitions were not created")

        # Target number of partitions formed - wait for stability
        if not self.CM.cluster_stable():
            self.failure("Partitioned cluster not stable")

        # Now audit the cluster state
        self.CM.partitions_expected = p_max
        if not self.audit():
            self.failure("Audits failed")
        self.CM.partitions_expected = 1

        # And heal them again
        for key in list(partitions.keys()):
            self.heal_partition(partitions[key])

        # Wait for a single partition to form
        count = 30
        while count > 0:
            if len(self.CM.find_partitions()) != 1:
                time.sleep(10)
                count -= 1
            else:
                break
        else:
            self.failure("Cluster did not reform")

        # Wait for it to have the right number of members
        count = 30
        while count > 0:
            members = []

            partitions = self.CM.find_partitions()
            if len(partitions) > 0:
                members = partitions[0].split()

            if len(members) != len(self.Env["nodes"]):
                time.sleep(10)
                count -= 1
            else:
                break
        else:
            self.failure("Cluster did not completely reform")

        # Wait up to 20 minutes - the delay is preferable to
        # trying to continue in a messed-up state
        if not self.CM.cluster_stable(1200):
            self.failure("Reformed cluster not stable")

        answer = raw_input('Continue?
[nY]') if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.Env["DoFencing"]: self.rsh(node, "crm_attribute -V -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ r"Another DC detected:", r"(ERROR|error).*: .*Application of an update diff failed", r"crmd.*:.*not in our membership list", r"CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) class Reattach(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def setup(self, node): attempt = 0 if not self.startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started while not self.CM.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.debug("Not stable yet, re-testing") else: self.logger.log("Cluster is not stable") return None return 1 def teardown(self, node): # Make sure 'node' is up start = StartTest(self.CM) start(node) is_managed = self.rsh(node, "crm_attribute -Q -G -t crm_config -n is-managed-default -d true", 1) is_managed = is_managed[:-1] # Strip off the newline if is_managed != "true": self.logger.log("Attempting to re-enable resource management on %s (%s)" % (node, is_managed)) managed = self.create_watch(["is-managed-default"], 60) managed.setwatch() self.rsh(node, "crm_attribute -V -D -n is-managed-default") if not managed.lookforall(): self.logger.log("Patterns not found: " + repr(managed.unmatched)) self.logger.log("Could not re-enable resource management") return 0 return 1 def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] managed = self.create_watch(["is-managed-default"], 60) managed.setwatch() self.debug("Disable resource management") self.rsh(node, "crm_attribute -V -n is-managed-default -v false") if not managed.lookforall(): self.logger.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append(self.templates["Pat:RscOpOK"] % (".*", "start")) pats.append(self.templates["Pat:RscOpOK"] % (".*", "stop")) pats.append(self.templates["Pat:RscOpOK"] % (".*", "promote")) pats.append(self.templates["Pat:RscOpOK"] % (".*", "demote")) pats.append(self.templates["Pat:RscOpOK"] % (".*", "migrate")) watch = self.create_watch(pats, 60, "ShutdownActivity") watch.setwatch() self.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -V -D -n is-managed-default") return self.failure("Couldn't shut down the cluster") self.debug("Bringing the cluster back up") ret = self.startall(None) time.sleep(5) # allow ping to update the CIB if not ret: self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -V -D -n is-managed-default") return self.failure("Couldn't restart the cluster") if 
self.local_badnews("ResourceActivity:", watch): self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -V -D -n is-managed-default") return self.failure("Resources stopped or started during cluster restart") watch = self.create_watch(pats, 60, "StartupActivity") watch.setwatch() managed = self.create_watch(["is-managed-default"], 60) managed.setwatch() self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -V -D -n is-managed-default") if not managed.lookforall(): self.logger.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not enabled") self.CM.cluster_stable() # Ignore actions for STONITH resources ignore = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.debug("Ignoring start actions for %s" % r.id) ignore.append(self.templates["Pat:RscOpOK"] % (r.id, "start_0")) if self.local_badnews("ResourceActivity:", watch, ignore): return self.failure("Resources stopped or started after resource management was re-enabled") return ret def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"resources were active at shutdown", ] def is_applicable(self): if self.Env["Name"] == "crm-lha": return None return 1 AllTestClasses.append(Reattach) class SpecialTest1(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SpecialTest1" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. ''' self.incr("calls") # Shut down all the nodes... 
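        # (The scenario: with the whole cluster down, wipe the CIB on one
        # node, then verify that node resyncs its configuration from its
        # peers as the cluster comes back up.)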
ret = self.stopall(None) if not ret: return self.failure("Could not stop all nodes") # Test config recovery when the other nodes come up self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") # Start the selected node ret = self.restart1(node) if not ret: return self.failure("Could not start "+node) # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Could not start the remaining nodes") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Errors that occur as a result of the CIB being wiped return [ r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed", r"error.*: Resource start-up disabled since no STONITH resources have been defined", r"error.*: Either configure some or disable STONITH with the stonith-enabled option", r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity", ] AllTestClasses.append(SpecialTest1) class HAETest(CTSTest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "HAETest" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_loop = 1 def setup(self, node): # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active = 0 (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.debug("Resource %s is still inactive" % resource) elif rc == 234: self.logger.log("Unknown resource %s" % resource) return 0 elif rc == 246: self.logger.log("Cluster is inactive") return 0 elif rc != 0: self.logger.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "controld" and r.parent != "NA": self.debug("Found dlm: %s" % self.r_dlm) self.r_dlm = r.parent return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None self.r_o2cb = None self.r_ocfs2 = [] if self.find_dlm(node): self.find_ocfs2_resources(node) def is_applicable(self): if not self.is_applicable_common(): return 0 if self.Env["Schema"] == "hae": return 1 return None class HAERoleTest(HAETest): def __init__(self, cm): '''Lars' mount/unmount test for the HA extension. 
''' HAETest.__init__(self,cm) self.name = "HAERoleTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) class HAEStandbyTest(HAETest): '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): HAETest.__init__(self,cm) self.name = "HAEStandbyTest" def change_state(self, node, resource, target): rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 done = time.time() + self.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) class NearQuorumPointTest(CTSTest): ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down Use a 50% probability for each of up/down. - figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test. 
''' self.incr("calls") startset = [] stopset = [] stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint") #decide what to do with each node for node in self.Env["nodes"]: action = self.Env.RandomGen.choice(["start","stop"]) #action = self.Env.RandomGen.choice(["start","stop","no change"]) if action == "start" : startset.append(node) elif action == "stop" : stopset.append(node) self.debug("start nodes:" + repr(startset)) self.debug("stop nodes:" + repr(stopset)) #add search patterns watchpats = [ ] for node in stopset: if self.CM.ShouldBeStatus[node] == "up": watchpats.append(self.templates["Pat:We_stopped"] % node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": #watchpats.append(self.templates["Pat:Slave_started"] % node) watchpats.append(self.templates["Pat:Local_started"] % node) else: for stopping in stopset: if self.CM.ShouldBeStatus[stopping] == "up": watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping))) if len(watchpats) == 0: return self.skipped() if len(startset) != 0: watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() #begin actions for node in stopset: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) #get the result if watch.lookforall(): self.CM.cluster_stable() self.CM.fencing_cleanup("NearQuorumPoint", stonith) return self.success() self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched)) #get the "bad" nodes upnodes = [] for node in stopset: if self.CM.StataCM(node) == 1: upnodes.append(node) downnodes = [] for node in startset: if self.CM.StataCM(node) == 0: downnodes.append(node) self.CM.fencing_cleanup("NearQuorumPoint", stonith) if upnodes == [] and downnodes == []: self.CM.cluster_stable() # Make sure they're completely down with no residule for node in stopset: self.rsh(node, self.templates["StopCmd"]) return self.success() if len(upnodes) > 0: self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes)) if len(downnodes) > 0: self.logger.log("Warn: Unstartable nodes: " + repr(downnodes)) return self.failure() def is_applicable(self): if self.Env["Name"] == "crm-cman": return None return 1 AllTestClasses.append(NearQuorumPointTest) class RollingUpgradeTest(CTSTest): '''Perform a rolling upgrade of the cluster''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RollingUpgrade" self.start = StartTest(cm) self.stop = StopTest(cm) self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def setup(self, node): # Start all remaining nodes ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.Env["nodes"]: if not self.downgrade(node, None): return self.failure("Couldn't downgrade %s" % node) ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.Env["nodes"]: if not self.upgrade(node, None): return self.failure("Couldn't upgrade %s" % node) return self.success() def install(self, node, version, start=1, flags="--force"): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.Env["rpm-dir"], version) self.logger.log("Installing %s on %s with %s" % (version, node, flags)) if not self.stop(node): return self.failure("stop failure: "+node) rc = 
self.rsh(node, "mkdir -p %s" % target_dir) rc = self.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.Env["current-version"], start) def downgrade(self, node, start=1): return self.install(node, self.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not "rpm-dir" in self.Env.keys(): return None if not "current-version" in self.Env.keys(): return None if not "previous-version" in self.Env.keys(): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) class BSC_AddResource(CTSTest): '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "AddResource" self.resource_offset = 0 self.cib_cmd = """cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "crmd.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() ip = self.NextIP() if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.Env["DeadTime"]): return self.failure("Unstable cluster") return self.success() def NextIP(self): ip = self.Env["IPBase"] if ":" in ip: fields = ip.rpartition(":") fields[2] = str(hex(int(fields[2], 16)+1)) print(str(hex(int(f[2], 16)+1))) else: fields = ip.rpartition('.') fields[2] = str(int(fields[2])+1) ip = fields[0] + fields[1] + fields[3]; self.Env["IPBase"] = ip return ip.strip() def make_ip_resource(self, node, id, rclass, type, ip): self.logger.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint = """ """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.logger.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.logger.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.Env["DoBSC"]: return 1 return None AllTestClasses.append(BSC_AddResource) class SimulStopLite(CTSTest): '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStopLite" def __call__(self, dummy): '''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.debug("Setup: " + self.name) # We ignore the "node" parameter... 
        watchpats = [ ]
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "up":
                self.incr("WasStarted")
                watchpats.append(self.templates["Pat:We_stopped"] % node)
                #if self.Env["use_logd"]:
                #    watchpats.append(self.templates["Pat:Logd_stopped"] % node)

        if len(watchpats) == 0:
            self.CM.clear_all_caches()
            return self.success()

        # Stop all the nodes - at about the same time...
        watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
        watch.setwatch()
        self.set_timer()
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "up":
                self.CM.StopaCMnoBlock(node)

        if watch.lookforall():
            self.CM.clear_all_caches()

            # Make sure they're completely down with no residue
            for node in self.Env["nodes"]:
                self.rsh(node, self.templates["StopCmd"])

            return self.success()

        did_fail = 0
        up_nodes = []
        for node in self.Env["nodes"]:
            if self.CM.StataCM(node) == 1:
                did_fail = 1
                up_nodes.append(node)

        if did_fail:
            return self.failure("Active nodes exist: " + repr(up_nodes))

        self.logger.log("Warn: All nodes stopped but CTS didn't detect: " + repr(watch.unmatched))

        self.CM.clear_all_caches()
        return self.failure("Missing log message: "+repr(watch.unmatched))

    def is_applicable(self):
        '''SimulStopLite is a setup test and never applicable'''
        return 0


class SimulStartLite(CTSTest):
    '''Start any stopped nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name = "SimulStartLite"

    def __call__(self, dummy):
        '''Perform the 'SimulStartLite' setup work. '''
        self.incr("calls")
        self.debug("Setup: " + self.name)

        # We ignore the "node" parameter...
        node_list = []
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "down":
                self.incr("WasStopped")
                node_list.append(node)

        self.set_timer()
        while len(node_list) > 0:
            # Repeat until all nodes come up
            watchpats = [ ]

            uppat = self.templates["Pat:Slave_started"]
            if self.CM.upcount() == 0:
                uppat = self.templates["Pat:Local_started"]

            watchpats.append(self.templates["Pat:DC_IDLE"])
            for node in node_list:
                watchpats.append(uppat % node)
                watchpats.append(self.templates["Pat:InfraUp"] % node)
                watchpats.append(self.templates["Pat:PacemakerUp"] % node)

            # Start all the nodes - at about the same time...
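            # Any node that gets fenced while it is coming up is handed back
            # to us by fencing_cleanup() below, and the surrounding while
            # loop simply retries it.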
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() stonith = self.CM.prepare_fencing_watcher(self.name) for node in node_list: self.CM.StartaCMnoBlock(node) watch.lookforall() node_list = self.CM.fencing_cleanup(self.name, stonith) # Remove node_list messages from watch.unmatched for node in node_list: self.logger.debug("Dealing with stonith operations for %s" % repr(node_list)) if watch.unmatched: try: watch.unmatched.remove(uppat % node) except: self.debug("Already matched: %s" % (uppat % node)) try: watch.unmatched.remove(self.templates["Pat:InfraUp"] % node) except: self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node)) try: watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node) except: self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node)) if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Startup pattern not found: %s" %(regex)) if not self.CM.cluster_stable(): return self.failure("Cluster did not stabilize") did_fail = 0 unstable = [] for node in self.Env["nodes"]: if self.CM.StataCM(node) == 0: did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstarted nodes exist: " + repr(unstable)) unstable = [] for node in self.Env["nodes"]: if not self.CM.node_stable(node): did_fail = 1 unstable.append(node) if did_fail: return self.failure("Unstable cluster nodes exist: " + repr(unstable)) return self.success() def is_applicable(self): '''SimulStartLite is a setup test and never applicable''' return 0 def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.Audits = audits result.append(bound_test) return result class RemoteLXC(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteLXC" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.num_containers = 2 self.is_container = 1 self.is_docker_unsafe = 1 self.failed = 0 self.fail_string = "" def start_lxc_simple(self, node): # restore any artifacts laying around from a previous test. 
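        # (lxc_autogen.sh is the CTS helper script for LXC guests; the -R
        # invocation below resets any leftover state, and the later
        # -g/-a/-m/-s/-c call regenerates the containers and their
        # resources. See the script itself for authoritative flag meanings.)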
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null") # generate the containers, put them in the config, add some resources to them pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("lxc1", "start_0")) pats.append(self.templates["Pat:RscOpOK"] % ("lxc2", "start_0")) pats.append(self.templates["Pat:RscOpOK"] % ("lxc-ms", "start_0")) pats.append(self.templates["Pat:RscOpOK"] % ("lxc-ms", "promote_0")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) self.set_timer("remoteSimpleInit") watch.lookforall() self.log_timer("remoteSimpleInit") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 def cleanup_lxc_simple(self, node): pats = [ ] # if the test failed, attempt to clean up the cib and libvirt environment # as best as possible if self.failed == 1: # restore libvirt and cib self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null") self.rsh(node, "crm_resource -C -r container1 &>/dev/null") self.rsh(node, "crm_resource -C -r container2 &>/dev/null") self.rsh(node, "crm_resource -C -r lxc1 &>/dev/null") self.rsh(node, "crm_resource -C -r lxc2 &>/dev/null") self.rsh(node, "crm_resource -C -r lxc-ms &>/dev/null") time.sleep(20) return watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("container1", "stop_0")) pats.append(self.templates["Pat:RscOpOK"] % ("container2", "stop_0")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") self.set_timer("remoteSimpleCleanup") watch.lookforall() self.log_timer("remoteSimpleCleanup") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 # cleanup libvirt self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null") def __call__(self, node): '''Perform the 'RemoteLXC' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") if rc == 1: self.log("Environment test for lxc support failed.") return self.skipped() self.start_lxc_simple(node) self.cleanup_lxc_simple(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed == 1: return self.failure(self.fail_string) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for ping", r"pengine.*: Recover (ping|lxc-ms|container)\s*\(.*\)", # The orphaned lxc-ms resource causes an expected transition error # that is a result of the pengine not having knowledge that the # ms resource used to be a clone. As a result it looks like that # resource is running in multiple locations when it shouldn't... But in # this instance we know why this error is occurring and that it is expected. 
r"Calculated Transition .* /var/lib/pacemaker/pengine/pe-error", r"Resource lxc-ms .* is active on 2 nodes attempting recovery", r"Unknown operation: fail", r"(ERROR|error): sending stonithRA op to stonithd failed.", ] AllTestClasses.append(RemoteLXC) ################################################################### class RemoteDriver(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteDriver" self.is_docker_unsafe = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.stop = StopTest(cm) self.pcmk_started = 0 self.failed = 0 self.fail_string = "" self.remote_node_added = 0 self.remote_rsc_added = 0 self.remote_rsc = "remote-rsc" self.remote_use_reconnect_interval = self.Env.RandomGen.choice(["true","false"]) self.cib_cmd = """cibadmin -C -o %s -X '%s' """ + def fail(self, msg): + """ Mark test as failed. """ + + # TODO: It's a boolean. Use True/False. + self.failed = 1 + + # Always log the failure. + self.logger.log(msg) + + # Use first failure as test status, as it's likely to be most useful. + if not self.fail_string: + self.fail_string = msg + def get_othernode(self, node): for othernode in self.Env["nodes"]: if othernode == node: # we don't want to try and use the cib that we just shutdown. # find a cluster node that is not our soon to be remote-node. continue else: return othernode def del_rsc(self, node, rsc): othernode = self.get_othernode(node) rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) if rc != 0: - self.fail_string = ("Removal of resource '%s' failed" % (rsc)) - self.failed = 1 + self.fail("Removal of resource '%s' failed" % rsc) def add_rsc(self, node, rsc_xml): othernode = self.get_othernode(node) rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) if rc != 0: - self.fail_string = "resource creation failed" - self.failed = 1 + self.fail("resource creation failed") def add_primitive_rsc(self, node): rsc_xml = """ """ % (self.remote_rsc) self.add_rsc(node, rsc_xml) if self.failed == 0: self.remote_rsc_added = 1 def add_connection_rsc(self, node): if self.remote_use_reconnect_interval == "true": # use reconnect interval and make sure to set cluster-recheck-interval as well. 
rsc_xml = """ """ % (self.remote_node, node) self.rsh(self.get_othernode(node), self.templates["SetCheckInterval"] % ("45s")) else: # not using reconnect interval rsc_xml = """ """ % (self.remote_node, node) self.add_rsc(node, rsc_xml) if self.failed == 0: self.remote_node_added = 1 def stop_pcmk_remote(self, node): # disable pcmk remote for i in range(10): rc = self.rsh(node, "service pacemaker_remote stop") if rc != 0: time.sleep(6) else: break def start_pcmk_remote(self, node): for i in range(10): rc = self.rsh(node, "service pacemaker_remote start") if rc != 0: time.sleep(6) else: self.pcmk_started = 1 break def start_metal(self, node): pcmk_started = 0 # make sure the resource doesn't already exist for some reason self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc)) self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node)) if not self.stop(node): - self.failed = 1 - self.fail_string = "Failed to shutdown cluster node %s" % (node) + self.fail("Failed to shutdown cluster node %s" % node) return self.start_pcmk_remote(node) if self.pcmk_started == 0: - self.failed = 1 - self.fail_string = "Failed to start pacemaker_remote on node %s" % (node) + self.fail("Failed to start pacemaker_remote on node %s" % node) return # convert node to baremetal node now that it has shutdow the cluster stack pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "start")) pats.append(self.templates["Pat:DC_IDLE"]) self.add_connection_rsc(node) self.set_timer("remoteMetalInit") watch.lookforall() self.log_timer("remoteMetalInit") if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 + self.fail("Unmatched patterns: %s" % watch.unmatched) def migrate_connection(self, node): if self.failed == 1: return pats = [ ] pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "migrate_to")) pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "migrate_from")) pats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(pats, 120) watch.setwatch() (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None) if rc != 0: - self.fail_string = "failed to move remote node connection resource" - self.logger.log(self.fail_string) - self.failed = 1 + self.fail("failed to move remote node connection resource") return self.set_timer("remoteMetalMigrate") watch.lookforall() self.log_timer("remoteMetalMigrate") if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.logger.log(self.fail_string) - self.failed = 1 + self.fail("Unmatched patterns: %s" % watch.unmatched) return def fail_rsc(self, node): if self.failed == 1: return watchpats = [ ] watchpats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "stop", self.remote_node)) watchpats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "start", self.remote_node)) watchpats.append(self.templates["Pat:DC_IDLE"]) watch = self.create_watch(watchpats, 120) watch.setwatch() self.debug("causing dummy rsc to fail.") rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*") self.set_timer("remoteRscFail") watch.lookforall() self.log_timer("remoteRscFail") if watch.unmatched: - self.fail_string = "Unmatched patterns during rsc fail: %s" % (repr(watch.unmatched)) - self.logger.log(self.fail_string) - self.failed = 1 + self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched) def fail_connection(self, 
node): if self.failed == 1: return watchpats = [ ] watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node) watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node) watch = self.create_watch(watchpats, 120) watch.setwatch() # force stop the pcmk remote daemon. this will result in fencing self.debug("Force stopped active remote node") self.stop_pcmk_remote(node) self.debug("Waiting for remote node to be fenced.") self.set_timer("remoteMetalFence") watch.lookforall() self.log_timer("remoteMetalFence") if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.logger.log(self.fail_string) - self.failed = 1 + self.fail("Unmatched patterns: %s" % watch.unmatched) return self.debug("Waiting for the remote node to come back up") self.CM.ns.WaitForNodeToComeUp(node, 120); pats = [ ] - watch = self.create_watch(pats, 200) + watch = self.create_watch(pats, 240) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "start")) if self.remote_rsc_added == 1: - pats.append(self.templates["Pat:RscOpOK"] % (self.remote_rsc, "monitor")) + pats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "start", self.remote_node)) # start the remote node again watch it integrate back into cluster. self.start_pcmk_remote(node) if self.pcmk_started == 0: - self.failed = 1 - self.fail_string = "Failed to start pacemaker_remote on node %s" % (node) - self.logger.log(self.fail_string) + self.fail("Failed to start pacemaker_remote on node %s" % node) return self.debug("Waiting for remote node to rejoin cluster after being fenced.") self.set_timer("remoteMetalRestart") watch.lookforall() self.log_timer("remoteMetalRestart") if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 - self.logger.log(self.fail_string) + self.fail("Unmatched patterns: %s" % watch.unmatched) return def add_dummy_rsc(self, node): if self.failed == 1: return # verify we can put a resource on the remote node pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "start", self.remote_node)) pats.append(self.templates["Pat:DC_IDLE"]) # Add a resource that must live on remote-node self.add_primitive_rsc(node) # force that rsc to prefer the remote node. (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None) if rc != 0: - self.fail_string = "Failed to place remote resource on remote node." - self.failed = 1 + self.fail("Failed to place remote resource on remote node.") return self.set_timer("remoteMetalRsc") watch.lookforall() self.log_timer("remoteMetalRsc") if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 + self.fail("Unmatched patterns: %s" % watch.unmatched) def test_attributes(self, node): if self.failed == 1: return # This verifies permanent attributes can be set on a remote-node. It also # verifies the remote-node can edit it's own cib node section remotely. (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None) if rc != 0: - self.fail_string = "Failed to set remote-node attribute. rc:%s output:%s" % (rc, line) - self.failed = 1 + self.fail("Failed to set remote-node attribute. 
rc:%s output:%s" % (rc, line)) return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -Q -N %s" % (self.remote_node), None) if rc != 0: - self.fail_string = "Failed to get remote-node attribute" - self.failed = 1 + self.fail("Failed to get remote-node attribute") return (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None) if rc != 0: - self.fail_string = "Failed to delete remote-node attribute" - self.failed = 1 + self.fail("Failed to delete remote-node attribute") return def cleanup_metal(self, node): if self.pcmk_started == 0: return pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() if self.remote_rsc_added == 1: pats.append(self.templates["Pat:RscOpOK"] % (self.remote_rsc, "stop")) if self.remote_node_added == 1: pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "stop")) self.set_timer("remoteMetalCleanup") if self.remote_use_reconnect_interval == "true": self.debug("Cleaning up re-check interval") self.rsh(self.get_othernode(node), self.templates["ClearCheckInterval"]) + if self.remote_rsc_added == 1: + + # Remove dummy resource added for remote node tests self.debug("Cleaning up dummy rsc put on remote node") - self.rsh(node, "crm_resource -U -r %s -N %s" % (self.remote_rsc, self.remote_node)) + self.rsh(node, "crm_resource -U -r %s" % self.remote_rsc) self.del_rsc(node, self.remote_rsc) + if self.remote_node_added == 1: + + # Remove remote node's connection resource self.debug("Cleaning up remote node connection resource") self.rsh(node, "crm_resource -U -r %s" % (self.remote_node)) self.del_rsc(node, self.remote_node) watch.lookforall() self.log_timer("remoteMetalCleanup") if watch.unmatched: - self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) - self.failed = 1 + self.fail("Unmatched patterns: %s" % watch.unmatched) self.stop_pcmk_remote(node) + self.debug("Waiting for the cluster to recover") + self.CM.cluster_stable() + + if self.remote_node_added == 1: + # Remove remote node itself + self.debug("Cleaning up node entry for remote node") + self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node) + def setup_env(self, node): self.remote_node = "remote_%s" % (node) sync_key = 0 # we are assuming if all nodes have a key, that it is # the right key... If any node doesn't have a remote # key, we regenerate it everywhere. for node in self.Env["nodes"]: rc = self.rsh(node, "ls /etc/pacemaker/authkey") if rc != 0: sync_key = 1 break if sync_key == 0: return # create key locally os.system("/usr/share/pacemaker/tests/cts/lxc_autogen.sh -k &> /dev/null") # sync key throughout the cluster for node in self.Env["nodes"]: rc = self.rsh(node, "mkdir /etc/pacemaker") self.rsh.cp("/etc/pacemaker/authkey", "%s:/etc/pacemaker/authkey" % (node)) def is_applicable(self): if not self.is_applicable_common(): return False for node in self.Env["nodes"]: rc = self.rsh(node, "type pacemaker_remoted >/dev/null 2>&1") if rc != 0: return False return True def __call__(self, node): '''Perform the 'RemoteBaremetal' test. 
''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") self.setup_env(node) self.start_metal(node) self.add_dummy_rsc(node) self.test_attributes(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed == 1: return self.failure(self.fail_string) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ """is running on remote.*which isn't allowed""", """Connection terminated""", """Failed to send remote""", ] # Remote driver is called by other tests. ################################################################### class RemoteBasic(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteBasic" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.driver = RemoteDriver(cm) self.is_docker_unsafe = 1 def __call__(self, node): '''Perform the 'RemoteBasic' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") self.driver.setup_env(node) self.driver.start_metal(node) self.driver.add_dummy_rsc(node) self.driver.test_attributes(node) self.driver.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.driver.failed == 1: return self.failure(self.driver.fail_string) return self.success() def is_applicable(self): return self.driver.is_applicable() def errorstoignore(self): return self.driver.errorstoignore() AllTestClasses.append(RemoteBasic) ################################################################### class RemoteStonithd(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteStonithd" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.driver = RemoteDriver(cm) self.is_docker_unsafe = 1 def __call__(self, node): '''Perform the 'RemoteStonithd' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") self.driver.setup_env(node) self.driver.start_metal(node) self.driver.add_dummy_rsc(node) self.driver.fail_connection(node) self.driver.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.driver.failed == 1: return self.failure(self.driver.fail_string) return self.success() def is_applicable(self): if not self.driver.is_applicable(): return False if "DoFencing" in self.Env.keys(): return self.Env["DoFencing"] return True def errorstoignore(self): ignore_pats = [ r"Unexpected disconnect on remote-node", r"crmd.*: error.*: Operation remote_.*_monitor", r"pengine.*: Recover remote_.*\s*\(.*\)", r"Calculated Transition .* /var/lib/pacemaker/pengine/pe-error", r"error.*: Resource .*ocf::.* is active on 2 nodes attempting recovery", ] ignore_pats.extend(self.driver.errorstoignore()) return ignore_pats AllTestClasses.append(RemoteStonithd) ################################################################### class RemoteMigrate(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RemoteMigrate" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.driver = RemoteDriver(cm) self.is_docker_unsafe = 1 def __call__(self, node): '''Perform the 'RemoteMigrate' test. 
''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") self.driver.setup_env(node) self.driver.start_metal(node) self.driver.add_dummy_rsc(node) self.driver.migrate_connection(node) self.driver.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.driver.failed == 1: return self.failure(self.driver.fail_string) return self.success() def is_applicable(self): return self.driver.is_applicable() def errorstoignore(self): return self.driver.errorstoignore() AllTestClasses.append(RemoteMigrate) ################################################################### class RemoteRscFailure(CTSTest): ################################################################### def __init__(self, cm): # fail a rsc on a remote node, verify recovery. CTSTest.__init__(self,cm) self.name = "RemoteRscFailure" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.driver = RemoteDriver(cm) self.is_docker_unsafe = 1 def __call__(self, node): '''Perform the 'RemoteRscFailure' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") self.driver.setup_env(node) self.driver.start_metal(node) self.driver.add_dummy_rsc(node) # This is an important step. We are migrating the connection # before failing the resource. This verifies that the migration # has properly maintained control over the remote-node. self.driver.migrate_connection(node) self.driver.fail_rsc(node) self.driver.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.driver.failed == 1: return self.failure(self.driver.fail_string) return self.success() def is_applicable(self): return self.driver.is_applicable() def errorstoignore(self): ignore_pats = [ r"pengine.*: Recover remote-rsc\s*\(.*\)", ] ignore_pats.extend(self.driver.errorstoignore()) return ignore_pats AllTestClasses.append(RemoteRscFailure) # vim:ts=4:sw=4:et: diff --git a/cts/README b/cts/README index d4d564f7e1..23ffcd04ef 100644 --- a/cts/README +++ b/cts/README @@ -1,142 +1,235 @@ PACEMAKER CLUSTER TEST SUITE (CTS) Purpose ------- CTS thoroughly exercises a pacemaker test cluster by running a randomized series of predefined tests on the cluster. CTS can be run against a pre-existing cluster configuration or (more typically) overwrite the existing configuration with a test configuration. Requirements ------------ * Three or more machines (one test exerciser and two or more test cluster machines). * The test cluster machines should be on the same subnet and have journalling filesystems (ext3, ext4, xfs, etc.) for all of their filesystems other than /boot. You also need a number of free IP addresses on that subnet if you intend to test mutual IP address takeover. * The test exerciser machine doesn't need to be on the same subnet as the test cluster machines. Minimal demands are made on the exerciser machine - it just has to stay up during the tests. * It helps a lot in tracking problems if all machines' clocks are closely synchronized. NTP does this automatically, but you can do it by hand if you want. * The exerciser needs to be able to ssh over to the cluster nodes as root without a password challenge. Configure ssh accordingly (see the Mini-HOWTO at the end of this document for more details). * The exerciser needs to be able to resolve the machine names of the test cluster - either by DNS or by /etc/hosts. 
- +* CTS is not guaranteed to run on all platforms that pacemaker itself does. + It calls commands such as service that may not be provided by all OSes. + Preparation ----------- Install Pacemaker (including CTS) on all machines. These scripts are coordinated with particular versions of Pacemaker, so you need the same version of CTS as the rest of Pacemaker, and you need the same version of pacemaker and CTS on both the test exerciser and the test cluster machines. +You can install CTS from source, although many distributions provide +packages that include it (e.g. pacemaker-cts or pacemaker-dev). +Typically, packages will install CTS as /usr/share/pacemaker/tests/cts. + Configure cluster communications (Corosync, CMAN or Heartbeat) on the cluster machines and verify everything works. NOTE: Do not run the cluster on the test exerciser machine. NOTE: Wherever machine names are mentioned in these configuration files, they must match the machines' `uname -n` name. This may or may not match the machines' FQDN (fully qualified domain name) - it depends on how you (and your OS) have named the machines. Run CTS ------- Now assuming you did all this, what you need to do is run CTSlab.py: python ./CTSlab.py [options] number-of-tests-to-run You must specify which nodes are part of the cluster with --nodes, e.g.: --nodes "pcmk-1 pcmk-2 pcmk-3" Most people will want to save the output with --outputfile, e.g.: --outputfile ~/cts.log Unless you want to test your pre-existing cluster configuration, you also want: --clobber-cib --populate-resources --test-ip-base $IP # e.g. --test-ip-base 192.168.9.100 and configure some sort of fencing: - --stonith $TYPE # e.g. "--stonith rhcs" to use fence_xvm or "--stonith lha" to use external/ssh + --stonith $TYPE # e.g. "--stonith xvm" to use fence_xvm or "--stonith lha" to use external/ssh A complete command line might look like: - - python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log \ - --clobber-cib --populate-resources --test-ip-base 192.168.9.100 \ - --stonith rhcs 50 + + python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log \ + --clobber-cib --populate-resources --test-ip-base 192.168.9.100 \ + --stonith xvm 50 For more options, use the --help option. NOTE: Perhaps a more convenient way to assemble a command line like the one above is to use the cluster_test script, which, at least in the source repository, sits in the same directory as this file. To extract the result of a particular test, run: crm_report -T $test +Memory testing +-------------- + +Pacemaker and CTS have various options for testing memory management. On the +cluster nodes, pacemaker components will use various environment variables to +control these options. How these variables are set varies by OS, but usually +they are set in the /etc/sysconfig/pacemaker or /etc/default/pacemaker file. + +Valgrind is a program for detecting memory management problems (such as +use-after-free errors). 
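+For illustration, the following contrived snippet (not from the pacemaker
+sources) contains the sort of use-after-free error that valgrind's memcheck
+tool reports:
+
+    #include <stdlib.h>
+
+    int main(void)
+    {
+        char *buf = malloc(16);
+
+        free(buf);
+        buf[0] = 'x';   /* invalid write: memcheck flags this use after free */
+        return 0;
+    }
+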
If you have valgrind installed, you can enable it by +setting the following environment variables on all cluster nodes: + + PCMK_valgrind_enabled=attrd,cib,crmd,lrmd,pengine,stonith-ng + VALGRIND_OPTS="--leak-check=full --trace-children=no --num-callers=25 + --log-file=/var/lib/pacemaker/valgrind-%p + --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions + --gen-suppressions=all" + +and running CTS with these options: + + --valgrind-tests --valgrind-procs="attrd cib crmd lrmd pengine stonith-ng" + +These options should only be set while specifically testing memory management, +because they may slow down the cluster significantly, and they will disable +writes to the CIB. If desired, you can enable valgrind on a subset of pacemaker +components rather than all of them as listed above. + +Valgrind will put a text file for each process in the location specified by +valgrind's --log-file option. For explanations of the messages valgrind +generates, see: + + http://valgrind.org/docs/manual/mc-manual.html + +Separately, the GNU C library's MALLOC_PERTURB_ and MALLOC_CHECK_ environment +variables, and GLib's G_SLICE variable, can be set to affect those libraries' +memory management functions. + +When using valgrind, G_SLICE should be set to "always-malloc", which helps +valgrind track memory by always using the malloc() and free() routines +directly. When not using valgrind, G_SLICE can be left unset, or set to +"debug-blocks", which enables GLib to catch many memory errors +but may impact performance. + +If the MALLOC_PERTURB_ environment variable is set to an 8-bit integer, the C +library will initialize all newly allocated bytes of memory to the integer +value, and will set all newly freed bytes of memory to the bitwise inverse of +the integer value. This helps catch uses of uninitialized or freed memory +blocks that might otherwise go unnoticed. Example: + + MALLOC_PERTURB_=221 + +If the MALLOC_CHECK_ environment variable is set, the C library will check for +certain heap corruption errors. The most useful value in testing is 3, which +will cause the library to print a message to stderr and abort execution. +Example: + + MALLOC_CHECK_=3 + +Valgrind should be enabled for either all nodes or none, but the C library +variables may be set differently on different nodes. + + +Remote node testing +------------------- + +If the pacemaker_remoted daemon is installed on all cluster nodes, CTS will +enable remote node tests. + +The remote node tests choose a random node, stop the cluster on it, start +pacemaker_remote on it, and add an ocf:pacemaker:remote resource to turn it +into a remote node. When the test is done, CTS will turn the node back into +a cluster node. + +To avoid conflicts, CTS will rename the node, prefixing the original node name +with "remote_". For example, "pcmk-1" will become "remote_pcmk-1". + +The name change may require special stonith configuration, if the fence agent +expects the node name to be the same as its hostname. A common approach is to +specify the "remote_" names in pcmk_host_list. If you use pcmk_host_list=all, +CTS will expand that to all cluster nodes and their "remote_" names. +You may additionally need a pcmk_host_map argument to map the "remote_" names +to the hostnames. 
Example: + + --stonith xvm --stonith-args \ + pcmk_arg_map=domain:uname,pcmk_host_list=all,pcmk_host_map=remote_pcmk-1:pcmk-1;remote_pcmk-2:pcmk-2 + + Mini-HOWTO: Allow passwordless remote SSH connections ----------------------------------------------------- The CTS scripts run "ssh -l root" so you don't have to do any of your testing logged in as root on the test machine. Here is how to allow such connections without requiring a password to be entered each time: * On your test exerciser, create an SSH key if you do not already have one. Most commonly, SSH keys will be in your ~/.ssh directory, with the private key file not having an extension, and the public key file named the same with the extension ".pub" (for example, ~/.ssh/id_dsa.pub). If you don't already have a key, you can create one with: ssh-keygen -t dsa * From your test exerciser, authorize your SSH public key for root on all test machines (both the exerciser and the cluster test machines): ssh-copy-id -i ~/.ssh/id_dsa.pub root@$MACHINE You will probably have to provide your password, and possibly say "yes" to some questions about accepting the identity of the test machines. The above assumes you have a DSA SSH key in the specified location; if you have some other type of key (RSA, ECDSA, etc.), use its file name in the -i option above. If you have an old version of SSH that doesn't have ssh-copy-id, you can take the single line out of your public key file (e.g. ~/.ssh/identity.pub or ~/.ssh/id_dsa.pub) and manually add it to root's ~/.ssh/authorized_keys file on each test machine. * To test, try this command from the exerciser machine for each of your cluster machines, and for the exerciser machine itself. ssh -l root $MACHINE If this works without prompting for a password, you're in business. If not, look at the documentation for your version of ssh. diff --git a/lib/common/remote.c b/lib/common/remote.c index 2a5b4493ed..24c94c3ab3 100644 --- a/lib/common/remote.c +++ b/lib/common/remote.c @@ -1,918 +1,922 @@ /* * Copyright (c) 2008 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include const int psk_tls_kx_order[] = { GNUTLS_KX_DHE_PSK, GNUTLS_KX_PSK, }; const int anon_tls_kx_order[] = { GNUTLS_KX_ANON_DH, GNUTLS_KX_DHE_RSA, GNUTLS_KX_DHE_DSS, GNUTLS_KX_RSA, 0 }; #endif /* Swab macros from linux/swab.h */ #ifdef HAVE_LINUX_SWAB_H # include #else /* * casts are necessary for constants, because we never know for sure * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
*/ #define __swab16(x) ((uint16_t)( \ (((uint16_t)(x) & (uint16_t)0x00ffU) << 8) | \ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8))) #define __swab32(x) ((uint32_t)( \ (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \ (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \ (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \ (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24))) #define __swab64(x) ((uint64_t)( \ (((uint64_t)(x) & (uint64_t)0x00000000000000ffULL) << 56) | \ (((uint64_t)(x) & (uint64_t)0x000000000000ff00ULL) << 40) | \ (((uint64_t)(x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \ (((uint64_t)(x) & (uint64_t)0x00000000ff000000ULL) << 8) | \ (((uint64_t)(x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \ (((uint64_t)(x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \ (((uint64_t)(x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \ (((uint64_t)(x) & (uint64_t)0xff00000000000000ULL) >> 56))) #endif #define REMOTE_MSG_VERSION 1 #define ENDIAN_LOCAL 0xBADADBBD struct crm_remote_header_v0 { uint32_t endian; /* Detect messages from hosts with different endian-ness */ uint32_t version; uint64_t id; uint64_t flags; uint32_t size_total; uint32_t payload_offset; uint32_t payload_compressed; uint32_t payload_uncompressed; /* New fields get added here */ } __attribute__ ((packed)); static struct crm_remote_header_v0 * crm_remote_header(crm_remote_t * remote) { struct crm_remote_header_v0 *header = (struct crm_remote_header_v0 *)remote->buffer; if(remote->buffer_offset < sizeof(struct crm_remote_header_v0)) { return NULL; } else if(header->endian != ENDIAN_LOCAL) { uint32_t endian = __swab32(header->endian); CRM_LOG_ASSERT(endian == ENDIAN_LOCAL); if(endian != ENDIAN_LOCAL) { crm_err("Invalid message detected, endian mismatch: %lx is neither %lx nor the swab'd %lx", ENDIAN_LOCAL, header->endian, endian); return NULL; } header->id = __swab64(header->id); header->flags = __swab64(header->flags); header->endian = __swab32(header->endian); header->version = __swab32(header->version); header->size_total = __swab32(header->size_total); header->payload_offset = __swab32(header->payload_offset); header->payload_compressed = __swab32(header->payload_compressed); header->payload_uncompressed = __swab32(header->payload_uncompressed); } return header; } #ifdef HAVE_GNUTLS_GNUTLS_H int crm_initiate_client_tls_handshake(crm_remote_t * remote, int timeout_ms) { int rc = 0; int pollrc = 0; time_t start = time(NULL); do { rc = gnutls_handshake(*remote->tls_session); if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) { pollrc = crm_remote_ready(remote, 1000); if (pollrc < 0) { /* poll returned error, there is no hope */ rc = -1; } } } while (((time(NULL) - start) < (timeout_ms / 1000)) && (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN)); if (rc < 0) { crm_trace("gnutls_handshake() failed with %d", rc); } return rc; } void * crm_create_anon_tls_session(int csock, int type /* GNUTLS_SERVER, GNUTLS_CLIENT */ , void *credentials) { gnutls_session_t *session = gnutls_malloc(sizeof(gnutls_session_t)); gnutls_init(session, type); # ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT /* http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication */ gnutls_priority_set_direct(*session, "NORMAL:+ANON-DH", NULL); /* gnutls_priority_set_direct (*session, "NONE:+VERS-TLS-ALL:+CIPHER-ALL:+MAC-ALL:+SIGN-ALL:+COMP-ALL:+ANON-DH", NULL); */ # else gnutls_set_default_priority(*session); gnutls_kx_set_priority(*session, anon_tls_kx_order); # endif gnutls_transport_set_ptr(*session, 
(gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); switch (type) { case GNUTLS_SERVER: gnutls_credentials_set(*session, GNUTLS_CRD_ANON, (gnutls_anon_server_credentials_t) credentials); break; case GNUTLS_CLIENT: gnutls_credentials_set(*session, GNUTLS_CRD_ANON, (gnutls_anon_client_credentials_t) credentials); break; } return session; } void * create_psk_tls_session(int csock, int type /* GNUTLS_SERVER, GNUTLS_CLIENT */ , void *credentials) { gnutls_session_t *session = gnutls_malloc(sizeof(gnutls_session_t)); gnutls_init(session, type); # ifdef HAVE_GNUTLS_PRIORITY_SET_DIRECT gnutls_priority_set_direct(*session, "NORMAL:+DHE-PSK:+PSK", NULL); # else gnutls_set_default_priority(*session); gnutls_kx_set_priority(*session, psk_tls_kx_order); # endif gnutls_transport_set_ptr(*session, (gnutls_transport_ptr_t) GINT_TO_POINTER(csock)); switch (type) { case GNUTLS_SERVER: gnutls_credentials_set(*session, GNUTLS_CRD_PSK, (gnutls_psk_server_credentials_t) credentials); break; case GNUTLS_CLIENT: gnutls_credentials_set(*session, GNUTLS_CRD_PSK, (gnutls_psk_client_credentials_t) credentials); break; } return session; } static int crm_send_tls(gnutls_session_t * session, const char *buf, size_t len) { const char *unsent = buf; int rc = 0; int total_send; if (buf == NULL) { return -1; } total_send = len; crm_trace("Message size: %d", len); while (TRUE) { rc = gnutls_record_send(*session, unsent, len); if (rc == GNUTLS_E_INTERRUPTED || rc == GNUTLS_E_AGAIN) { crm_debug("Retry"); } else if (rc < 0) { crm_err("Connection terminated rc = %d", rc); break; } else if (rc < len) { - crm_debug("Only sent %d of %d bytes", rc, len); + crm_debug("Sent %d of %d bytes", rc, len); len -= rc; unsent += rc; } else { - crm_debug("Sent %d bytes", rc); + crm_trace("Sent all %d bytes", rc); break; } } return rc < 0 ? rc : total_send; } #endif static int crm_send_plaintext(int sock, const char *buf, size_t len) { int rc = 0; const char *unsent = buf; int total_send; if (buf == NULL) { return -1; } total_send = len; crm_trace("Message on socket %d: size=%d", sock, len); retry: rc = write(sock, unsent, len); if (rc < 0) { switch (errno) { case EINTR: case EAGAIN: crm_trace("Retry"); goto retry; default: crm_perror(LOG_ERR, "Could only write %d of the remaining %d bytes", rc, (int)len); break; } } else if (rc < len) { crm_trace("Only sent %d of %d remaining bytes", rc, len); len -= rc; unsent += rc; goto retry; } else { crm_trace("Sent %d bytes: %.100s", rc, buf); } return rc < 0 ? 
rc : total_send; } static int crm_remote_sendv(crm_remote_t * remote, struct iovec * iov, int iovs) { int lpc = 0; int rc = -ESOCKTNOSUPPORT; for(; lpc < iovs; lpc++) { #ifdef HAVE_GNUTLS_GNUTLS_H if (remote->tls_session) { rc = crm_send_tls(remote->tls_session, iov[lpc].iov_base, iov[lpc].iov_len); } else if (remote->tcp_socket) { #else if (remote->tcp_socket) { #endif rc = crm_send_plaintext(remote->tcp_socket, iov[lpc].iov_base, iov[lpc].iov_len); } else { crm_err("Unsupported connection type"); } } return rc; } int crm_remote_send(crm_remote_t * remote, xmlNode * msg) { int rc = -1; static uint64_t id = 0; char *xml_text = dump_xml_unformatted(msg); struct iovec iov[2]; struct crm_remote_header_v0 *header; if (xml_text == NULL) { crm_err("Invalid XML, cannot send message"); return -1; } header = calloc(1, sizeof(struct crm_remote_header_v0)); iov[0].iov_base = header; iov[0].iov_len = sizeof(struct crm_remote_header_v0); iov[1].iov_base = xml_text; iov[1].iov_len = 1 + strlen(xml_text); id++; header->id = id; header->endian = ENDIAN_LOCAL; header->version = REMOTE_MSG_VERSION; header->payload_offset = iov[0].iov_len; header->payload_uncompressed = iov[1].iov_len; header->size_total = iov[0].iov_len + iov[1].iov_len; crm_trace("Sending len[0]=%d, start=%x\n", (int)iov[0].iov_len, *(int*)(void*)xml_text); rc = crm_remote_sendv(remote, iov, 2); if (rc < 0) { crm_err("Failed to send remote msg, rc = %d", rc); } free(iov[0].iov_base); free(iov[1].iov_base); return rc; } /*! * \internal * \brief Handle the receive buffer and parse out messages. * \note Data in the receive buffer is owned by this function once it is passed in. */ xmlNode * crm_remote_parse_buffer(crm_remote_t * remote) { xmlNode *xml = NULL; struct crm_remote_header_v0 *header = crm_remote_header(remote); if (remote->buffer == NULL || header == NULL) { return NULL; } /* take ownership of the buffer */ remote->buffer_offset = 0; /* Support compression on the receiving end now, in case we ever want to add it later */ if (header->payload_compressed) { int rc = 0; unsigned int size_u = 1 + header->payload_uncompressed; char *uncompressed = calloc(1, header->payload_offset + size_u); crm_trace("Decompressing message data %d bytes into %d bytes", header->payload_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed + header->payload_offset, &size_u, remote->buffer + header->payload_offset, header->payload_compressed, 1, 0); if (rc != BZ_OK && header->version > REMOTE_MSG_VERSION) { crm_warn("Couldn't decompress v%d message, we only understand v%d", header->version, REMOTE_MSG_VERSION); free(uncompressed); return NULL; } else if (rc != BZ_OK) { crm_err("Decompression failed: %s (%d)", bz2_strerror(rc), rc); free(uncompressed); return NULL; } CRM_ASSERT(size_u == header->payload_uncompressed); memcpy(uncompressed, remote->buffer, header->payload_offset); /* Preserve the header */ remote->buffer_size = header->payload_offset + size_u; free(remote->buffer); remote->buffer = uncompressed; header = crm_remote_header(remote); } CRM_LOG_ASSERT(remote->buffer[sizeof(struct crm_remote_header_v0) + header->payload_uncompressed - 1] == 0); xml = string2xml(remote->buffer + header->payload_offset); if (xml == NULL && header->version > REMOTE_MSG_VERSION) { crm_warn("Couldn't parse v%d message, we only understand v%d", header->version, REMOTE_MSG_VERSION); } else if (xml == NULL) { crm_err("Couldn't parse: '%.120s'", remote->buffer + header->payload_offset); } return xml; }
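/* Editor's note -- an illustrative sketch, not part of remote.c: the v0
 * framing that crm_remote_send() builds and crm_remote_parse_buffer()
 * consumes, assuming the packed header is 40 bytes (4+4+8+8+4+4+4+4) on
 * the platform at hand. Compiled standalone for clarity. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr_v0_sketch {      /* field-for-field mirror of crm_remote_header_v0 */
    uint32_t endian;
    uint32_t version;
    uint64_t id;
    uint64_t flags;
    uint32_t size_total;
    uint32_t payload_offset;
    uint32_t payload_compressed;
    uint32_t payload_uncompressed;
} __attribute__ ((packed));

int main(void)
{
    const char *xml = "<op/>";                  /* sample payload */
    struct hdr_v0_sketch h = { 0 };

    h.endian = 0xBADADBBD;                      /* matches ENDIAN_LOCAL */
    h.version = 1;                              /* matches REMOTE_MSG_VERSION */
    h.payload_offset = sizeof(h);               /* payload starts after the header */
    h.payload_uncompressed = strlen(xml) + 1;   /* payload keeps its terminating NUL */
    h.size_total = h.payload_offset + h.payload_uncompressed;

    /* For "<op/>": offset=40, payload=6, total=46 */
    printf("offset=%u payload=%u total=%u\n",
           (unsigned) h.payload_offset, (unsigned) h.payload_uncompressed,
           (unsigned) h.size_total);
    return 0;
}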
/*! * \internal * \brief Determine if a remote session has data to read * * \retval 0, timeout occurred. * \retval positive, data is ready to be read * \retval negative, session has ended */ int crm_remote_ready(crm_remote_t * remote, int timeout /* ms */ ) { struct pollfd fds = { 0, }; int sock = 0; int rc = 0; time_t start; #ifdef HAVE_GNUTLS_GNUTLS_H if (remote->tls_session) { void *sock_ptr = gnutls_transport_get_ptr(*remote->tls_session); sock = GPOINTER_TO_INT(sock_ptr); } else if (remote->tcp_socket) { #else if (remote->tcp_socket) { #endif sock = remote->tcp_socket; } else { crm_err("Unsupported connection type"); } if (sock <= 0) { crm_trace("No longer connected"); return -ENOTCONN; } start = time(NULL); errno = 0; do { fds.fd = sock; fds.events = POLLIN; /* If we got an EINTR while polling, and we have a * specific timeout we are trying to honor, attempt * to adjust the timeout to the closest second. */ if (errno == EINTR && (timeout > 0)) { timeout = timeout - ((time(NULL) - start) * 1000); if (timeout < 1000) { timeout = 1000; } } rc = poll(&fds, 1, timeout); } while (rc < 0 && errno == EINTR); return rc; } /*! * \internal * \brief Read bytes off a non-blocking remote connection. * * \note Use only with non-blocking sockets, and only after polling the socket. * This function returns once the current message is complete, the socket * read buffer is empty, or an error is encountered. * * \retval number of bytes received */ static size_t crm_remote_recv_once(crm_remote_t * remote) { int rc = 0; size_t read_len = sizeof(struct crm_remote_header_v0); struct crm_remote_header_v0 *header = crm_remote_header(remote); if(header) { /* Stop at the end of the current message */ read_len = header->size_total; } /* automatically grow the buffer when needed */ if(remote->buffer_size < read_len) { remote->buffer_size = 2 * read_len; crm_trace("Expanding buffer to %u bytes", remote->buffer_size); remote->buffer = realloc_safe(remote->buffer, remote->buffer_size + 1); CRM_ASSERT(remote->buffer != NULL); } #ifdef HAVE_GNUTLS_GNUTLS_H if (remote->tls_session) { rc = gnutls_record_recv(*(remote->tls_session), remote->buffer + remote->buffer_offset, remote->buffer_size - remote->buffer_offset); if (rc == GNUTLS_E_INTERRUPTED) { rc = -EINTR; } else if (rc == GNUTLS_E_AGAIN) { rc = -EAGAIN; } else if (rc < 0) { crm_debug("TLS receive failed: %s (%d)", gnutls_strerror(rc), rc); rc = -pcmk_err_generic; } } else if (remote->tcp_socket) { #else if (remote->tcp_socket) { #endif errno = 0; rc = read(remote->tcp_socket, remote->buffer + remote->buffer_offset, remote->buffer_size - remote->buffer_offset); if(rc < 0) { rc = -errno; } } else { crm_err("Unsupported connection type"); return -ESOCKTNOSUPPORT; } /* process any errors. */ if (rc > 0) { remote->buffer_offset += rc; /* always null terminate buffer, the +1 to alloc always allows for this. 
*/ remote->buffer[remote->buffer_offset] = '\0'; crm_trace("Received %u more bytes, %u total", rc, remote->buffer_offset); } else if (rc == -EINTR || rc == -EAGAIN) { crm_trace("non-blocking, exiting read: %s (%d)", pcmk_strerror(rc), rc); } else if (rc == 0) { crm_debug("EOF encountered after %u bytes", remote->buffer_offset); return -ENOTCONN; } else { crm_debug("Error receiving message after %u bytes: %s (%d)", remote->buffer_offset, pcmk_strerror(rc), rc); return -ENOTCONN; } header = crm_remote_header(remote); if(header) { if(remote->buffer_offset < header->size_total) { crm_trace("Read less than the advertised length: %u < %u bytes", remote->buffer_offset, header->size_total); } else { crm_trace("Read full message of %u bytes", remote->buffer_offset); return remote->buffer_offset; } } return -EAGAIN; } /*! * \internal * \brief Read data off the socket until at least one full message is present or a timeout occurs. * \retval TRUE message read * \retval FALSE full message not read */ gboolean crm_remote_recv(crm_remote_t * remote, int total_timeout /*ms */ , int *disconnected) { int rc; time_t start = time(NULL); int remaining_timeout = 0; if (total_timeout == 0) { total_timeout = 10000; } else if (total_timeout < 0) { total_timeout = 60000; } *disconnected = 0; remaining_timeout = total_timeout; while ((remaining_timeout > 0) && !(*disconnected)) { /* read some more off the tls buffer if we still have time left. */ crm_trace("waiting to receive remote msg, starting timeout %d, remaining_timeout %d", total_timeout, remaining_timeout); rc = crm_remote_ready(remote, remaining_timeout); if (rc == 0) { crm_err("poll timed out (%d ms) while waiting to receive msg", remaining_timeout); return FALSE; + } else if (rc == -EAGAIN) { + crm_trace("waiting for remote connection data (up to %dms)", + remaining_timeout); + } else if(rc < 0) { crm_debug("poll() failed: %s (%d)", pcmk_strerror(rc), rc); } else { rc = crm_remote_recv_once(remote); if(rc > 0) { return TRUE; } else if (rc < 0) { crm_debug("recv() failed: %s (%d)", pcmk_strerror(rc), rc); } } if(rc == -ENOTCONN) { *disconnected = 1; return FALSE; } remaining_timeout = remaining_timeout - ((time(NULL) - start) * 1000); } return FALSE; } struct tcp_async_cb_data { gboolean success; int sock; void *userdata; void (*callback) (void *userdata, int sock); int timeout; /*ms */ time_t start; }; static gboolean check_connect_finished(gpointer userdata) { struct tcp_async_cb_data *cb_data = userdata; int rc = 0; int sock = cb_data->sock; int error = 0; fd_set rset, wset; socklen_t len = sizeof(error); struct timeval ts = { 0, }; if (cb_data->success == TRUE) { goto dispatch_done; } FD_ZERO(&rset); FD_SET(sock, &rset); wset = rset; crm_trace("fd %d: checking to see if connect finished", sock); rc = select(sock + 1, &rset, &wset, NULL, &ts); if (rc < 0) { rc = errno; if ((errno == EINPROGRESS) || (errno == EAGAIN)) { /* reschedule if there is still time left */ if ((time(NULL) - cb_data->start) < (cb_data->timeout / 1000)) { goto reschedule; } else { rc = -ETIMEDOUT; } } crm_trace("fd %d: select failed: %d", sock, rc); goto dispatch_done; } else if (rc == 0) { if ((time(NULL) - cb_data->start) < (cb_data->timeout / 1000)) { goto reschedule; } crm_debug("fd %d: timeout during select", sock); rc = -ETIMEDOUT; goto dispatch_done; } else { crm_trace("fd %d: select returned success", sock); rc = 0; } /* can we read or write to the socket now? 
*/ if (FD_ISSET(sock, &rset) || FD_ISSET(sock, &wset)) { if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &error, &len) < 0) { crm_trace("fd %d: call to getsockopt failed", sock); rc = -1; goto dispatch_done; } if (error) { crm_trace("fd %d: error returned from getsockopt: %d", sock, error); rc = -1; goto dispatch_done; } } else { crm_trace("neither read nor write set after select"); rc = -1; goto dispatch_done; } dispatch_done: if (!rc) { crm_trace("fd %d: connected", sock); /* Success, set the return code to the sock to report to the callback */ rc = cb_data->sock; cb_data->sock = 0; } else { close(sock); } if (cb_data->callback) { cb_data->callback(cb_data->userdata, rc); } free(cb_data); return FALSE; reschedule: /* will check again next interval */ return TRUE; } static int internal_tcp_connect_async(int sock, const struct sockaddr *addr, socklen_t addrlen, int timeout /* ms */ , int *timer_id, void *userdata, void (*callback) (void *userdata, int sock)) { int rc = 0; int flag = 0; int interval = 500; int timer; struct tcp_async_cb_data *cb_data = NULL; if ((flag = fcntl(sock, F_GETFL)) >= 0) { if (fcntl(sock, F_SETFL, flag | O_NONBLOCK) < 0) { crm_err("fcntl() failed to set O_NONBLOCK"); return -1; } } rc = connect(sock, addr, addrlen); if (rc < 0 && (errno != EINPROGRESS) && (errno != EAGAIN)) { return -1; } cb_data = calloc(1, sizeof(struct tcp_async_cb_data)); cb_data->userdata = userdata; cb_data->callback = callback; cb_data->sock = sock; cb_data->timeout = timeout; cb_data->start = time(NULL); if (rc == 0) { /* The connect was successful immediately; we still return to the mainloop * and let the callback fire later. This spares the user of this API from * having to account for the callback being invoked before this function * returns. */ cb_data->success = TRUE; interval = 1; } /* check_connect_finished() mostly does a non-blocking poll on the socket * to see if we can read/write to it. Once we can, the connect has completed. * This method allows us to connect to the server without blocking the mainloop. * * This is a poor man's way of polling to see when the connection finished. * At some point we should figure out a way to use a mainloop fd callback for this. * Something about the way mainloop is currently polling prevents this from working at the * moment though. */ crm_trace("fd %d: scheduling to check if connect finished in %dms", sock, interval); timer = g_timeout_add(interval, check_connect_finished, cb_data); if (timer_id) { *timer_id = timer; } return 0; } static int internal_tcp_connect(int sock, const struct sockaddr *addr, socklen_t addrlen) { int flag = 0; int rc = connect(sock, addr, addrlen); if (rc == 0) { if ((flag = fcntl(sock, F_GETFL)) >= 0) { if (fcntl(sock, F_SETFL, flag | O_NONBLOCK) < 0) { crm_err("fcntl() failed to set O_NONBLOCK"); return -1; } } } return rc; } /*! * \internal * \brief TCP connection to the server at the specified port * \retval negative, failed to connect. 
* \retval positive, sock fd */ int crm_remote_tcp_connect_async(const char *host, int port, int timeout, /*ms */ int *timer_id, void *userdata, void (*callback) (void *userdata, int sock)) { char buffer[256]; struct addrinfo *res = NULL; struct addrinfo *rp = NULL; struct addrinfo hints; const char *server = host; int ret_ga; int sock = -1; /* getaddrinfo */ memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_CANONNAME; crm_debug("Looking up %s", server); ret_ga = getaddrinfo(server, NULL, &hints, &res); if (ret_ga) { crm_err("getaddrinfo: %s", gai_strerror(ret_ga)); return -1; } if (!res || !res->ai_addr) { crm_err("getaddrinfo failed"); goto async_cleanup; } for (rp = res; rp != NULL; rp = rp->ai_next) { struct sockaddr *addr = rp->ai_addr; if (!addr) { continue; } if (rp->ai_canonname) { server = res->ai_canonname; } crm_debug("Got address %s for %s", server, host); /* create socket */ sock = socket(rp->ai_family, SOCK_STREAM, IPPROTO_TCP); if (sock == -1) { crm_err("Socket creation failed for remote client connection."); continue; } memset(buffer, 0, DIMOF(buffer)); if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)(void*)addr; addr_in->sin6_port = htons(port); inet_ntop(addr->sa_family, &addr_in->sin6_addr, buffer, DIMOF(buffer)); } else { struct sockaddr_in *addr_in = (struct sockaddr_in *)(void*)addr; addr_in->sin_port = htons(port); inet_ntop(addr->sa_family, &addr_in->sin_addr, buffer, DIMOF(buffer)); } crm_info("Attempting to connect to remote server at %s:%d", buffer, port); if (callback) { if (internal_tcp_connect_async (sock, rp->ai_addr, rp->ai_addrlen, timeout, timer_id, userdata, callback) == 0) { goto async_cleanup; /* Success for now, we'll hear back later in the callback */ } } else { if (internal_tcp_connect(sock, rp->ai_addr, rp->ai_addrlen) == 0) { break; /* Success */ } } close(sock); sock = -1; } async_cleanup: if (res) { freeaddrinfo(res); } return sock; } int crm_remote_tcp_connect(const char *host, int port) { return crm_remote_tcp_connect_async(host, port, -1, NULL, NULL, NULL); }
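Editor's note: the async API above reports its result through the supplied callback, which receives either the connected socket fd or a negative value; crm_remote_tcp_connect_async() itself returns a negative value only when no attempt could be started. Because completion is polled via g_timeout_add(), a GLib main loop must be running. A minimal, hypothetical caller sketch (the host name is illustrative; 3121 is the default pacemaker_remote port):

/* Hypothetical caller sketch -- assumes GLib and the declarations above. */
#include <stdio.h>
#include <unistd.h>
#include <glib.h>

static void
connected_cb(void *userdata, int sock)
{
    GMainLoop *loop = userdata;

    if (sock < 0) {
        fprintf(stderr, "connect failed: %d\n", sock);
    } else {
        fprintf(stderr, "connected on fd %d\n", sock);
        close(sock);
    }
    g_main_loop_quit(loop);
}

int
main(void)
{
    GMainLoop *loop = g_main_loop_new(NULL, FALSE);

    /* 5000ms timeout; the timer id is not needed here, so pass NULL */
    if (crm_remote_tcp_connect_async("node1.example.com", 3121, 5000,
                                     NULL, loop, connected_cb) < 0) {
        fprintf(stderr, "could not start connection attempt\n");
        return 1;
    }
    g_main_loop_run(loop);
    g_main_loop_free(loop);
    return 0;
}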