diff --git a/crmd/notify.c b/crmd/notify.c
index ca2be0faa7..916a04ce06 100644
--- a/crmd/notify.c
+++ b/crmd/notify.c
@@ -1,199 +1,199 @@
/*
 * Copyright (C) 2015 Andrew Beekhof
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include
#include
#include

#include "notify.h"

char *notify_script = NULL;
char *notify_target = NULL;

static const char *notify_keys[] = {
    "CRM_notify_recipient",
    "CRM_notify_node",
    "CRM_notify_nodeid",
    "CRM_notify_rsc",
    "CRM_notify_task",
    "CRM_notify_interval",
    "CRM_notify_desc",
    "CRM_notify_status",
    "CRM_notify_target_rc",
    "CRM_notify_rc",
    "CRM_notify_kind",
    "CRM_notify_version",
};

void
crmd_enable_notifications(const char *script, const char *target)
{
    free(notify_script);
    notify_script = NULL;

    free(notify_target);
    notify_target = NULL;

    if(script == NULL || safe_str_eq(script, "/dev/null")) {
        crm_notice("Notifications disabled");
        return;
    }

    notify_script = strdup(script);
    notify_target = strdup(target);
    crm_notice("Notifications enabled");
}

static void
set_notify_key(const char *name, const char *cvalue, char *value)
{
    int lpc;
    bool found = 0;

    if(cvalue == NULL) {
        cvalue = value;
    }

    for(lpc = 0; lpc < DIMOF(notify_keys); lpc++) {
        if(safe_str_eq(name, notify_keys[lpc])) {
            found = 1;
            crm_trace("Setting notify key %s = '%s'", name, cvalue);
            setenv(name, cvalue, 1);
            break;
        }
    }

    CRM_ASSERT(found != 0);
    free(value);
}

static void
crmd_notify_complete(svc_action_t *op)
{
    if(op->rc == 0) {
        crm_info("Notification %d (%s) complete", op->sequence, op->agent);
    } else {
        crm_warn("Notification %d (%s) failed: %d", op->sequence, op->agent, op->rc);
    }
}

static void
send_notification(const char *kind)
{
    int lpc;
    svc_action_t *notify = NULL;
    static int operations = 0;

    crm_debug("Sending '%s' notification to '%s' via '%s'", kind, notify_target, notify_script);

    set_notify_key("CRM_notify_recipient", notify_target, NULL);
    set_notify_key("CRM_notify_kind", kind, NULL);
    set_notify_key("CRM_notify_version", VERSION, NULL);

    notify = services_action_create_generic(notify_script, NULL);

    notify->timeout = 300;
    notify->standard = strdup("event");
    notify->id = strdup(notify_script);
    notify->agent = strdup(notify_script);
    notify->sequence = ++operations;

    if(services_action_async(notify, &crmd_notify_complete) == FALSE) {
        services_action_free(notify);
    }

    for(lpc = 0; lpc < DIMOF(notify_keys); lpc++) {
        unsetenv(notify_keys[lpc]);
    }
}

void
crmd_notify_node_event(crm_node_t *node)
{
    if(notify_script == NULL) {
        return;
    }

    set_notify_key("CRM_notify_node", node->uname, NULL);
    set_notify_key("CRM_notify_nodeid", NULL, crm_itoa(node->id));
    set_notify_key("CRM_notify_desc", node->state, NULL);

    send_notification("node");
}

void
crmd_notify_fencing_op(stonith_event_t * e)
{
    char *desc = NULL;

-    if(notify_script) {
+    if (notify_script == NULL) {
        return;
    }

    desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)",
                             e->operation, e->origin,
                             e->target, pcmk_strerror(e->result), e->id);

    set_notify_key("CRM_notify_node", e->target, NULL);
    set_notify_key("CRM_notify_task", e->operation, NULL);
    set_notify_key("CRM_notify_desc", NULL, desc);
    set_notify_key("CRM_notify_rc", NULL, crm_itoa(e->result));

    send_notification("fencing");
}

void
crmd_notify_resource_op(const char *node, lrmd_event_data_t * op)
{
    int target_rc = 0;

    if(notify_script == NULL) {
        return;
    }

    target_rc = rsc_op_expected_rc(op);
    if(op->interval == 0 && target_rc == op->rc && safe_str_eq(op->op_type, RSC_STATUS)) {
        /* Leave it up to the script if they want to notify for
         * 'failed' probes, only swallow ones for which the result was
         * unexpected.
         *
         * Even if we find a resource running, it was probably because
         * someone erased the status section.
         */
        return;
    }

    set_notify_key("CRM_notify_node", node, NULL);
    set_notify_key("CRM_notify_rsc", op->rsc_id, NULL);
    set_notify_key("CRM_notify_task", op->op_type, NULL);
    set_notify_key("CRM_notify_interval", NULL, crm_itoa(op->interval));
    set_notify_key("CRM_notify_target_rc", NULL, crm_itoa(target_rc));
    set_notify_key("CRM_notify_status", NULL, crm_itoa(op->op_status));
    set_notify_key("CRM_notify_rc", NULL, crm_itoa(op->rc));

    if(op->op_status == PCMK_LRM_OP_DONE) {
        set_notify_key("CRM_notify_desc", services_ocf_exitcode_str(op->rc), NULL);
    } else {
        set_notify_key("CRM_notify_desc", services_lrm_status_str(op->op_status), NULL);
    }

    send_notification("resource");
}
diff --git a/cts/Makefile.am b/cts/Makefile.am
index 732f4c5fcc..2a7a7d0027 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -1,54 +1,54 @@
#
# heartbeat: Linux-HA heartbeat code
#
# Copyright (C) 2001 Michael Moerz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
CLEANFILES = LSBDummy HBDummy

EXTRA_DIST = $(cts_SCRIPTS) $(cts_DATA)

ctsdir = $(datadir)/$(PACKAGE)/tests/cts
ctslibdir = $(pyexecdir)/cts

ctslib_PYTHON = __init__.py \
                CTSvars.py \
                CM_lha.py \
                CM_ais.py \
                CTS.py \
                CTSaudits.py \
                CTStests.py \
                CTSscenarios.py \
                CIB.py \
                cib_xml.py \
                environment.py \
                logging.py \
                patterns.py \
                remote.py \
                watcher.py

-cts_DATA = README cts.supp
+cts_DATA = README.md cts.supp

cts_SCRIPTS = cts \
              CTSlab.py \
              lxc_autogen.sh \
              LSBDummy \
              HBDummy \
              $(top_srcdir)/fencing/fence_dummy

SUBDIRS = benchmark
diff --git a/cts/README b/cts/README.md
similarity index 77%
rename from cts/README
rename to cts/README.md
index 23ffcd04ef..0486e9eac1 100644
--- a/cts/README
+++ b/cts/README.md
@@ -1,235 +1,253 @@
+# Pacemaker Cluster Test Suite (CTS)
-    PACEMAKER
-
-    CLUSTER TEST SUITE (CTS)
-
-
-Purpose
--------
+## Purpose

CTS thoroughly exercises a pacemaker test cluster by running a randomized
series of predefined tests on the cluster. CTS can be run against a
pre-existing cluster configuration or (more typically) overwrite the existing
configuration with a test configuration.
-Requirements
-------------
+## Requirements

* Three or more machines (one test exerciser and two or more test cluster
  machines).

* The test cluster machines should be on the same subnet and have journalling
  filesystems (ext3, ext4, xfs, etc.) for all of their filesystems other than
  /boot. You also need a number of free IP addresses on that subnet if you
  intend to test mutual IP address takeover.

* The test exerciser machine doesn't need to be on the same subnet as the test
  cluster machines. Minimal demands are made on the exerciser machine - it
  just has to stay up during the tests.

* It helps a lot in tracking problems if all machines' clocks are closely
  synchronized. NTP does this automatically, but you can do it by hand if you
  want.

* The exerciser needs to be able to ssh over to the cluster nodes as root
  without a password challenge. Configure ssh accordingly (see the Mini-HOWTO
  at the end of this document for more details).

* The exerciser needs to be able to resolve the machine names of the test
  cluster - either by DNS or by /etc/hosts.

* CTS is not guaranteed to run on all platforms that pacemaker itself does.
  It calls commands such as `service` that may not be provided by all OSes.

-Preparation
------------
+## Preparation

Install Pacemaker (including CTS) on all machines. These scripts are
coordinated with particular versions of Pacemaker, so you need the same
version of CTS as the rest of Pacemaker, and you need the same version of
pacemaker and CTS on both the test exerciser and the test cluster machines.

You can install CTS from source, although many distributions provide packages
that include it (e.g. pacemaker-cts or pacemaker-dev). Typically, packages
will install CTS as /usr/share/pacemaker/tests/cts.

Configure cluster communications (Corosync, CMAN or Heartbeat) on the cluster
machines and verify everything works.

NOTE: Do not run the cluster on the test exerciser machine.

NOTE: Wherever machine names are mentioned in these configuration files, they
must match the machines' `uname -n` name. This may or may not match the
machines' FQDN (fully qualified domain name) - it depends on how you (and
your OS) have named the machines.

-Run CTS
--------
+## Run CTS

Now assuming you did all this, what you need to do is run CTSlab.py:

    python ./CTSlab.py [options] number-of-tests-to-run

You must specify which nodes are part of the cluster with --nodes, e.g.:

    --nodes "pcmk-1 pcmk-2 pcmk-3"

Most people will want to save the output with --outputfile, e.g.:

    --outputfile ~/cts.log

Unless you want to test your pre-existing cluster configuration, you also
want:

    --clobber-cib
    --populate-resources
    --test-ip-base $IP    # e.g. --test-ip-base 192.168.9.100

and configure some sort of fencing:

-    --stonith $TYPE # e.g. "--stonith xvm" to use fence_xvm or "--stonith lha" to use external/ssh
+    --stonith $TYPE # e.g. "--stonith xvm" to use fence_xvm or "--stonith lha" to use external/ssh

A complete command line might look like:

-    python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log \
-        --clobber-cib --populate-resources --test-ip-base 192.168.9.100 \
-        --stonith xvm 50
+    python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log \
+        --clobber-cib --populate-resources --test-ip-base 192.168.9.100 \
+        --stonith xvm 50

For more options, use the --help option.

NOTE: Perhaps a more convenient way to compile a command line like the above
is to use the cluster_test script that, at least in the source repository,
sits in the same directory as this very file.
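If instead you only want to exercise a pre-existing cluster configuration,
the CIB-rewriting options above can simply be dropped. A minimal sketch of
such an invocation (the node names and test count are placeholders, and it
assumes fencing is already configured in that CIB):

    python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log 50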
To extract the result of a particular test, run:

    crm_report -T $test

-Memory testing
---------------
+## Optional/advanced testing
+
+### Memory testing

Pacemaker and CTS have various options for testing memory management. On the
cluster nodes, pacemaker components will use various environment variables to
control these options. How these variables are set varies by OS, but usually
they are set in the /etc/sysconfig/pacemaker or /etc/default/pacemaker file.

Valgrind is a program for detecting memory management problems (such as
use-after-free errors). If you have valgrind installed, you can enable it by
setting the following environment variables on all cluster nodes:

-    PCMK_valgrind_enabled=attrd,cib,crmd,lrmd,pengine,stonith-ng
-    VALGRIND_OPTS="--leak-check=full --trace-children=no --num-callers=25
-        --log-file=/var/lib/pacemaker/valgrind-%p
-        --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions
-        --gen-suppressions=all"
+    PCMK_valgrind_enabled=attrd,cib,crmd,lrmd,pengine,stonith-ng
+    VALGRIND_OPTS="--leak-check=full --trace-children=no --num-callers=25
+        --log-file=/var/lib/pacemaker/valgrind-%p
+        --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions
+        --gen-suppressions=all"

and running CTS with these options:

-    --valgrind-tests --valgrind-procs="attrd cib crmd lrmd pengine stonith-ng"
+    --valgrind-tests --valgrind-procs="attrd cib crmd lrmd pengine stonith-ng"

These options should only be set while specifically testing memory
management, because they may slow down the cluster significantly, and they
will disable writes to the CIB. If desired, you can enable valgrind on a
subset of pacemaker components rather than all of them as listed above.

Valgrind will put a text file for each process in the location specified by
valgrind's --log-file option. For explanations of the messages valgrind
-generates, see:
-
-    http://valgrind.org/docs/manual/mc-manual.html
+generates, see http://valgrind.org/docs/manual/mc-manual.html

Separately, if you are using the GNU C library, the G_SLICE, MALLOC_PERTURB_,
and MALLOC_CHECK_ environment variables can be set to affect the library's
memory management functions.

When using valgrind, G_SLICE should be set to "always-malloc", which helps
valgrind track memory by always using the malloc() and free() routines
directly. When not using valgrind, G_SLICE can be left unset, or set to
"debug-blocks", which enables the C library to catch many memory errors but
may impact performance.

If the MALLOC_PERTURB_ environment variable is set to an 8-bit integer, the C
library will initialize all newly allocated bytes of memory to the integer
value, and will set all newly freed bytes of memory to the bitwise inverse of
the integer value. This helps catch uses of uninitialized or freed memory
blocks that might otherwise go unnoticed. Example:

-    MALLOC_PERTURB_=221
+    MALLOC_PERTURB_=221

If the MALLOC_CHECK_ environment variable is set, the C library will check
for certain heap corruption errors. The most useful value in testing is 3,
which will cause the library to print a message to stderr and abort
execution. Example:

-    MALLOC_CHECK_=3
+    MALLOC_CHECK_=3

Valgrind should be enabled for either all nodes or none, but the C library
variables may be set differently on different nodes.

-Remote node testing
--------------------
+### Remote node testing

If the pacemaker_remoted daemon is installed on all cluster nodes, CTS will
enable remote node tests.
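One quick way to confirm the daemon is present everywhere, assuming the
passwordless root ssh access described under Requirements (the node names and
install path are only the usual defaults, not something CTS requires):

    for n in pcmk-1 pcmk-2 pcmk-3; do ssh $n 'ls /usr/sbin/pacemaker_remoted'; done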
The remote node tests choose a random node, stop the cluster on it, start
pacemaker_remote on it, and add an ocf:pacemaker:remote resource to turn it
into a remote node. When the test is done, CTS will turn the node back into a
cluster node.

To avoid conflicts, CTS will rename the node, prefixing the original node
name with "remote_". For example, "pcmk-1" will become "remote_pcmk-1".

The name change may require special stonith configuration, if the fence agent
expects the node name to be the same as its hostname. A common approach is to
specify the "remote_" names in pcmk_host_list. If you use pcmk_host_list=all,
CTS will expand that to all cluster nodes and their "remote_" names. You may
additionally need a pcmk_host_map argument to map the "remote_" names to the
hostnames. Example:

-    --stonith xvm --stonith-args \
-        pcmk_arg_map=domain:uname,pcmk_host_list=all,pcmk_host_map=remote_pcmk-1:pcmk-1;remote_pcmk-2:pcmk-2
+    --stonith xvm --stonith-args \
+        pcmk_arg_map=domain:uname,pcmk_host_list=all,pcmk_host_map=remote_pcmk-1:pcmk-1;remote_pcmk-2:pcmk-2
+
+### Remote node testing with valgrind
+
+When running the remote node tests, the pacemaker components on the cluster
+nodes can be run under valgrind as described in the "Memory testing" section.
+However, pacemaker_remote cannot be run under valgrind that way, because it is
+started by the OS's regular boot system and not by pacemaker.
+
+Details vary by system, but the goal is to set the VALGRIND_OPTS environment
+variable and then start pacemaker_remoted by prefixing it with the path to
+valgrind.
+
+The init script and systemd service file provided with pacemaker_remote will
+load the pacemaker environment variables from the same location used by other
+pacemaker components, so VALGRIND_OPTS will be set correctly if using one of
+those.
+
+For an OS using systemd, you can override the ExecStart parameter to run
+valgrind. For example:
+
+    mkdir /etc/systemd/system/pacemaker_remote.service.d
+    cat >/etc/systemd/system/pacemaker_remote.service.d/valgrind.conf <
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include static qb_ipcs_service_t *cib_ro = NULL; static qb_ipcs_service_t *cib_rw = NULL; static qb_ipcs_service_t *cib_shm = NULL; static qb_ipcs_service_t *attrd_ipcs = NULL; static qb_ipcs_service_t *crmd_ipcs = NULL; static qb_ipcs_service_t *stonith_ipcs = NULL; /* ipc providers == crmd clients connecting from cluster nodes */ GHashTable *ipc_providers; /* ipc clients == things like cibadmin, crm_resource, connecting locally */ GHashTable *ipc_clients; static int32_t ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc_channel) { void *key = NULL; void *value = NULL; crm_client_t *client; crm_client_t *ipc_proxy = NULL; GHashTableIter iter; xmlNode *msg; crm_trace("Connection %p on channel %s", c, ipc_channel); if (g_hash_table_size(ipc_providers) == 0) { crm_err("No ipc providers available for uid %d gid %d", uid, gid); return -EREMOTEIO; } g_hash_table_iter_init(&iter, ipc_providers); if (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { /* grab the first provider available, any provider in this * table will work. Usually there will only be one. These are * lrmd client connections originating for a cluster node's crmd. */ ipc_proxy = value; } else { crm_err("No ipc providers available for uid %d gid %d", uid, gid); return -EREMOTEIO; } /* this new client is a local ipc client on a remote * guest wanting to access the ipc on any available cluster nodes */ client = crm_client_new(c, uid, gid); if (client == NULL) { return -EREMOTEIO; } /* This ipc client is bound to a single ipc provider. 
If the * provider goes away, this client is disconnected */ client->userdata = strdup(ipc_proxy->id); client->name = crm_strdup_printf("proxy-%s-%d-%.8s", ipc_channel, client->pid, client->id); g_hash_table_insert(ipc_clients, client->id, client); msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, "new"); crm_xml_add(msg, F_LRMD_IPC_IPC_SERVER, ipc_channel); crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); lrmd_server_send_notify(ipc_proxy, msg); free_xml(msg); crm_debug("created new ipc proxy with session id %s", client->id); return 0; } static int32_t crmd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, CRM_SYSTEM_CRMD); } static int32_t attrd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, T_ATTRD); } static int32_t stonith_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, "stonith-ng"); } static int32_t cib_proxy_accept_rw(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, cib_channel_rw); } static int32_t cib_proxy_accept_ro(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, cib_channel_ro); } static void ipc_proxy_created(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); } void ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml) { const char *session = crm_element_value(xml, F_LRMD_IPC_SESSION); const char *msg_type = crm_element_value(xml, F_LRMD_IPC_OP); xmlNode *msg = get_message_xml(xml, F_LRMD_IPC_MSG); crm_client_t *ipc_client = crm_client_get_by_id(session); int rc = 0; if (ipc_client == NULL) { xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, "destroy"); crm_xml_add(msg, F_LRMD_IPC_SESSION, session); lrmd_server_send_notify(ipc_proxy, msg); free_xml(msg); return; } /* This is an event or response from the ipc provider * going to the local ipc client. * * Looking at the chain of events. * * -----remote node----------------|---- cluster node ------ * ipc_client <--1--> this code <--2--> crmd:remote_proxy_cb/remote_proxy_relay_event() <----3----> ipc server * * This function is receiving a msg from connection 2 * and forwarding it to connection 1. */ if (safe_str_eq(msg_type, "event")) { crm_trace("Sending event to %s", ipc_client->id); rc = crm_ipcs_send(ipc_client, 0, msg, crm_ipc_server_event); } else if (safe_str_eq(msg_type, "response")) { int msg_id = 0; crm_element_value_int(xml, F_LRMD_IPC_MSG_ID, &msg_id); crm_trace("Sending response to %d - %s", ipc_client->request_id, ipc_client->id); rc = crm_ipcs_send(ipc_client, msg_id, msg, FALSE); CRM_LOG_ASSERT(msg_id == ipc_client->request_id); ipc_client->request_id = 0; } else if (safe_str_eq(msg_type, "destroy")) { qb_ipcs_disconnect(ipc_client->ipcs); } else { crm_err("Unknown ipc proxy msg type %s" , msg_type); } if (rc < 0) { crm_warn("IPC Proxy send to ipc client %s failed, rc = %d", ipc_client->id, rc); } } static int32_t ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; crm_client_t *client = crm_client_get(c); crm_client_t *ipc_proxy = crm_client_get_by_id(client->userdata); xmlNode *request = NULL; xmlNode *msg = NULL; if (!ipc_proxy) { qb_ipcs_disconnect(client->ipcs); return 0; } /* This is a request from the local ipc client going * to the ipc provider. * * Looking at the chain of events. 
* * -----remote node----------------|---- cluster node ------ * ipc_client <--1--> this code <--2--> crmd:remote_proxy_dispatch_internal() <----3----> ipc server * * This function is receiving a request from connection * 1 and forwarding it to connection 2. */ request = crm_ipcs_recv(client, data, size, &id, &flags); if (!request) { return 0; } CRM_CHECK(client != NULL, crm_err("Invalid client"); - return FALSE); + free_xml(request); return FALSE); CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client); - return FALSE); + free_xml(request); return FALSE); /* this ensures that synced request/responses happen over the event channel * in the crmd, allowing the crmd to process the messages async */ set_bit(flags, crm_ipc_proxied); client->request_id = id; msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, "request"); crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); crm_xml_add(msg, F_LRMD_IPC_CLIENT, crm_client_name(client)); crm_xml_add(msg, F_LRMD_IPC_USER, client->user); crm_xml_add_int(msg, F_LRMD_IPC_MSG_ID, id); crm_xml_add_int(msg, F_LRMD_IPC_MSG_FLAGS, flags); add_message_xml(msg, F_LRMD_IPC_MSG, request); lrmd_server_send_notify(ipc_proxy, msg); + free_xml(request); free_xml(msg); return 0; } static int32_t ipc_proxy_closed(qb_ipcs_connection_t * c) { crm_client_t *client = crm_client_get(c); crm_client_t *ipc_proxy; if (client == NULL) { return 0; } ipc_proxy = crm_client_get_by_id(client->userdata); crm_trace("Connection %p", c); if (ipc_proxy) { xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); crm_xml_add(msg, F_LRMD_IPC_OP, "destroy"); crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); lrmd_server_send_notify(ipc_proxy, msg); free_xml(msg); } g_hash_table_remove(ipc_clients, client->id); free(client->userdata); client->userdata = NULL; crm_client_destroy(client); return 0; } static void ipc_proxy_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); ipc_proxy_closed(c); } static struct qb_ipcs_service_handlers crmd_proxy_callbacks = { .connection_accept = crmd_proxy_accept, .connection_created = ipc_proxy_created, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers attrd_proxy_callbacks = { .connection_accept = attrd_proxy_accept, .connection_created = ipc_proxy_created, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers stonith_proxy_callbacks = { .connection_accept = stonith_proxy_accept, .connection_created = ipc_proxy_created, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers cib_proxy_callbacks_ro = { .connection_accept = cib_proxy_accept_ro, .connection_created = ipc_proxy_created, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers cib_proxy_callbacks_rw = { .connection_accept = cib_proxy_accept_rw, .connection_created = ipc_proxy_created, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; void ipc_proxy_add_provider(crm_client_t *ipc_proxy) { if (ipc_providers == NULL) { return; } g_hash_table_insert(ipc_providers, ipc_proxy->id, ipc_proxy); } void ipc_proxy_remove_provider(crm_client_t *ipc_proxy) { GHashTableIter iter; 
crm_client_t *ipc_client = NULL; char *key = NULL; GList *remove_these = NULL; GListPtr gIter = NULL; if (ipc_providers == NULL) { return; } g_hash_table_remove(ipc_providers, ipc_proxy->id); g_hash_table_iter_init(&iter, ipc_clients); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & ipc_client)) { const char *proxy_id = ipc_client->userdata; if (safe_str_eq(proxy_id, ipc_proxy->id)) { crm_info("ipc proxy connection for client %s pid %d destroyed because cluster node disconnected.", ipc_client->id, ipc_client->pid); /* we can't remove during the iteration, so copy items * to a list we can destroy later */ remove_these = g_list_append(remove_these, ipc_client); } } for (gIter = remove_these; gIter != NULL; gIter = gIter->next) { ipc_client = gIter->data; qb_ipcs_disconnect(ipc_client->ipcs); } /* just frees the list, not the elements in the list */ g_list_free(remove_these); } void ipc_proxy_init(void) { ipc_clients = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); ipc_providers = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); cib_ipc_servers_init(&cib_ro, &cib_rw, &cib_shm, &cib_proxy_callbacks_ro, &cib_proxy_callbacks_rw); attrd_ipc_server_init(&attrd_ipcs, &attrd_proxy_callbacks); stonith_ipc_server_init(&stonith_ipcs, &stonith_proxy_callbacks); crmd_ipcs = crmd_ipc_server_init(&crmd_proxy_callbacks); if (crmd_ipcs == NULL) { crm_err("Failed to create crmd server: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void ipc_proxy_cleanup(void) { if (ipc_providers) { g_hash_table_destroy(ipc_providers); } if (ipc_clients) { g_hash_table_destroy(ipc_clients); } cib_ipc_servers_destroy(cib_ro, cib_rw, cib_shm); qb_ipcs_destroy(attrd_ipcs); qb_ipcs_destroy(stonith_ipcs); qb_ipcs_destroy(crmd_ipcs); cib_ro = NULL; cib_rw = NULL; cib_shm = NULL; ipc_providers = NULL; ipc_clients = NULL; }
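As a point of reference for the crmd/notify.c hunk above: the configured
notification script receives all of its information through the CRM_notify_*
environment variables that set_notify_key() exports before send_notification()
launches it. A minimal sketch of such a handler, assuming an arbitrary log
file path (the destination is illustrative only; nothing in the patch mandates
it):

    #!/bin/sh
    # Hypothetical handler: append one line per crmd notification.
    # The CRM_notify_* variables are exported by crmd/notify.c; some of them
    # (e.g. CRM_notify_rsc) are only set for the matching notification kind.
    echo "$(date) ${CRM_notify_kind}: node=${CRM_notify_node}" \
         "rsc=${CRM_notify_rsc} task=${CRM_notify_task}" \
         "rc=${CRM_notify_rc} desc=${CRM_notify_desc}" \
        >> /var/log/crm_notify_events.log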