diff --git a/agents/virt/man/fence_virt.conf.5 b/agents/virt/man/fence_virt.conf.5
index dfb3504f..2a02accf 100644
--- a/agents/virt/man/fence_virt.conf.5
+++ b/agents/virt/man/fence_virt.conf.5
@@ -1,354 +1,361 @@
 .TH fence_virt.conf 5
 
 .SH NAME
 fence_virt.conf - configuration file for fence_virtd
 
 .SH DESCRIPTION
 
 The fence_virt.conf file contains configuration information for fence_virtd,
 a fencing request routing daemon for clusters of virtual machines.
 
 The file is tree-structured.  There are parent/child relationships and sibling
 relationships between the nodes.
 
   foo {
     bar {
       baz = "1";
     }
   }
 
 There are three primary sections of fence_virt.conf.
 
 .SH SECTIONS
 .SS fence_virtd
 
 This section contains global information about how fence_virtd is to operate.
 The most important pieces of information are as follows:
 
 .TP
 .B listener
 .
 the listener plugin for receiving fencing requests from clients
 
 .TP
 .B backend
 .
 the plugin to be used to carry out fencing requests
 
 .TP
 .B foreground
 .
 do not fork into the background.
 
 .TP
 .B wait_for_init
 .
 wait for the frontend and backends to become available rather than giving up immediately.
 This replaces wait_for_backend in 0.2.x.
 
 .TP
 .B module_path
 .
 the module path to search for plugins
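+.PP
+A minimal \fBfence_virtd\fP block combining these options might look like the
+following (the module path shown is an example and depends on the
+distribution):
+
+ fence_virtd {
+  listener = "multicast";
+  backend = "libvirt";
+  wait_for_init = "1";
+  module_path = "/usr/lib64/fence-virt";
+ }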
 
 .SS listeners
 
 This section contains listener-specific configuration information; see the
 section about listeners below.
 
 .SS backends
 
 This section contains backend-specific configuration information; see the
 section about backends below.
 
 .SS groups
 
 This section contains static maps of which virtual machines
 may fence which other virtual machines; see the section
 about groups below.
 
 
 .SH LISTENERS
 
 There are various listeners available for fence_virtd, each one handles
 decoding and authentication of a given fencing request.  The following 
 configuration blocks belong in the \fBlisteners\fP section of fence_virt.conf.
 
 .SS multicast
 .TP
 .B key_file
 .
 the shared key file to use (default: /etc/cluster/fence_xvm.key).
 
 .TP
 .B hash
 .
 the weakest hashing algorithm allowed for client requests.  Clients may send packets with stronger hashes than the one specified, but not weaker ones.  (default: sha256, but could
 be sha1, sha512, or none)
 
 .TP
 .B auth
 .
 the hashing algorithm to use for the simplistic challenge-response authentication
 (default: sha256, but could be sha1, sha512, or none)
 
 .TP
 .B family
 .
 the IP family to use (default: ipv4, but may be ipv6)
 
 .TP
 .B address
 .
 the multicast address to listen on (default: 225.0.0.12)
 
 .TP
 .B port
 .
 the multicast port to listen on (default: 1229)
 
 .TP
 .B interface
 .
 interface to listen on.  By default, fence_virtd listens on all interfaces.
 However, this causes problems in some environments where the host computer
 is used as a gateway.
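+.PP
+For illustration, a multicast block in the \fBlisteners\fP section might look
+like this (the interface name is an example only):
+
+ multicast {
+  key_file = "/etc/cluster/fence_xvm.key";
+  interface = "virbr0";
+  hash = "sha256";
+  auth = "sha256";
+ }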
 
 .SS serial
 
 The serial listener plugin utilizes libvirt's serial (or VMChannel)
 mapping to listen for requests.  When using the serial listener, it is
 necessary to add a serial port (preferably pointing to /dev/ttyS1) or
 a channel (preferably pointing to 10.0.2.179:1229) to the
 libvirt domain description.  Note that only type
 .BR unix ,
 mode
 .B bind
 serial ports and channels are supported.  Example libvirt XML:
 
 .in 8
   <\fBserial\fP type='unix'>
     <source mode='bind' path='/sandbox/guests/fence_socket_molly'/>
     <target port='1'/>
   </serial>
   <\fBchannel\fP type='unix'>
     <source mode='bind' path='/sandbox/guests/fence_molly_vmchannel'/>
     <target type='guestfwd' address='10.0.2.179' port='1229'/>
   </channel>
 .in 0
 
 .TP
 .B uri
 .
 the URI to use when connecting to libvirt by the serial plugin.
 
 .TP
 .B path
 .
 The directory in which the domain serial port or channel sockets are created (from the example above: /sandbox/guests). Sockets must reside in this directory in order to be considered valid. This can be used to prevent fence_virtd from using the wrong sockets.
 
 .TP
 .B mode
 .
 This selects the type of sockets to register.  Valid values are "serial"
 (default) and "vmchannel".
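+.PP
+A serial block in the \fBlisteners\fP section matching the libvirt XML above
+might look like this:
+
+ serial {
+  uri = "qemu:///system";
+  path = "/sandbox/guests";
+  mode = "serial";
+ }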
 
 .SS tcp
 The tcp listener operates similarly to the multicast listener but uses TCP sockets for communication instead of using multicast packets.
 
 .TP
 .B key_file
 .
 the shared key file to use (default: /etc/cluster/fence_xvm.key).
 
 .TP
 .B hash
 .
 the hashing algorithm to use for packet signing (default: sha256, but could
 be sha1, sha512, or none)
 
 .TP
 .B auth
 .
 the hashing algorithm to use for the simplistic challenge-response authentication
 (default: sha256, but could be sha1, sha512, or none)
 
 .TP
 .B family
 .
 the IP family to use (default: ipv4, but may be ipv6)
 
 .TP
 .B address
 .
 the IP address to listen on (default: 127.0.0.1 for IPv4, ::1 for IPv6)
 
 .TP
 .B port
 .
 the TCP port to listen on (default: 1229)
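+.PP
+An illustrative tcp block in the \fBlisteners\fP section (the address is an
+example only):
+
+ tcp {
+  key_file = "/etc/cluster/fence_xvm.key";
+  address = "192.168.122.1";
+  port = "1229";
+ }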
 
 .SS vsock
 The vsock listener operates similarly to the multicast listener but uses virtual machine sockets (AF_VSOCK) for communication instead of using multicast packets.
 
 .TP
 .B key_file
 .
 the shared key file to use (default: /etc/cluster/fence_xvm.key).
 
 .TP
 .B hash
 .
 the hashing algorithm to use for packet signing (default: sha256, but could
 be sha1, sha512, or none)
 
 .TP
 .B auth
 .
 the hashing algorithm to use for the simplistic challenge-response authentication
 (default: sha256, but could be sha1, sha512, or none)
 
 .TP
 .B port
 .
 the vsock port to listen on (default: 1229)
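+.PP
+An illustrative vsock block in the \fBlisteners\fP section:
+
+ vsock {
+  key_file = "/etc/cluster/fence_xvm.key";
+  port = "1229";
+ }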
 
 .SH BACKENDS
 
 There are various backends available for fence_virtd, each one handles
 routing a fencing request to a hypervisor or management tool.  The following 
 configuration blocks belong in the \fBbackends\fP section of fence_virt.conf.
 
 .SS libvirt
 
 The libvirt plugin is the simplest plugin.  It is used in environments where
 routing fencing requests between multiple hosts is not required, for example
 by a user running a cluster of virtual machines on a single desktop computer.
 
 .TP
 .B uri
 .
 the URI to use when connecting to libvirt.
 
 All libvirt URIs are accepted and passed as-is.
 
 See https://libvirt.org/uri.html#remote-uris for examples.
 
 NOTE: When VMs are run as a non-root user, the socket path must be set as part
 of the URI.
 
 Example: qemu:///session?socket=/run/user/<UID>/libvirt/virtqemud-sock
 
 .SS libvirt-qmf
 
 The libvirt-qmf plugin acts as a QMFv2 Console to the libvirt-qmf daemon in
 order to route fencing requests over AMQP to the appropriate computer.
 
 .TP
 .B host
 .
 host or IP address of qpid broker.  Defaults to 127.0.0.1.
 
 .TP
 .B port
 .
 IP port of qpid broker.  Defaults to 5672.
 
 .TP
 .B username
 .
 Username for GSSAPI, if configured.
 
 .TP
 .B service
 .
 Qpid service to connect to.
 
 .TP
 .B gssapi
 .
 If set to 1, have fence_virtd use GSSAPI for authentication when communicating
 with the Qpid broker.  Default is 0 (off).
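+.PP
+An illustrative libvirt-qmf block in the \fBbackends\fP section (the broker
+address is an example only):
+
+ libvirt-qmf {
+  host = "192.168.122.1";
+  port = "5672";
+  gssapi = "0";
+ }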
 
 .SS cpg
 
 The cpg plugin uses corosync CPG and libvirt to track virtual
 machines and route fencing requests to the appropriate computer.
 
 .TP
 .B uri
 .
 the URI to use when connecting to libvirt by the cpg plugin.
 
 .TP
 .B name_mode
 .
 This selects whether the cpg plugin identifies virtual machines by name
 or by UUID.  For compatibility with fence_xvm and fence_xvmd, which used
 'name', the default is still 'name'.  However, it is strongly recommended
 to use 'uuid' instead of 'name' in all cluster environments involving
 more than one physical host in order to avoid the potential for name
 collisions.
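+.PP
+A cpg block in the \fBbackends\fP section using UUID name mode might look
+like this:
+
+ cpg {
+  uri = "qemu:///system";
+  name_mode = "uuid";
+ }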
 
 .SH GROUPS
 
 Fence_virtd supports static maps which allow grouping of VMs.  The
 groups are arbitrary and are checked at fence time.  Any member of
 a group may fence any other member.  Hosts may be assigned to multiple
 groups if desired.
 
 .SS group
 
 This defines a group.
 
+.TP
+.B name
+.
+Optionally defines the name of the group.  Useful only for configuration
+readability and debugging of configuration parsing.
+
 .TP
 .B uuid
 .
 Defines UUID as a member of a group.  It can be used multiple times
 to specify both node name and UUID values that can be fenced.
 
 .TP
 .B ip
 .
 Defines an IP which is allowed to send fencing requests
 for members of this group (e.g. for multicast).  It can be used
 multiple times to allow more than 1 IP to send fencing requests to
 the group.  It is highly recommended that this be used in conjunction
 with a key file.
 
 
 
 .SH EXAMPLE
 
  fence_virtd {
   listener = "multicast";
   backend = "cpg";
  }
 
  # this is the listeners section
 
  listeners {
   multicast {
    key_file = "/etc/cluster/fence_xvm.key";
   }
  }
 
  backends {
   libvirt { 
    uri = "qemu:///system";
   }
  }
  
  groups {
   group {
+   name = "cluster1";
    ip = "192.168.1.1";
    ip = "192.168.1.2";
    uuid = "44179d3f-6c63-474f-a212-20c8b4b25b16";
    uuid = "1ce02c4b-dfa1-42cb-b5b1-f0b1091ece60";
    uuid = "node1";
    uuid = "node2";
   }
  }
 
 .SH SEE ALSO
 fence_virtd(8), fence_virt(8), fence_xvm(8), fence(8)
diff --git a/agents/virt/server/static_map.c b/agents/virt/server/static_map.c
index f3caf1ce..4a5b84c0 100644
--- a/agents/virt/server/static_map.c
+++ b/agents/virt/server/static_map.c
@@ -1,226 +1,229 @@
 #include "config.h"
 
 #include <stdio.h>
 #include <string.h>
 #include <signal.h>
 #include <stdlib.h>
 #include <assert.h>
 
 #include "simpleconfig.h"
 #include "static_map.h"
 #include "list.h"
 #include "debug.h"
 #include "serial.h"
 #include "uuid-test.h"
 
 struct perm_entry {
 	list_head();
 	char name[129];
 };
 
 struct perm_group {
 	list_head();
 	struct perm_entry *uuids;
 	struct perm_entry *ips;
 	char name[129];
 };
 
 
 static void
 static_map_cleanup(void **info)
 {
 	struct perm_group *groups = (struct perm_group *)(*info);
 	struct perm_group *group;
 	struct perm_entry *entry;
 
 	while (groups) {
 		group = groups;
 		list_remove(&groups, group);
 		while (group->uuids) {
 			entry = group->uuids;
 			list_remove(&group->uuids, entry);
 			free(entry);
 		}
 		while (group->ips) {
 			entry = group->ips;
 			list_remove(&group->ips, entry);
 			free(entry);
 		}
 		free(group);
 	}
 
 	*info = NULL;
 }
 
 
 static int
 static_map_check(void *info, const char *value1, const char *value2)
 {
 	struct perm_group *groups = (struct perm_group *)info;
 	struct perm_group *group;
 	struct perm_entry *left, *tmp;
 	int x, y, uuid = 0;
 
 	if (!info)
 		return 1; /* no maps == wide open */
 
 	uuid = is_uuid(value1);
 
 	list_for(&groups, group, x) {
 		left = NULL;
 
 		if (uuid) {
 			list_for(&group->uuids, tmp, y) {
 				if (!strcasecmp(tmp->name, value1)) {
 					left = tmp;
 					break;
 				}
 			}
 		} else {
 			list_for(&group->ips, tmp, y) {
 				if (!strcasecmp(tmp->name, value1)) {
 					left = tmp;
 					break;
 				}
 			}
 		}
 
 		if (!left)
 			continue;
 
 		list_for(&group->uuids, tmp, y) {
 			if (!strcasecmp(tmp->name, value2)) {
 				return 1;
 			}
 		}
 	}
 
 	return 0;
 }
 
 
 static int
 static_map_load(void *config_ptr, void **perm_info)
 {
 	config_object_t *config = config_ptr;
 	int group_idx = 0;
 	int entry_idx = 0;
 	int found;
 	char value[128];
 	char buf[256];
 	char buf2[512];
 	struct perm_group *group = NULL, *groups = NULL;
 	struct perm_entry *entry = NULL;
 
 	if (!perm_info)
 		return -1;
 
 	do {
 		snprintf(buf, sizeof(buf)-1, "groups/group[%d]", ++group_idx);
 
 		if (sc_get(config, buf, value, sizeof(value)) != 0) {
 			snprintf(buf2, sizeof(buf2)-1, "%s/@uuid", buf);
 			if (sc_get(config, buf2, value, sizeof(value)) != 0) {
 				snprintf(buf2, sizeof(buf2)-1, "%s/@ip", buf);
 				if (sc_get(config, buf2, value,
 					   sizeof(value)) != 0) {
 					break;
 				}
 			}
-			snprintf(value, sizeof(value), "unnamed-%d",
-				 group_idx);
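+			/* Prefer an explicit "name" attribute as the group
+			 * name; otherwise fall back to a generated name. */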
+			snprintf(buf2, sizeof(buf2)-1, "%s/@name", buf);
+			if (sc_get(config, buf2, value, sizeof(value)) != 0) {
+				snprintf(value, sizeof(value), "unnamed-%d",
+					 group_idx);
+			}
 		}
 
 		group = malloc(sizeof(*group));
 		assert(group);
 		memset(group, 0, sizeof(*group));
 		strncpy(group->name, value, sizeof(group->name));
 		dbg_printf(3, "Group: %s\n", value);
 
 		found = 0;
 		entry_idx = 0;
 		do {
 			snprintf(buf2, sizeof(buf2)-1, "%s/@uuid[%d]",
 				 buf, ++entry_idx);
 
 			if (sc_get(config, buf2, value, sizeof(value)) != 0) {
 				break;
 			}
 
 			++found;
 			entry = malloc(sizeof(*entry));
 			assert(entry);
 			memset(entry, 0, sizeof(*entry));
 			strncpy(entry->name, value, sizeof(entry->name));
 			dbg_printf(3, " - UUID Entry: %s\n", value);
 
 			list_insert(&group->uuids, entry);
 
 		} while (1);
 
 		entry_idx = 0;
 		do {
 			snprintf(buf2, sizeof(buf2)-1, "%s/@ip[%d]",
 				 buf, ++entry_idx);
 
 			if (sc_get(config, buf2, value, sizeof(value)) != 0) {
 				break;
 			}
 
 			++found;
 			entry = malloc(sizeof(*entry));
 			assert(entry);
 			memset(entry, 0, sizeof(*entry));
 			strncpy(entry->name, value, sizeof(entry->name));
 			dbg_printf(3, " - IP Entry: %s\n", value);
 
 			list_insert(&group->ips, entry);
 
 		} while (1);
 
 
 		if (!found)
 			free(group);
 		else
 			list_insert(&groups, group);
 
 	} while (1);
 
 	*perm_info = groups;
 
 	return 0;
 }
 
 
 static const map_object_t static_map_obj = {
 	.load = static_map_load,
 	.check = static_map_check,
 	.cleanup = static_map_cleanup,
 	.info = NULL
 };
 
 
 void *
 map_init(void)
 {
 	map_object_t *o;
 
 	o = malloc(sizeof(*o));
 	if (!o)
 		return NULL;
 	memset(o, 0, sizeof(*o));
 	memcpy(o, &static_map_obj, sizeof(*o));
 
 	return (void *)o;
 }
 
 
 void
 map_release(void *c)
 {
 	map_object_t *o = (map_object_t *)c;
 
 	static_map_cleanup(&o->info);
 	free(c);
 }