Page Menu
Home
ClusterLabs Projects
Search
Configure Global Search
Log In
Files
F1842015
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
75 KB
Referenced Files
None
Subscribers
None
View Options
diff --git a/fence/fenced/cpg.c b/fence/fenced/cpg.c
index ccebbd9b7..5b6c8269c 100644
--- a/fence/fenced/cpg.c
+++ b/fence/fenced/cpg.c
@@ -1,1436 +1,1554 @@
#include "fd.h"
#include "config.h"
/* wire protocol version; major number must match for messages to be accepted
   (checked in deliver_cb) */
static unsigned int protocol_active[3] = {1, 0, 0};
+/* state for the "daemon" cpg that tracks fenced daemons themselves,
+   as opposed to fence domain membership; see process_cpg/setup_cpg below */
+static cpg_handle_t cpg_handle_daemon;
+static struct cpg_address daemon_member_list[MAX_NODES];
+static int daemon_member_list_entries;
/* one cpg member as seen by a particular change (confchg event) */
struct member {
struct list_head list;
int nodeid;
int start; /* 1 if we received a start message for this change */
int added; /* 1 if added by this change */
int failed; /* 1 if failed in this change */
int disallowed; /* reserved; the assignment is commented out in receive_start */
uint32_t start_flags; /* FD_MFLG_* flags copied from the sender's header */
};
/* fd_info and id_info: for syncing state in start message */
/* all fields travel little-endian on the wire; fd_info_in() converts in place */
struct fd_info {
uint32_t fd_info_size; /* sizeof(struct fd_info) on the sender */
uint32_t id_info_size; /* sizeof(struct id_info) on the sender */
uint32_t id_info_count; /* number of id_info entries that follow */
uint32_t started_count; /* sender's completed-cycle counter (0 = never started) */
int member_count;
int joined_count;
int remove_count;
int failed_count;
};
/* flag: the nodeid is a current member of the change being described */
#define IDI_NODEID_IS_MEMBER 0x00000001
/* per-node record in start/complete messages; little-endian on the wire,
   converted by id_info_in() */
struct id_info {
int nodeid;
uint32_t flags;
/* the following syncs info to make queries useful from all nodes */
int fence_external_node;
int fence_master;
int fence_how;
uint64_t fence_time;
uint64_t fence_external_time;
};
/* convert a received fd_info from little-endian wire order to host order,
   in place */
static void fd_info_in(struct fd_info *fi)
{
fi->fd_info_size = le32_to_cpu(fi->fd_info_size);
fi->id_info_size = le32_to_cpu(fi->id_info_size);
fi->id_info_count = le32_to_cpu(fi->id_info_count);
fi->started_count = le32_to_cpu(fi->started_count);
fi->member_count = le32_to_cpu(fi->member_count);
fi->joined_count = le32_to_cpu(fi->joined_count);
fi->remove_count = le32_to_cpu(fi->remove_count);
fi->failed_count = le32_to_cpu(fi->failed_count);
}
/* convert a received id_info from little-endian wire order to host order,
   in place */
static void id_info_in(struct id_info *id)
{
id->nodeid = le32_to_cpu(id->nodeid);
id->flags = le32_to_cpu(id->flags);
id->fence_external_node = le32_to_cpu(id->fence_external_node);
id->fence_master = le32_to_cpu(id->fence_master);
id->fence_how = le32_to_cpu(id->fence_how);
id->fence_time = le64_to_cpu(id->fence_time);
id->fence_external_time = le64_to_cpu(id->fence_external_time);
}
/* Convert every id_info entry in a received message to host byte order,
   in place.  Each entry is advanced by the sender's id_info_size so a
   sender with a larger (newer) struct still parses correctly. */
static void ids_in(struct fd_info *fi, struct id_info *ids)
{
	struct id_info *id;
	uint32_t i;	/* unsigned: fi->id_info_count is uint32_t, avoids
			   signed/unsigned comparison */

	id = ids;
	for (i = 0; i < fi->id_info_count; i++) {
		id_info_in(id);
		id = (struct id_info *)((char *)id + fi->id_info_size);
	}
}
/* Map a message type to a human-readable name for log output. */
static char *msg_name(int type)
{
	if (type == FD_MSG_START)
		return "start";
	if (type == FD_MSG_VICTIM_DONE)
		return "victim_done";
	if (type == FD_MSG_COMPLETE)
		return "complete";
	if (type == FD_MSG_EXTERNAL)
		return "external";
	return "unknown";
}
/* Multicast one message on the given cpg handle.
   Returns 0 on success, -1 on a cpg error other than TRY_AGAIN.
   NOTE(review): TRY_AGAIN is retried forever (1 ms sleep, error logged
   every 100 attempts) — presumably acceptable because corosync flow
   control clears eventually; confirm there is no shutdown path that
   needs this loop to bail out. */
static int _send_message(cpg_handle_t h, void *buf, int len, int type)
{
struct iovec iov;
cpg_error_t error;
int retries = 0;
iov.iov_base = buf;
iov.iov_len = len;
retry:
error = cpg_mcast_joined(h, CPG_TYPE_AGREED, &iov, 1);
if (error == CPG_ERR_TRY_AGAIN) {
retries++;
usleep(1000);
if (!(retries % 100))
log_error("cpg_mcast_joined retry %d %s",
retries, msg_name(type));
goto retry;
}
if (error != CPG_OK) {
log_error("cpg_mcast_joined error %d handle %llx %s",
error, (unsigned long long)h, msg_name(type));
return -1;
}
if (retries)
log_debug("cpg_mcast_joined retried %d %s",
retries, msg_name(type));
return 0;
}
/* header fields caller needs to set: type, to_nodeid, flags, msgdata */
/* Fill in the remaining header fields (version, sender nodeid), convert
   the whole header to little-endian wire order, and multicast the buffer
   on the domain's cpg.  buf/len include the header and any payload. */
static void fd_send_message(struct fd *fd, char *buf, int len)
{
struct fd_header *hd = (struct fd_header *) buf;
int type = hd->type; /* save host-order type for logging before conversion */
hd->version[0] = cpu_to_le16(protocol_active[0]);
hd->version[1] = cpu_to_le16(protocol_active[1]);
hd->version[2] = cpu_to_le16(protocol_active[2]);
hd->type = cpu_to_le16(hd->type);
hd->nodeid = cpu_to_le32(our_nodeid);
hd->to_nodeid = cpu_to_le32(hd->to_nodeid);
hd->flags = cpu_to_le32(hd->flags);
hd->msgdata = cpu_to_le32(hd->msgdata);
_send_message(fd->cpg_handle, buf, len, type);
}
static struct member *find_memb(struct change *cg, int nodeid)
{
struct member *memb;
list_for_each_entry(memb, &cg->members, list) {
if (memb->nodeid == nodeid)
return memb;
}
return NULL;
}
/* Return the fence domain whose cpg handle matches h, or NULL. */
static struct fd *find_fd_handle(cpg_handle_t h)
{
	struct fd *domain;

	list_for_each_entry(domain, &domains, list)
		if (domain->cpg_handle == h)
			return domain;

	return NULL;
}
static struct fd *find_fd_ci(int ci)
{
struct fd *fd;
list_for_each_entry(fd, &domains, list) {
if (fd->cpg_client == ci)
return fd;
}
return NULL;
}
/* Free a change and all member structs on its members/removed lists.
   Tolerates cg == NULL: add_change()'s fail_nomem path reaches here with
   cg NULL when its very first malloc fails, and the list heads of a NULL
   change must not be dereferenced. */
void free_cg(struct change *cg)
{
	struct member *memb, *safe;

	if (!cg)
		return;

	list_for_each_entry_safe(memb, safe, &cg->members, list) {
		list_del(&memb->list);
		free(memb);
	}
	list_for_each_entry_safe(memb, safe, &cg->removed, list) {
		list_del(&memb->list);
		free(memb);
	}
	free(cg);
}
static struct node_history *get_node_history(struct fd *fd, int nodeid)
{
struct node_history *node;
list_for_each_entry(node, &fd->node_history, list) {
if (node->nodeid == nodeid)
return node;
}
return NULL;
}
/* Create a zeroed node_history record for nodeid if we don't already
   track it.  Allocation failure is silently ignored, as before. */
static void node_history_init(struct fd *fd, int nodeid)
{
	struct node_history *nh;

	if (get_node_history(fd, nodeid))
		return;

	nh = malloc(sizeof(*nh));
	if (!nh)
		return;
	memset(nh, 0, sizeof(*nh));

	nh->nodeid = nodeid;
	list_add_tail(&nh->list, &fd->node_history);
}
/* Record the time nodeid was (re)added to the domain. */
static void node_history_start(struct fd *fd, int nodeid)
{
	struct node_history *nh = get_node_history(fd, nodeid);

	if (!nh) {
		log_error("node_history_start no nodeid %d", nodeid);
		return;
	}
	nh->add_time = time(NULL);
}
/* Record the time nodeid cleanly left the domain. */
static void node_history_left(struct fd *fd, int nodeid)
{
	struct node_history *nh = get_node_history(fd, nodeid);

	if (!nh) {
		log_error("node_history_left no nodeid %d", nodeid);
		return;
	}
	nh->left_time = time(NULL);
}
/* Record the time nodeid failed; flag it so check_quorum_done() waits
   until cman also sees the failure. */
static void node_history_fail(struct fd *fd, int nodeid)
{
	struct node_history *nh = get_node_history(fd, nodeid);

	if (!nh) {
		log_error("node_history_fail no nodeid %d", nodeid);
		return;
	}
	nh->fail_time = time(NULL);
	nh->check_quorum = 1;
}
/* The master node updates this info when it fences the victim, the other
domain members update it when they receive the status message from the
master. */
/* Record who fenced the victim, how, and at what time (the master's
   timestamp, so all members agree on it). */
void node_history_fence(struct fd *fd, int victim, int master, int how,
uint64_t mastertime)
{
struct node_history *node;
node = get_node_history(fd, victim);
if (!node) {
log_error("node_history_fence no nodeid %d", victim);
return;
}
node->fence_master = master;
node->fence_time = mastertime;
node->fence_how = how;
}
/* When the fence_node command is run on a machine, it will first call
libfence:fence_node(victim) to do the fencing. Afterward, it should call
libfenced:fence_external(victim) to tell fenced what it's done, so fenced
can avoid fencing the node a second time. This will result in a message
being sent to all domain members which will update their node_history entry
for the victim. The recover.c:fence_victims() code can check whether
a victim has been externally fenced since the last add_time, and if so
skip the fencing. This won't always work perfectly; a node might in some
circumstances be fenced a second time by fenced. */
/* Record that 'from' reports having externally fenced nodeid, using our
   local time for the comparison in is_fenced_external(). */
static void node_history_fence_external(struct fd *fd, int nodeid, int from)
{
struct node_history *node;
node = get_node_history(fd, nodeid);
if (!node) {
log_error("node_history_fence_external no nodeid %d", nodeid);
return;
}
node->fence_external_time = time(NULL);
node->fence_external_node = from;
}
/* Merge fencing history carried in another node's start message into our
   node_history records: only fill in fence/external-fence details we do
   not already have (our own non-zero timestamps win). */
static void save_history(struct fd *fd, struct fd_info *fi, struct id_info *ids)
{
struct node_history *node;
struct id_info *id;
int i;
id = ids;
for (i = 0; i < fi->id_info_count; i++) {
node = get_node_history(fd, id->nodeid);
if (!node)
goto next;
if (!node->fence_time && id->fence_time) {
node->fence_master = id->fence_master;
node->fence_time = id->fence_time;
node->fence_how = id->fence_how;
log_debug("save_history %d master %d time %llu how %d",
node->nodeid, node->fence_master,
(unsigned long long)node->fence_time,
node->fence_how);
}
if (!node->fence_external_time && id->fence_external_time) {
node->fence_external_time = id->fence_external_time;
node->fence_external_node = id->fence_external_node;
log_debug("save_history %d ext node %d ext time %llu",
node->nodeid, node->fence_external_node,
(unsigned long long)node->fence_external_time);
}
next:
/* step by the sender's id_info_size, not sizeof(struct id_info) */
id = (struct id_info *)((char *)id + fi->id_info_size);
}
}
/* call this from libfenced:fenced_external() */
/* Broadcast an FD_MSG_EXTERNAL message telling all domain members that
   'victim' has already been fenced outside of fenced. */
void send_external(struct fd *fd, int victim)
{
	struct fd_header *hd;
	char *buf;
	int len;

	len = sizeof(struct fd_header);
	buf = malloc(len);
	if (!buf) {
		/* log the failure like the other send_* functions do,
		   instead of dropping the message silently */
		log_error("send_external no mem len %d", len);
		return;
	}
	memset(buf, 0, len);

	hd = (struct fd_header *)buf;
	hd->type = FD_MSG_EXTERNAL;
	hd->msgdata = victim;

	log_debug("send_external %u", victim);

	fd_send_message(fd, buf, len);

	free(buf);
}
/* now, if the victim dies and the fence domain sees it fail,
it will be added as an fd victim, but fence_victims() will
call is_fenced_external() which will see that it's already
fenced and bypass fencing it again */
/* Handle FD_MSG_EXTERNAL: hd->msgdata is the victim nodeid, hd->nodeid
   the reporting node. */
static void receive_external(struct fd *fd, struct fd_header *hd, int len)
{
log_debug("receive_external from %d len %d victim %d",
hd->nodeid, len, hd->msgdata);
node_history_fence_external(fd, hd->msgdata, hd->nodeid);
}
/* Return 1 if nodeid has been externally fenced since it last joined
   the domain (external fence time newer than add_time), else 0. */
int is_fenced_external(struct fd *fd, int nodeid)
{
	struct node_history *nh = get_node_history(fd, nodeid);

	if (!nh) {
		log_error("is_fenced_external no nodeid %d", nodeid);
		return 0;
	}

	return (nh->fence_external_time > nh->add_time) ? 1 : 0;
}
/* completed victim must be removed from victims list before calling this
   because we count the number of entries on the victims list for remaining */
/* Master broadcasts FD_MSG_VICTIM_DONE after fencing 'victim' so all
   members drop it from their victims list and record how it was fenced. */
void send_victim_done(struct fd *fd, int victim)
{
	struct change *cg = list_first_entry(&fd->changes, struct change, list);
	struct fd_header *hd;
	struct id_info *id;
	struct node_history *node;
	char *buf;
	int len;

	/* look up the history record before allocating, so a missing
	   record cannot leak the message buffer (the original returned
	   without freeing buf) */
	node = get_node_history(fd, victim);
	if (!node) {
		log_error("send_victim_done %d no node struct", victim);
		return;
	}

	len = sizeof(struct fd_header) + sizeof(struct id_info);
	buf = malloc(len);
	if (!buf) {
		log_error("send_victim_done no mem len %d", len);
		return;
	}
	memset(buf, 0, len);

	hd = (struct fd_header *)buf;
	hd->type = FD_MSG_VICTIM_DONE;
	hd->msgdata = cg->seq;
	if (fd->init_complete)
		hd->flags |= FD_MFLG_COMPLETE;

	/* single id_info payload describing the fenced victim */
	id = (struct id_info *)(buf + sizeof(struct fd_header));
	id->nodeid = cpu_to_le32(victim);
	id->fence_master = cpu_to_le32(our_nodeid);
	id->fence_time = cpu_to_le64(node->fence_time);
	id->fence_how = cpu_to_le32(node->fence_how);

	log_debug("send_victim_done %u flags %x victim %d",
		  cg->seq, hd->flags, victim);

	fd_send_message(fd, buf, len);

	free(buf);
}
/* Handle FD_MSG_VICTIM_DONE from the master: remove the named victim from
   our victims list and record the fencing details in node_history.
   Our own copy of the message is ignored; the master already removed the
   victim locally when it fenced it. */
static void receive_victim_done(struct fd *fd, struct fd_header *hd, int len)
{
struct node *node;
uint32_t seq = hd->msgdata;
int found;
struct id_info *id;
log_debug("receive_victim_done %d:%u flags %x len %d", hd->nodeid, seq,
hd->flags, len);
/* check that hd->nodeids is fd->master ? */
/* I don't think there's any problem with the master removing the
victim when it's done instead of waiting to remove it when it
receives its own victim_done message, like the other nodes do */
if (hd->nodeid == our_nodeid)
return;
/* single id_info payload follows the header; convert from wire order */
id = (struct id_info *)((char *)hd + sizeof(struct fd_header));
id_info_in(id);
found = 0;
list_for_each_entry(node, &fd->victims, list) {
if (node->nodeid == id->nodeid) {
log_debug("receive_victim_done remove %d how %d",
id->nodeid, id->fence_how);
node_history_fence(fd, id->nodeid, id->fence_master,
id->fence_how, id->fence_time);
list_del(&node->list);
free(node);
found = 1;
break;
}
}
if (!found)
log_debug("receive_victim_done victim %d not found from %d",
id->nodeid, hd->nodeid);
}
/* Return 1 when cman has caught up with every node failure we've seen
   and the cluster is quorate; 0 means keep waiting. */
static int check_quorum_done(struct fd *fd)
{
struct node_history *node;
int wait_count = 0;
/* We don't want to trust the cman_quorate value until we know
that cman has seen the same nodes fail that we have. So, we
first make sure that all nodes we've seen fail are also
failed in cman, then we can just check cman_quorate. This
assumes that we'll get to this function to do all the checks
before any of the failed nodes can actually rejoin and become
cman members again (if that assumption doesn't hold, perhaps
do something with timestamps of join/fail). */
list_for_each_entry(node, &fd->node_history, list) {
if (!node->check_quorum)
continue;
if (!is_cman_member(node->nodeid)) {
/* cman has seen this failure too; stop tracking it */
node->check_quorum = 0;
} else {
log_debug("check_quorum %d is_cman_member",
node->nodeid);
wait_count++;
}
}
if (wait_count)
return 0;
if (!cman_quorate) {
log_debug("check_quorum not quorate");
return 0;
}
log_debug("check_quorum done");
return 1;
}
/* All preconditions for sending our start message; currently this is
   only the quorum check. */
static int wait_conditions_done(struct fd *fd)
{
	return check_quorum_done(fd) ? 1 : 0;
}
/* Return 1 once a start message has been received from every member of
   the current change, else log how many are outstanding and return 0. */
static int wait_messages_done(struct fd *fd)
{
	struct change *cg = list_first_entry(&fd->changes, struct change, list);
	struct member *memb;
	int missing = 0, count = 0;

	list_for_each_entry(memb, &cg->members, list) {
		count++;
		if (!memb->start)
			missing++;
	}

	if (missing) {
		log_debug("wait_messages_done need %d of %d", missing, count);
		return 0;
	}

	log_debug("wait_messages_done got all %d", count);
	return 1;
}
/* The current change cycle is finished: move it to fd->started_change
   (freeing the previous one), bump started_count, and free any older
   changes still queued behind it. */
static void cleanup_changes(struct fd *fd)
{
struct change *cg = list_first_entry(&fd->changes, struct change, list);
struct change *safe;
list_del(&cg->list);
if (fd->started_change)
free_cg(fd->started_change);
fd->started_change = cg;
/* zero started_count means "never started" */
fd->started_count++;
if (!fd->started_count)
fd->started_count++;
list_for_each_entry_safe(cg, safe, &fd->changes, list) {
list_del(&cg->list);
free_cg(cg);
}
}
/* Pick the master for the current change: the lowest nodeid among members
   that have completed a previous cycle (sent FD_MFLG_COMPLETE), or if none
   have, the lowest nodeid overall. */
static void set_master(struct fd *fd)
{
struct change *cg = list_first_entry(&fd->changes, struct change, list);
struct member *memb;
int low = 0, complete = 0;
list_for_each_entry(memb, &cg->members, list) {
if (!low || memb->nodeid < low)
low = memb->nodeid;
if (!(memb->start_flags & FD_MFLG_COMPLETE))
continue;
if (!complete || memb->nodeid < complete)
complete = memb->nodeid;
}
log_debug("set_master from %d to %s node %d", fd->master,
complete ? "complete" : "low",
complete ? complete : low);
fd->master = complete ? complete : low;
}
/* Scan a wire-format id_info array (entries 'size' bytes apart) for the
   entry describing nodeid; return it, or NULL if absent. */
static struct id_info *get_id_struct(struct id_info *ids, int count, int size,
int nodeid)
{
	char *p = (char *)ids;
	int i;

	for (i = 0; i < count; i++, p += size) {
		struct id_info *id = (struct id_info *)p;

		if (id->nodeid == nodeid)
			return id;
	}
	return NULL;
}
/* do the change details in the message match the details of the given change */
/* Returns 1 when the message (hd/fi/ids) was sent for change cg, 0 when it
   belongs to some other change and should be ignored for cg.  The tests,
   in order: we must appear as a member in the message, the sender must be
   a member of cg, all four counts must match, and every member listed in
   the message must exist in cg. */
static int match_change(struct fd *fd, struct change *cg, struct fd_header *hd,
struct fd_info *fi, struct id_info *ids)
{
struct id_info *id;
struct member *memb;
uint32_t seq = hd->msgdata;
int i, members_mismatch;
/* We can ignore messages if we're not in the list of members.
The one known time this will happen is after we've joined
the cpg, we can get messages for changes prior to the change
in which we're added. */
id = get_id_struct(ids, fi->id_info_count, fi->id_info_size,our_nodeid);
if (!id || !(id->flags & IDI_NODEID_IS_MEMBER)) {
log_debug("match_change fail %d:%u we are not in members",
hd->nodeid, seq);
return 0;
}
memb = find_memb(cg, hd->nodeid);
if (!memb) {
log_debug("match_change fail %d:%u sender not member",
hd->nodeid, seq);
return 0;
}
/* verify this is the right change by matching the counts
and the nodeids of the current members */
if (fi->member_count != cg->member_count ||
fi->joined_count != cg->joined_count ||
fi->remove_count != cg->remove_count ||
fi->failed_count != cg->failed_count) {
log_debug("match_change fail %d:%u expect counts "
"%d %d %d %d", hd->nodeid, seq,
cg->member_count, cg->joined_count,
cg->remove_count, cg->failed_count);
return 0;
}
members_mismatch = 0;
id = ids;
for (i = 0; i < fi->id_info_count; i++) {
if (id->flags & IDI_NODEID_IS_MEMBER) {
memb = find_memb(cg, id->nodeid);
if (!memb) {
log_debug("match_change fail %d:%u memb %d",
hd->nodeid, seq, id->nodeid);
members_mismatch = 1;
break;
}
}
id = (struct id_info *)((char *)id + fi->id_info_size);
}
if (members_mismatch)
return 0;
log_debug("match_change done %d:%u", hd->nodeid, seq);
return 1;
}
/* Unfortunately, there's no really simple way to match a message with the
specific change that it was sent for. We hope that by passing all the
details of the change in the message, we will be able to uniquely match the
it to the correct change. */
/* A start message will usually be for the first (current) change on our list.
In some cases it will be for a non-current change, and we can ignore it:
1. A,B,C get confchg1 adding C
2. C sends start for confchg1
3. A,B,C get confchg2 adding D
4. A,B,C,D recv start from C for confchg1 - ignored
5. C,D send start for confchg2
6. A,B send start for confchg2
7. A,B,C,D recv all start messages for confchg2, and start kernel
In step 4, how do the nodes know whether the start message from C is
for confchg1 or confchg2? Hopefully by comparing the counts and members. */
static struct change *find_change(struct fd *fd, struct fd_header *hd,
struct fd_info *fi, struct id_info *ids)
{
struct change *cg;
/* changes are added at the list head (add_change), so reverse
iteration scans oldest change first */
list_for_each_entry_reverse(cg, &fd->changes, list) {
if (!match_change(fd, cg, hd, fi, ids))
continue;
return cg;
}
log_debug("find_change %d:%u no match", hd->nodeid, hd->msgdata);
return NULL;
}
/* Return 1 if nodeid was added by any change currently queued on fd. */
static int is_added(struct fd *fd, int nodeid)
{
	struct change *cg;
	struct member *memb;

	list_for_each_entry(cg, &fd->changes, list) {
		memb = find_memb(cg, nodeid);
		if (!memb)
			continue;
		if (memb->added)
			return 1;
	}
	return 0;
}
/* Handle FD_MSG_START: find the change this message belongs to, mark the
   sender as started for it, and merge any fencing history we lack.
   NOTE(review): fi/ids are read from the payload without checking len
   against fd_info_size/id_info_count — presumably trusted cluster peers;
   verify if that assumption changes. */
static void receive_start(struct fd *fd, struct fd_header *hd, int len)
{
struct change *cg;
struct member *memb;
struct fd_info *fi;
struct id_info *ids;
uint32_t seq = hd->msgdata;
int added;
log_debug("receive_start %d:%u len %d", hd->nodeid, seq, len);
fi = (struct fd_info *)((char *)hd + sizeof(struct fd_header));
ids = (struct id_info *)((char *)fi + sizeof(struct fd_info));
fd_info_in(fi);
ids_in(fi, ids);
cg = find_change(fd, hd, fi, ids);
if (!cg)
return;
memb = find_memb(cg, hd->nodeid);
if (!memb) {
/* this should never happen since match_change checks it */
log_error("receive_start no member %d", hd->nodeid);
return;
}
memb->start_flags = hd->flags;
added = is_added(fd, hd->nodeid);
if (added && fi->started_count) {
log_error("receive_start %d:%u add node with started_count %u",
hd->nodeid, seq, fi->started_count);
/* observe this scheme working before using it; I'm not sure
that a joining node won't ever see an existing node as added
under normal circumstances */
/*
memb->disallowed = 1;
return;
*/
}
node_history_start(fd, hd->nodeid);
memb->start = 1;
/* save any fencing history from this message that we don't have */
save_history(fd, fi, ids);
}
/* Handle FD_MSG_COMPLETE from the master: once we see ourselves listed as
   a member, set init_complete and clear any victims left over from our
   own startup assumptions (see add_victims_init). */
static void receive_complete(struct fd *fd, struct fd_header *hd, int len)
{
struct fd_info *fi;
struct id_info *ids, *id;
uint32_t seq = hd->msgdata;
struct node *node, *safe;
log_debug("receive_complete %d:%u len %d", hd->nodeid, seq, len);
if (fd->init_complete)
return;
fi = (struct fd_info *)((char *)hd + sizeof(struct fd_header));
ids = (struct id_info *)((char *)fi + sizeof(struct fd_info));
fd_info_in(fi);
ids_in(fi, ids);
id = get_id_struct(ids, fi->id_info_count, fi->id_info_size,our_nodeid);
if (!id || !(id->flags & IDI_NODEID_IS_MEMBER)) {
log_debug("receive_complete %d:%u we are not in members",
hd->nodeid, seq);
return;
}
fd->init_complete = 1;
/* we may have victims from init which we can clear now */
list_for_each_entry_safe(node, safe, &fd->victims, list) {
log_debug("receive_complete clear victim %d init %d",
node->nodeid, node->init_victim);
list_del(&node->list);
free(node);
}
}
static int count_ids(struct fd *fd)
{
struct node_history *node;
int count = 0;
list_for_each_entry(node, &fd->node_history, list)
count++;
return count;
}
/* Build and broadcast a start or complete message for the current change:
   header + fd_info (counts) + one id_info per node_history record, all
   converted to little-endian wire order. */
static void send_info(struct fd *fd, int type)
{
struct change *cg;
struct fd_header *hd;
struct fd_info *fi;
struct id_info *id;
struct node_history *node;
char *buf;
uint32_t flags;
int len, id_count;
cg = list_first_entry(&fd->changes, struct change, list);
id_count = count_ids(fd);
len = sizeof(struct fd_header) + sizeof(struct fd_info) +
id_count * sizeof(struct id_info);
buf = malloc(len);
if (!buf) {
log_error("send_info len %d no mem", len);
return;
}
memset(buf, 0, len);
hd = (struct fd_header *)buf;
fi = (struct fd_info *)(buf + sizeof(*hd));
id = (struct id_info *)(buf + sizeof(*hd) + sizeof(*fi));
/* fill in header (fd_send_message handles part of header) */
hd->type = type;
hd->msgdata = cg->seq;
if (cg->we_joined)
hd->flags |= FD_MFLG_JOINING;
if (fd->init_complete)
hd->flags |= FD_MFLG_COMPLETE;
/* fill in fd_info */
fi->fd_info_size = cpu_to_le32(sizeof(struct fd_info));
fi->id_info_size = cpu_to_le32(sizeof(struct id_info));
fi->id_info_count = cpu_to_le32(id_count);
fi->started_count = cpu_to_le32(fd->started_count);
fi->member_count = cpu_to_le32(cg->member_count);
fi->joined_count = cpu_to_le32(cg->joined_count);
fi->remove_count = cpu_to_le32(cg->remove_count);
fi->failed_count = cpu_to_le32(cg->failed_count);
/* fill in id_info entries */
list_for_each_entry(node, &fd->node_history, list) {
flags = 0;
if (find_memb(cg, node->nodeid))
flags = IDI_NODEID_IS_MEMBER;
id->flags = cpu_to_le32(flags);
id->nodeid = cpu_to_le32(node->nodeid);
id->fence_external_node= cpu_to_le32(node->fence_external_node);
id->fence_master = cpu_to_le32(node->fence_master);
id->fence_how = cpu_to_le32(node->fence_how);
id->fence_time = cpu_to_le64(node->fence_time);
id->fence_external_time= cpu_to_le64(node->fence_external_time);
id++;
}
log_debug("send_%s %u flags %x counts %u %d %d %d %d",
type == FD_MSG_START ? "start" : "complete",
cg->seq, hd->flags, fd->started_count, cg->member_count,
cg->joined_count, cg->remove_count, cg->failed_count);
fd_send_message(fd, buf, len);
free(buf);
}
/* broadcast our state for the current change */
static void send_start(struct fd *fd)
{
send_info(fd, FD_MSG_START);
}
/* same content as a start message, a new (incomplete) node will look for
a complete message that shows it as a member, when it sees one it can
clear any init_victims and set init_complete for future cycles */
static void send_complete(struct fd *fd)
{
send_info(fd, FD_MSG_COMPLETE);
}
/* FIXME: better to just look in victims list for any nodes with init_victim? */
static int nodes_added(struct fd *fd)
{
struct change *cg;
list_for_each_entry(cg, &fd->changes, list) {
if (cg->joined_count)
return 1;
}
return 0;
}
/* If we're being added by the current change, we'll have an empty victims
list, while other previous members may already have nodes in their
victims list. So, we need to assume that any node in cluster.conf that's
not a cluster member when we're added to the fd is already a victim.
We can go back on that assumption, and clear out any presumed victims, when
we see a message from a previous member saying that are no current victims. */
/* Every member that failed in change cg becomes a victim to be fenced. */
static void add_victims(struct fd *fd, struct change *cg)
{
struct member *memb;
struct node *node;
list_for_each_entry(memb, &cg->removed, list) {
if (!memb->failed)
continue;
node = get_new_node(fd, memb->nodeid);
if (!node)
return;
list_add(&node->list, &fd->victims);
log_debug("add node %d to victims", node->nodeid);
}
}
/* with start messages from all members, we can pick which one should be master
and do the fencing (low nodeid with state, "COMPLETE"). as the master
successfully fences each victim, it sends a status message such that all
members remove the node from their victims list.
after all victims have been dealt following a change (or set of changes),
the master sends a complete message that indicates the members of the group
for the change it has completed processing. when a joining node sees this
complete message and sees itself as a member, it knows it can clear all
init_victims from startup init, and it sets init_complete so it will
volunteer to be master in the next round by setting COMPLETE flag.
once the master begins fencing victims, it won't process any new changes
until it's done. the non-master members will process changes while the
master is fencing, but will wait for the master to catch up in
WAIT_MESSAGES. if the master fails, the others will no longer wait for it.*/
/* State machine for the oldest queued change: wait for quorum, then for
   start messages from all members, then fence (if master) or defer. */
static void apply_changes(struct fd *fd)
{
struct change *cg;
if (list_empty(&fd->changes))
return;
cg = list_first_entry(&fd->changes, struct change, list);
switch (cg->state) {
case CGST_WAIT_CONDITIONS:
if (wait_conditions_done(fd)) {
send_start(fd);
cg->state = CGST_WAIT_MESSAGES;
}
break;
case CGST_WAIT_MESSAGES:
if (wait_messages_done(fd)) {
set_master(fd);
if (fd->master == our_nodeid) {
delay_fencing(fd, nodes_added(fd));
fence_victims(fd);
send_complete(fd);
} else {
defer_fencing(fd);
}
cleanup_changes(fd);
fd->joining_group = 0;
}
break;
default:
log_error("apply_changes invalid state %d", cg->state);
}
}
/* Drive apply_changes() for every domain that has pending changes.
   Safe iteration: apply_changes may tear down domain state. */
void process_fd_changes(void)
{
	struct fd *fd, *tmp;

	list_for_each_entry_safe(fd, tmp, &domains, list) {
		if (list_empty(&fd->changes))
			continue;
		apply_changes(fd);
	}
}
/* Build a struct change from a cpg confchg callback and queue it at the
   head of fd->changes.  Returns 0 and sets *cg_out on success, negative
   errno on failure.
   NOTE(review): if the very first malloc fails, the fail_nomem path calls
   free_cg() with cg == NULL — verify free_cg tolerates NULL. */
static int add_change(struct fd *fd,
struct cpg_address *member_list, int member_list_entries,
struct cpg_address *left_list, int left_list_entries,
struct cpg_address *joined_list, int joined_list_entries,
struct change **cg_out)
{
struct change *cg;
struct member *memb;
int i, error;
cg = malloc(sizeof(struct change));
if (!cg)
goto fail_nomem;
memset(cg, 0, sizeof(struct change));
INIT_LIST_HEAD(&cg->members);
INIT_LIST_HEAD(&cg->removed);
cg->seq = ++fd->change_seq;
cg->state = CGST_WAIT_CONDITIONS;
cg->member_count = member_list_entries;
cg->joined_count = joined_list_entries;
cg->remove_count = left_list_entries;
/* current members of the group after this change */
for (i = 0; i < member_list_entries; i++) {
memb = malloc(sizeof(struct member));
if (!memb)
goto fail_nomem;
memset(memb, 0, sizeof(struct member));
memb->nodeid = member_list[i].nodeid;
list_add_tail(&memb->list, &cg->members);
}
/* departed members; NODEDOWN/PROCDOWN departures count as failures */
for (i = 0; i < left_list_entries; i++) {
memb = malloc(sizeof(struct member));
if (!memb)
goto fail_nomem;
memset(memb, 0, sizeof(struct member));
memb->nodeid = left_list[i].nodeid;
if (left_list[i].reason == CPG_REASON_NODEDOWN ||
left_list[i].reason == CPG_REASON_PROCDOWN) {
memb->failed = 1;
cg->failed_count++;
}
list_add_tail(&memb->list, &cg->removed);
if (memb->failed)
node_history_fail(fd, memb->nodeid);
else
node_history_left(fd, memb->nodeid);
log_debug("add_change %u nodeid %d remove reason %d",
cg->seq, memb->nodeid, left_list[i].reason);
/* fenced died on that node but the node is still up; evict it */
if (left_list[i].reason == CPG_REASON_PROCDOWN)
kick_node_from_cluster(memb->nodeid);
}
/* mark joiners; a joined node must also appear in member_list */
for (i = 0; i < joined_list_entries; i++) {
memb = find_memb(cg, joined_list[i].nodeid);
if (!memb) {
log_error("no member %d", joined_list[i].nodeid);
error = -ENOENT;
goto fail;
}
memb->added = 1;
if (memb->nodeid == our_nodeid)
cg->we_joined = 1;
else
node_history_init(fd, memb->nodeid);
log_debug("add_change %u nodeid %d joined", cg->seq,
memb->nodeid);
}
/* if we just joined we have no history for anyone; create it all */
if (cg->we_joined)
list_for_each_entry(memb, &cg->members, list)
node_history_init(fd, memb->nodeid);
log_debug("add_change %u member %d joined %d remove %d failed %d",
cg->seq, cg->member_count, cg->joined_count, cg->remove_count,
cg->failed_count);
list_add(&cg->list, &fd->changes);
*cg_out = cg;
return 0;
fail_nomem:
log_error("no memory");
error = -ENOMEM;
fail:
free_cg(cg);
return error;
}
/* add a victim for each node in complete list (represents all nodes in
cluster.conf) that is not a cman member (and not already a victim) */
/* Consumes fd->complete: each node either moves to fd->victims (marked
   init_victim) or is freed. */
static void add_victims_init(struct fd *fd, struct change *cg)
{
struct node *node, *safe;
list_for_each_entry_safe(node, safe, &fd->complete, list) {
list_del(&node->list);
if (!is_cman_member(node->nodeid) &&
!find_memb(cg, node->nodeid) &&
!is_victim(fd, node->nodeid)) {
node->init_victim = 1;
list_add(&node->list, &fd->victims);
log_debug("add_victims_init %d", node->nodeid);
} else {
free(node);
}
}
}
/* Return 1 if our own nodeid appears in the confchg left list. */
static int we_left(struct cpg_address *left_list, int left_list_entries)
{
	int i;

	for (i = 0; i < left_list_entries; i++)
		if (left_list[i].nodeid == our_nodeid)
			return 1;

	return 0;
}
/* cpg membership callback for a fence domain: either finish our own
   leave, or queue a new change and derive victims from it. */
static void confchg_cb(cpg_handle_t handle, struct cpg_name *group_name,
struct cpg_address *member_list, int member_list_entries,
struct cpg_address *left_list, int left_list_entries,
struct cpg_address *joined_list, int joined_list_entries)
{
struct fd *fd;
struct change *cg;
int rv;
fd = find_fd_handle(handle);
if (!fd) {
log_error("confchg_cb no fence domain for cpg %s",
group_name->value);
return;
}
if (fd->leaving_group && we_left(left_list, left_list_entries)) {
/* we called cpg_leave(), and this should be the final
cpg callback we receive */
log_debug("confchg for our leave");
cpg_finalize(fd->cpg_handle);
client_dead(fd->cpg_client);
list_del(&fd->list);
free_fd(fd);
return;
}
rv = add_change(fd, member_list, member_list_entries,
left_list, left_list_entries,
joined_list, joined_list_entries, &cg);
if (rv)
return;
/* failed nodes in this change become victims */
add_victims(fd, cg);
/* As a joining domain member with no previous state, we need to
assume non-member nodes are already victims; these initial victims
are cleared if we get a "complete" message from the master.
But, if we're the master, we do end up fencing these init nodes. */
if (cg->we_joined)
add_victims_init(fd, cg);
}
/* cpg message callback for a fence domain: convert the header from wire
   order, reject incompatible protocol versions and spoofed sender ids,
   then dispatch by message type.
   NOTE(review): len is passed through to handlers without being checked
   against sizeof(struct fd_header) here — confirm the cluster is the only
   message source. */
static void deliver_cb(cpg_handle_t handle, struct cpg_name *group_name,
uint32_t nodeid, uint32_t pid, void *data, int len)
{
struct fd *fd;
struct fd_header *hd;
fd = find_fd_handle(handle);
if (!fd) {
log_error("deliver_cb no fd for cpg %s", group_name->value);
return;
}
hd = (struct fd_header *)data;
hd->version[0] = le16_to_cpu(hd->version[0]);
hd->version[1] = le16_to_cpu(hd->version[1]);
hd->version[2] = le16_to_cpu(hd->version[2]);
hd->type = le16_to_cpu(hd->type);
hd->nodeid = le32_to_cpu(hd->nodeid);
hd->to_nodeid = le32_to_cpu(hd->to_nodeid);
hd->global_id = le32_to_cpu(hd->global_id);
hd->flags = le32_to_cpu(hd->flags);
hd->msgdata = le32_to_cpu(hd->msgdata);
/* only the major version must match */
if (hd->version[0] != protocol_active[0]) {
log_error("reject message from %d version %u.%u.%u vs %u.%u.%u",
nodeid, hd->version[0], hd->version[1],
hd->version[2], protocol_active[0],
protocol_active[1], protocol_active[2]);
return;
}
/* sender id in the message must match the cpg-reported sender */
if (hd->nodeid != nodeid) {
log_error("bad msg nodeid %d %d", hd->nodeid, nodeid);
return;
}
switch (hd->type) {
case FD_MSG_START:
receive_start(fd, hd, len);
break;
case FD_MSG_VICTIM_DONE:
receive_victim_done(fd, hd, len);
break;
case FD_MSG_COMPLETE:
receive_complete(fd, hd, len);
break;
case FD_MSG_EXTERNAL:
receive_external(fd, hd, len);
break;
default:
log_error("unknown msg type %d", hd->type);
}
}
/* callbacks for the per-domain cpg (see fd_join) */
static cpg_callbacks_t cpg_callbacks = {
.cpg_deliver_fn = deliver_cb,
.cpg_confchg_fn = confchg_cb,
};
/* poll-loop handler for a domain's cpg fd (registered via client_add in
   fd_join): dispatch pending cpg callbacks, then run the change state
   machine */
static void process_fd_cpg(int ci)
{
struct fd *fd;
cpg_error_t error;
fd = find_fd_ci(ci);
if (!fd) {
log_error("process_fd_cpg no fence domain for ci %d", ci);
return;
}
error = cpg_dispatch(fd->cpg_handle, CPG_DISPATCH_ALL);
if (error != CPG_OK) {
log_error("cpg_dispatch error %d", error);
return;
}
apply_changes(fd);
}
/* Join the cpg "fenced:<name>" for this domain: initialize a cpg handle,
   register its fd with the poll loop, and retry cpg_join while corosync
   is busy.  Returns 0 on success; on failure the domain is unregistered
   and freed, and the cpg error is returned. */
int fd_join(struct fd *fd)
{
cpg_error_t error;
cpg_handle_t h;
struct cpg_name name;
int i = 0, f, ci;
error = cpg_initialize(&h, &cpg_callbacks);
if (error != CPG_OK) {
log_error("cpg_initialize error %d", error);
goto fail_free;
}
cpg_fd_get(h, &f);
ci = client_add(f, process_fd_cpg, NULL);
list_add(&fd->list, &domains);
fd->cpg_handle = h;
fd->cpg_client = ci;
fd->cpg_fd = f;
fd->joining_group = 1;
memset(&name, 0, sizeof(name));
sprintf(name.value, "fenced:%s", fd->name);
name.length = strlen(name.value) + 1;
retry:
error = cpg_join(h, &name);
if (error == CPG_ERR_TRY_AGAIN) {
sleep(1);
if (!(++i % 10))
log_error("cpg_join error retrying");
goto retry;
}
if (error != CPG_OK) {
log_error("cpg_join error %d", error);
/* the diff removes this duplicate finalize; the fail label
below already calls cpg_finalize(h) */
- cpg_finalize(h);
goto fail;
}
return 0;
fail:
list_del(&fd->list);
client_dead(ci);
cpg_finalize(h);
fail_free:
free(fd);
return error;
}
/* Leave the domain's cpg.  Marks the domain as leaving first; the final
   confchg for our own departure (handled in confchg_cb) frees the domain.
   Retries cpg_leave while corosync is busy.  Always returns 0. */
int fd_leave(struct fd *fd)
{
	cpg_error_t error;
	struct cpg_name name;
	int i = 0;

	fd->leaving_group = 1;

	memset(&name, 0, sizeof(name));

	/* snprintf instead of sprintf: fd->name is configuration-supplied
	   and must not overflow the fixed-size cpg name buffer */
	snprintf(name.value, sizeof(name.value), "fenced:%s", fd->name);
	name.length = strlen(name.value) + 1;

 retry:
	error = cpg_leave(fd->cpg_handle, &name);
	if (error == CPG_ERR_TRY_AGAIN) {
		sleep(1);
		if (!(++i % 10))
			log_error("cpg_leave error retrying");
		goto retry;
	}
	if (error != CPG_OK)
		log_error("cpg_leave error %d", error);

	return 0;
}
+/* process_cpg(), setup_cpg(), close_cpg() are for the "daemon" cpg which
+   tracks the presence of other daemons; it's not the fenced domain cpg.
+   Joining this cpg tells others that we don't have uncontrolled dlm/gfs
+   kernel state and they can skip fencing us if we're a victim.  (We have
+   to check for that uncontrolled state before calling setup_cpg, obviously.) */
+
+/* no messages are ever sent on the daemon cpg; delivery is a no-op */
+static void deliver_cb_daemon(cpg_handle_t handle, struct cpg_name *group_name,
+		uint32_t nodeid, uint32_t pid, void *data, int len)
+{
+}
+
+/* cache the daemon cpg membership for in_daemon_member_list() */
+static void confchg_cb_daemon(cpg_handle_t handle, struct cpg_name *group_name,
+		struct cpg_address *member_list, int member_list_entries,
+		struct cpg_address *left_list, int left_list_entries,
+		struct cpg_address *joined_list, int joined_list_entries)
+{
+	memset(&daemon_member_list, 0, sizeof(daemon_member_list));
+	memcpy(&daemon_member_list, member_list,
+	       member_list_entries * sizeof(struct cpg_address));
+	daemon_member_list_entries = member_list_entries;
+}
+
+static cpg_callbacks_t cpg_callbacks_daemon = {
+	.cpg_deliver_fn = deliver_cb_daemon,
+	.cpg_confchg_fn = confchg_cb_daemon,
+};
+
+/* poll-loop handler for the daemon cpg fd */
+void process_cpg(int ci)
+{
+	cpg_error_t error;
+
+	error = cpg_dispatch(cpg_handle_daemon, CPG_DISPATCH_ALL);
+	if (error != CPG_OK)
+		log_error("daemon cpg_dispatch error %d", error);
+}
+
+/* Return 1 if nodeid's fenced daemon has joined "fenced:daemon".
+   Dispatches pending events first so the cached member list is
+   current; a dispatch error is ignored (best effort). */
+int in_daemon_member_list(int nodeid)
+{
+	int i;
+
+	cpg_dispatch(cpg_handle_daemon, CPG_DISPATCH_ALL);
+
+	for (i = 0; i < daemon_member_list_entries; i++) {
+		if (daemon_member_list[i].nodeid == nodeid)
+			return 1;
+	}
+	return 0;
+}
+
+/* Initialize the daemon cpg and join "fenced:daemon", announcing that
+   this node holds no uncontrolled dlm/gfs state.  Returns the cpg fd
+   for the poll loop, or -1 on error. */
+int setup_cpg(void)
+{
+	cpg_error_t error;
+	cpg_handle_t h;
+	struct cpg_name name;
+	int i = 0, f;
+
+	error = cpg_initialize(&h, &cpg_callbacks_daemon);
+	if (error != CPG_OK) {
+		log_error("daemon cpg_initialize error %d", error);
+		/* no handle exists yet; finalizing the uninitialized h
+		   (as the old goto fail did) is invalid */
+		return -1;
+	}
+
+	cpg_fd_get(h, &f);
+
+	cpg_handle_daemon = h;
+
+	memset(&name, 0, sizeof(name));
+	sprintf(name.value, "fenced:daemon");
+	name.length = strlen(name.value) + 1;
+
+ retry:
+	error = cpg_join(h, &name);
+	if (error == CPG_ERR_TRY_AGAIN) {
+		sleep(1);
+		if (!(++i % 10))
+			log_error("daemon cpg_join error retrying");
+		goto retry;
+	}
+	if (error != CPG_OK) {
+		log_error("daemon cpg_join error %d", error);
+		goto fail;
+	}
+
+	log_debug("setup_cpg %d", f);
+	return f;
+
+ fail:
+	cpg_finalize(h);
+	cpg_handle_daemon = 0;	/* don't leave a finalized handle for close_cpg() */
+	return -1;
+}
+
+/* Leave "fenced:daemon" at shutdown so other daemons stop treating
+   this node as clean.  No-op if setup_cpg() never succeeded. */
+void close_cpg(void)
+{
+	cpg_error_t error;
+	struct cpg_name name;
+	int i = 0;
+
+	if (!cpg_handle_daemon)
+		return;
+
+	memset(&name, 0, sizeof(name));
+	sprintf(name.value, "fenced:daemon");
+	name.length = strlen(name.value) + 1;
+
+ retry:
+	error = cpg_leave(cpg_handle_daemon, &name);
+	if (error == CPG_ERR_TRY_AGAIN) {
+		sleep(1);
+		if (!(++i % 10))
+			log_error("daemon cpg_leave error retrying");
+		goto retry;
+	}
+	if (error != CPG_OK)
+		log_error("daemon cpg_leave error %d", error);
+}
+
/* Fill *nodeinfo for a query: victim/member status from the current
   started change (if any) plus last-fenced details from the node
   history.  Always returns 0. */
int set_node_info(struct fd *fd, int nodeid, struct fenced_node *nodeinfo)
{
	struct node_history *nh;
	struct member *memb;

	nodeinfo->nodeid = nodeid;
	nodeinfo->victim = is_victim(fd, nodeid);

	if (fd->started_change) {
		memb = find_memb(fd->started_change, nodeid);
		if (memb)
			nodeinfo->member = memb->disallowed ? 0 : 1;
	}

	nh = get_node_history(fd, nodeid);
	if (!nh)
		return 0;

	nodeinfo->last_fenced_master = nh->fence_master;
	nodeinfo->last_fenced_how = nh->fence_how;
	nodeinfo->last_fenced_time = nh->fence_time;
	return 0;
}
int set_domain_info(struct fd *fd, struct fenced_domain *domain)
{
struct change *cg = fd->started_change;
if (cg) {
domain->member_count = cg->member_count;
domain->state = cg->state;
}
domain->master_nodeid = fd->master;
domain->victim_count = list_count(&fd->victims);
domain->current_victim = fd->current_victim;
return 0;
}
/* Build an array of fenced_node structs for a DOMAIN_NODES query:
   either the members of the started change or every node in the
   history.  On success *nodes_out is a calloc'd array (caller frees,
   may be NULL when *node_count is 0); returns 0 or -ENOMEM. */
int set_domain_nodes(struct fd *fd, int option, int *node_count,
		     struct fenced_node **nodes_out)
{
	struct change *cg = fd->started_change;
	struct fenced_node *nodes = NULL, *n;
	struct node_history *nh;
	struct member *memb;
	int count = 0;

	if (option == FENCED_NODES_MEMBERS) {
		if (!cg)
			goto out;
		count = cg->member_count;

		/* calloc zeroes the whole array; the previous
		   memset(nodes, 0, sizeof(*nodes)) cleared only the
		   first element and leaked stack garbage to clients */
		nodes = calloc(count, sizeof(struct fenced_node));
		if (count && !nodes)
			return -ENOMEM;

		n = nodes;
		list_for_each_entry(memb, &cg->members, list)
			set_node_info(fd, memb->nodeid, n++);
	}

	else if (option == FENCED_NODES_ALL) {
		list_for_each_entry(nh, &fd->node_history, list)
			count++;

		nodes = calloc(count, sizeof(struct fenced_node));
		if (count && !nodes)
			return -ENOMEM;

		n = nodes;
		list_for_each_entry(nh, &fd->node_history, list)
			set_node_info(fd, nh->nodeid, n++);
	}
 out:
	*node_count = count;
	*nodes_out = nodes;
	return 0;
}
diff --git a/fence/fenced/fd.h b/fence/fenced/fd.h
index 21cac397a..a9dacbd63 100644
--- a/fence/fenced/fd.h
+++ b/fence/fenced/fd.h
@@ -1,271 +1,276 @@
#ifndef __FD_DOT_H__
#define __FD_DOT_H__
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <time.h>
#include <sched.h>
#include <limits.h>
+#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/time.h>
#include <openais/saAis.h>
#include <corosync/cpg.h>
#include <corosync/engine/logsys.h>
#include "list.h"
#include "linux_endian.h"
#include "libfence.h"
#include "libfenced.h"
#include "fenced.h"
/* Max name length for a group, pointless since we only ever create the
"default" group. Regardless, set arbitrary max to match dlm's
DLM_LOCKSPACE_LEN 64. The libcpg limit is larger at 128; we prefix
the fence domain name with "fenced:" to create the cpg name. */
#define MAX_GROUPNAME_LEN 64
/* Max name length for a node. This should match libcman's
CMAN_MAX_NODENAME_LEN which is 255. */
#define MAX_NODENAME_LEN 255
/* Maximum members of the fence domain, or cluster. Should match
CPG_MEMBERS_MAX in openais/cpg.h. */
#define MAX_NODES 128
/* Max string length printed on a line, for debugging/dump output. */
#define MAXLINE 256
/* group_mode */
#define GROUP_LIBGROUP 2
#define GROUP_LIBCPG 3
extern int daemon_debug_opt;
extern int daemon_quit;
extern struct list_head domains;
extern int cman_quorate;
extern int our_nodeid;
extern char our_name[MAX_NODENAME_LEN+1];
extern char daemon_debug_buf[256];
extern char dump_buf[FENCED_DUMP_SIZE];
extern int dump_point;
extern int dump_wrap;
extern int group_mode;
extern void daemon_dump_save(void);
#define log_debug(fmt, args...) \
do { \
snprintf(daemon_debug_buf, 255, "%ld " fmt "\n", time(NULL), ##args); \
daemon_dump_save(); \
if (daemon_debug_opt) \
fprintf(stderr, "%s", daemon_debug_buf); \
if (cfgd_debug_logsys) \
log_printf(LOG_DEBUG, "%s", daemon_debug_buf); \
} while (0)
#define log_error(fmt, args...) \
do { \
log_debug(fmt, ##args); \
log_printf(LOG_ERR, fmt, ##args); \
} while (0)
#define log_level(lvl, fmt, args...) \
do { \
log_debug(fmt, ##args); \
log_printf(lvl, fmt, ##args); \
} while (0)
#define FD_MSG_START 1
#define FD_MSG_VICTIM_DONE 2
#define FD_MSG_COMPLETE 3
#define FD_MSG_EXTERNAL 4
#define FD_MFLG_JOINING 1 /* accompanies start, we are joining */
#define FD_MFLG_COMPLETE 2 /* accompanies start, we have complete info */
struct fd_header {
uint16_t version[3];
uint16_t type; /* FD_MSG_ */
uint32_t nodeid; /* sender */
uint32_t to_nodeid; /* recipient, 0 for all */
uint32_t global_id; /* global unique id for this domain */
uint32_t flags; /* FD_MFLG_ */
uint32_t msgdata; /* in-header payload depends on MSG type */
uint32_t pad1;
uint64_t pad2;
};
#define CGST_WAIT_CONDITIONS 1
#define CGST_WAIT_MESSAGES 2
struct change {
struct list_head list;
struct list_head members;
struct list_head removed; /* nodes removed by this change */
int member_count;
int joined_count;
int remove_count;
int failed_count;
int state; /* CGST_ */
int we_joined;
uint32_t seq; /* just used as a reference when debugging */
};
#define VIC_DONE_AGENT 1
#define VIC_DONE_MEMBER 2
#define VIC_DONE_OVERRIDE 3
#define VIC_DONE_EXTERNAL 4
struct node_history {
struct list_head list;
int nodeid;
int check_quorum;
uint64_t add_time;
uint64_t left_time;
uint64_t fail_time;
uint64_t fence_time;
uint64_t fence_external_time;
int fence_external_node;
int fence_master;
int fence_how; /* VIC_DONE_ */
};
struct node {
struct list_head list;
int nodeid;
int init_victim;
char name[MAX_NODENAME_LEN+1];
};
/* Per-domain state; one per joined fence domain, linked on the global
   domains list. */
struct fd {
	struct list_head list;
	char name[MAX_GROUPNAME_LEN+1];

	/* libcpg domain membership */

	cpg_handle_t cpg_handle;
	int cpg_client;			/* index into main.c client array */
	int cpg_fd;
	uint32_t change_seq;
	uint32_t started_count;
	struct change *started_change;
	struct list_head changes;	/* pending confchg changes */
	struct list_head node_history;
	int init_complete;

	/* general domain membership */

	int master;			/* nodeid reported as master in queries */
	int joining_group;
	int leaving_group;
	int current_victim; /* for queries */
	struct list_head victims;	/* nodes still to be fenced */
	struct list_head complete;

	/* libgroup domain membership */

	int last_stop;
	int last_start;
	int last_finish;
	int first_recovery;
	int prev_count;
	struct list_head prev;
	struct list_head leaving;
};
/* config.c */
int setup_ccs(void);
void close_ccs(void);
void read_ccs_name(char *path, char *name);
void read_ccs_yesno(char *path, int *yes, int *no);
void read_ccs_int(char *path, int *config_val);
int read_ccs(struct fd *fd);
/* cpg.c */
+void process_cpg(int ci);
+int setup_cpg(void);
+void close_cpg(void);
void free_cg(struct change *cg);
void node_history_fence(struct fd *fd, int victim, int master, int how,
uint64_t mastertime);
void send_external(struct fd *fd, int victim);
int is_fenced_external(struct fd *fd, int nodeid);
void send_victim_done(struct fd *fd, int victim);
void process_fd_changes(void);
int fd_join(struct fd *fd);
int fd_leave(struct fd *fd);
int set_node_info(struct fd *fd, int nodeid, struct fenced_node *node);
int set_domain_info(struct fd *fd, struct fenced_domain *domain);
int set_domain_nodes(struct fd *fd, int option, int *node_count,
struct fenced_node **nodes);
+int in_daemon_member_list(int nodeid);
/* group.c */
void process_groupd(int ci);
int setup_groupd(void);
void close_groupd(void);
int fd_join_group(struct fd *fd);
int fd_leave_group(struct fd *fd);
int set_node_info_group(struct fd *fd, int nodeid, struct fenced_node *node);
int set_domain_info_group(struct fd *fd, struct fenced_domain *domain);
int set_domain_nodes_group(struct fd *fd, int option, int *node_count,
struct fenced_node **nodes);
void set_group_mode(void);
/* main.c */
void client_dead(int ci);
int client_add(int fd, void (*workfn)(int ci), void (*deadfn)(int ci));
void free_fd(struct fd *fd);
struct fd *find_fd(char *name);
void query_lock(void);
void query_unlock(void);
void cluster_dead(int ci);
/* member_cman.c */
void process_cman(int ci);
int setup_cman(void);
void close_cman(void);
int is_cman_member(int nodeid);
char *nodeid_to_name(int nodeid);
int name_to_nodeid(char *name);
struct node *get_new_node(struct fd *fd, int nodeid);
void kick_node_from_cluster(int nodeid);
/* recover.c */
void free_node_list(struct list_head *head);
void add_complete_node(struct fd *fd, int nodeid);
int list_count(struct list_head *head);
int is_victim(struct fd *fd, int nodeid);
void delay_fencing(struct fd *fd, int node_join);
void defer_fencing(struct fd *fd);
void fence_victims(struct fd *fd);
/* logging.c */
void init_logging(void);
void setup_logging();
void close_logging(void);
#endif /* __FD_DOT_H__ */
diff --git a/fence/fenced/main.c b/fence/fenced/main.c
index ae5f66224..01937f1ab 100644
--- a/fence/fenced/main.c
+++ b/fence/fenced/main.c
@@ -1,932 +1,1044 @@
#include "fd.h"
#include "config.h"
#include <pthread.h>
#include "copyright.cf"
#define LOCKFILE_NAME "/var/run/fenced.pid"
#define CLIENT_NALLOC 32
static int client_maxi;
static int client_size = 0;
static struct client *client = NULL;
static struct pollfd *pollfd = NULL;
static pthread_t query_thread;
static pthread_mutex_t query_mutex;
+static struct list_head controlled_entries;
struct client {
int fd;
void *workfn;
void *deadfn;
};
/* Read exactly count bytes from fd into buf, retrying on EINTR and
   short reads.  Returns 0 on success, -1 on error or EOF. */
static int do_read(int fd, void *buf, size_t count)
{
	size_t done = 0;
	ssize_t rv;

	while (done < count) {
		rv = read(fd, (char *)buf + done, count - done);
		if (rv == -1 && errno == EINTR)
			continue;
		if (rv <= 0)		/* error, or EOF before count bytes */
			return -1;
		done += rv;
	}
	return 0;
}
/* Write all count bytes of buf to fd, retrying on EINTR and partial
   writes.  Returns 0 on success, the (negative) write() result on error. */
static int do_write(int fd, void *buf, size_t count)
{
	size_t done = 0;
	int rv;

	while (done < count) {
		rv = write(fd, (char *)buf + done, count - done);
		if (rv == -1 && errno == EINTR)
			continue;
		if (rv < 0)
			return rv;
		done += rv;
	}
	return 0;
}
/* Grow the client and pollfd arrays by CLIENT_NALLOC entries,
   initializing the new slots as free (fd == -1).  On allocation
   failure the arrays and client_size are left unchanged.
   NOTE(review): callers (client_add) will retry; persistent OOM here
   means the daemon cannot accept more connections. */
static void client_alloc(void)
{
	struct client *new_client;
	struct pollfd *new_pollfd;
	int i;

	if (!client) {
		new_client = malloc(CLIENT_NALLOC * sizeof(struct client));
		new_pollfd = malloc(CLIENT_NALLOC * sizeof(struct pollfd));
	} else {
		/* use temporaries: assigning realloc's result directly to
		   client/pollfd would lose (and leak) the old arrays when
		   realloc fails */
		new_client = realloc(client, (client_size + CLIENT_NALLOC) *
					     sizeof(struct client));
		new_pollfd = realloc(pollfd, (client_size + CLIENT_NALLOC) *
					     sizeof(struct pollfd));
	}

	if (new_client)
		client = new_client;
	if (new_pollfd)
		pollfd = new_pollfd;

	if (!new_client || !new_pollfd) {
		log_error("can't alloc for client array");
		return;		/* keep old size; don't init unallocated slots */
	}

	for (i = client_size; i < client_size + CLIENT_NALLOC; i++) {
		client[i].workfn = NULL;
		client[i].deadfn = NULL;
		client[i].fd = -1;
		pollfd[i].fd = -1;
		pollfd[i].revents = 0;
	}
	client_size += CLIENT_NALLOC;
}
/* Close and clear client slot ci; the -1 fd removes it from polling
   and marks the slot free for reuse by client_add(). */
void client_dead(int ci)
{
	close(client[ci].fd);
	client[ci].workfn = NULL;
	client[ci].fd = -1;
	pollfd[ci].fd = -1;
}
/* Register fd in the client array with its work and dead handlers and
   add it to the pollfd set.  Returns the slot index.  Grows the arrays
   and rescans when no free slot exists. */
int client_add(int fd, void (*workfn)(int ci), void (*deadfn)(int ci))
{
	int i;

	if (!client)
		client_alloc();
 again:
	for (i = 0; i < client_size; i++) {
		if (client[i].fd == -1) {
			client[i].workfn = workfn;
			/* default dead handler just closes and clears the slot */
			if (deadfn)
				client[i].deadfn = deadfn;
			else
				client[i].deadfn = client_dead;
			client[i].fd = fd;
			pollfd[i].fd = fd;
			pollfd[i].events = POLLIN;
			if (i > client_maxi)
				client_maxi = i;
			return i;
		}
	}
	client_alloc();
	goto again;
}
/* SIGTERM: request shutdown; the main poll loop notices daemon_quit. */
static void sigterm_handler(int sig)
{
	daemon_quit = 1;
}
/* Allocate and zero a struct fd for domain "name" with all of its
   lists initialized empty.  Returns NULL if the name is too long or
   allocation fails; caller owns the result (free_fd/free). */
static struct fd *create_fd(char *name)
{
	struct fd *domain;

	if (strlen(name) > MAX_GROUPNAME_LEN)
		return NULL;

	domain = malloc(sizeof(struct fd));
	if (domain == NULL)
		return NULL;
	memset(domain, 0, sizeof(*domain));

	strcpy(domain->name, name);

	INIT_LIST_HEAD(&domain->changes);
	INIT_LIST_HEAD(&domain->node_history);
	INIT_LIST_HEAD(&domain->victims);
	INIT_LIST_HEAD(&domain->complete);
	INIT_LIST_HEAD(&domain->prev);
	INIT_LIST_HEAD(&domain->leaving);

	return domain;
}
/* Release everything owned by fd: queued changes, the started change,
   the node history, the four node lists, and fd itself. */
void free_fd(struct fd *fd)
{
	struct change *cg, *cg_safe;
	struct node_history *nodeh, *nodeh_safe;

	list_for_each_entry_safe(cg, cg_safe, &fd->changes, list) {
		list_del(&cg->list);
		free_cg(cg);
	}

	/* started_change is not on the changes list; free it separately */
	if (fd->started_change)
		free_cg(fd->started_change);

	list_for_each_entry_safe(nodeh, nodeh_safe, &fd->node_history, list) {
		list_del(&nodeh->list);
		free(nodeh);
	}

	free_node_list(&fd->victims);
	free_node_list(&fd->complete);
	free_node_list(&fd->prev);
	free_node_list(&fd->leaving);

	free(fd);
}
/* Return the domain on the global domains list whose name exactly
   matches, or NULL. */
struct fd *find_fd(char *name)
{
	struct fd *fd;

	list_for_each_entry(fd, &domains, list) {
		/* strcmp is the exact-equality test the old
		   strlen-compare-plus-strncmp combination emulated */
		if (!strcmp(fd->name, name))
			return fd;
	}
	return NULL;
}
/* Handle a join command from fence_tool: create the domain, read its
   config from ccs, then join via libgroup or libcpg depending on
   group_mode.  Returns 0 or a negative/cpg error. */
static int do_join(char *name)
{
	struct fd *fd;
	int rv;

	fd = find_fd(name);
	if (fd) {
		log_debug("join error: domain %s exists", name);
		rv = -EEXIST;
		goto out;
	}

	fd = create_fd(name);
	if (!fd) {
		rv = -ENOMEM;
		goto out;
	}

	rv = read_ccs(fd);
	if (rv) {
		free(fd);
		goto out;
	}

	/* fd_join() frees fd on failure; NOTE(review): presumably
	   fd_join_group() does too -- confirm in group.c */
	if (group_mode == GROUP_LIBGROUP)
		rv = fd_join_group(fd);
	else
		rv = fd_join(fd);
 out:
	return rv;
}
/* Handle a leave command from fence_tool for the named domain.
   Returns -EINVAL if no such domain, otherwise the leave result. */
static int do_leave(char *name)
{
	struct fd *fd = find_fd(name);

	if (!fd)
		return -EINVAL;

	return (group_mode == GROUP_LIBGROUP) ? fd_leave_group(fd)
					      : fd_leave(fd);
}
/* Handle an "external fencing happened" notification: broadcast it to
   the domain.  Not supported in libgroup mode (-ENOSYS); -EINVAL if
   the domain doesn't exist.  extra holds the victim's node name. */
static int do_external(char *name, char *extra, int extra_len)
{
	struct fd *fd = find_fd(name);

	if (!fd)
		return -EINVAL;

	if (group_mode == GROUP_LIBGROUP)
		return -ENOSYS;

	send_external(fd, name_to_nodeid(extra));
	return 0;
}
/* Initialize a reply header: magic/version, total length including
   extra_len payload bytes, the command echoed back, and the result
   code in the data field. */
static void init_header(struct fenced_header *h, int cmd, int result,
			int extra_len)
{
	memset(h, 0, sizeof(*h));

	h->magic = FENCED_MAGIC;
	h->version = FENCED_VERSION;
	h->len = sizeof(*h) + extra_len;
	h->command = cmd;
	h->data = result;
}
/* combines a header and the data and sends it back to the client in
   a single do_write() call */
static void do_reply(int f, int cmd, int result, char *buf, int buflen)
{
	char *reply;
	int reply_len;

	reply_len = sizeof(struct fenced_header) + buflen;
	reply = malloc(reply_len);
	if (!reply)
		return;	/* best effort; the client sees a short read */
	memset(reply, 0, reply_len);

	init_header((struct fenced_header *)reply, cmd, result, buflen);

	if (buf && buflen)
		memcpy(reply + sizeof(struct fenced_header), buf, buflen);

	do_write(f, reply, reply_len);

	free(reply);
}
/* Send the in-memory debug ring buffer to a query client: the header,
   then (if the buffer has wrapped) the oldest data from dump_point to
   the end, then the newest data from the start of the buffer. */
static void query_dump_debug(int f)
{
	struct fenced_header h;
	int extra_len;
	int len;

	/* in the case of dump_wrap, extra_len will go in two writes,
	   first the log tail, then the log head */
	if (dump_wrap)
		extra_len = FENCED_DUMP_SIZE;
	else
		extra_len = dump_point;

	init_header(&h, FENCED_CMD_DUMP_DEBUG, 0, extra_len);
	do_write(f, &h, sizeof(h));

	if (dump_wrap) {
		len = FENCED_DUMP_SIZE - dump_point;
		do_write(f, dump_buf + dump_point, len);
		len = dump_point;
	} else
		len = dump_point;

	/* NUL terminate the debug string */
	dump_buf[dump_point] = '\0';

	do_write(f, dump_buf, len);
}
/* Reply to a NODE_INFO query for data_nodeid (FENCED_NODEID_US means
   the local node).  The reply carries -ENOENT when no default domain
   exists. */
static void query_node_info(int f, int data_nodeid)
{
	struct fd *fd;
	struct fenced_node node;
	int nodeid, rv;

	/* zero the reply up front: the -ENOENT path previously sent
	   uninitialized stack bytes to the client */
	memset(&node, 0, sizeof(node));

	fd = find_fd("default");
	if (!fd) {
		rv = -ENOENT;
		goto out;
	}

	if (data_nodeid == FENCED_NODEID_US)
		nodeid = our_nodeid;
	else
		nodeid = data_nodeid;

	if (group_mode == GROUP_LIBGROUP)
		rv = set_node_info_group(fd, nodeid, &node);
	else
		rv = set_node_info(fd, nodeid, &node);
 out:
	do_reply(f, FENCED_CMD_NODE_INFO, rv, (char *)&node, sizeof(node));
}
/* Reply to a DOMAIN_INFO query.  The reply carries -ENOENT when no
   default domain exists. */
static void query_domain_info(int f)
{
	struct fd *fd;
	struct fenced_domain domain;
	int rv;

	/* zero the reply before the error path can goto out: the old
	   code memset after find_fd and could send stack garbage */
	memset(&domain, 0, sizeof(domain));

	fd = find_fd("default");
	if (!fd) {
		rv = -ENOENT;
		goto out;
	}

	domain.group_mode = group_mode;

	if (group_mode == GROUP_LIBGROUP)
		rv = set_domain_info_group(fd, &domain);
	else
		rv = set_domain_info(fd, &domain);
 out:
	do_reply(f, FENCED_CMD_DOMAIN_INFO, rv, (char *)&domain, sizeof(domain));
}
/* Reply to a DOMAIN_NODES query: the result field is the node count,
   -E2BIG when more than the caller's max exist (then only max entries
   are sent), or a negative error. */
static void query_domain_nodes(int f, int option, int max)
{
	struct fd *fd;
	int node_count = 0;
	struct fenced_node *nodes = NULL;
	int rv, result;

	fd = find_fd("default");
	if (!fd) {
		result = -ENOENT;
		node_count = 0;
		goto out;
	}

	if (group_mode == GROUP_LIBGROUP)
		rv = set_domain_nodes_group(fd, option, &node_count, &nodes);
	else
		rv = set_domain_nodes(fd, option, &node_count, &nodes);

	if (rv < 0) {
		result = rv;
		node_count = 0;
		goto out;
	}

	/* node_count is the number of structs copied/returned; the caller's
	   max may be less than that, in which case we copy as many as they
	   asked for and return -E2BIG */

	if (node_count > max) {
		result = -E2BIG;
		node_count = max;
	} else {
		result = node_count;
	}
 out:
	do_reply(f, FENCED_CMD_DOMAIN_NODES, result,
		 (char *)nodes, node_count * sizeof(struct fenced_node));

	if (nodes)
		free(nodes);
}
/* Handle one command connection from fence_tool: read and validate the
   header, read any trailing payload, dispatch the command, then close
   the connection (commands are one-shot). */
static void process_connection(int ci)
{
	struct fenced_header h;
	char *extra = NULL;
	int rv, extra_len;

	rv = do_read(client[ci].fd, &h, sizeof(h));
	if (rv < 0) {
		log_debug("connection %d read error %d", ci, rv);
		goto out;
	}

	if (h.magic != FENCED_MAGIC) {
		log_debug("connection %d magic error %x", ci, h.magic);
		goto out;
	}

	/* only the major (upper 16 bits of) version must match */
	if ((h.version & 0xFFFF0000) != (FENCED_VERSION & 0xFFFF0000)) {
		log_debug("connection %d version error %x", ci, h.version);
		goto out;
	}

	if (h.len > sizeof(h)) {
		extra_len = h.len - sizeof(h);
		extra = malloc(extra_len);
		if (!extra) {
			log_error("process_connection no mem %d", extra_len);
			goto out;
		}
		memset(extra, 0, extra_len);

		rv = do_read(client[ci].fd, extra, extra_len);
		if (rv < 0) {
			log_debug("connection %d extra read error %d", ci, rv);
			goto out;
		}
	}

	switch (h.command) {
	case FENCED_CMD_JOIN:
		do_join("default");
		break;
	case FENCED_CMD_LEAVE:
		do_leave("default");
		break;
	case FENCED_CMD_EXTERNAL:
		do_external("default", extra, extra_len);
		break;
	case FENCED_CMD_DUMP_DEBUG:
	case FENCED_CMD_NODE_INFO:
	case FENCED_CMD_DOMAIN_INFO:
	case FENCED_CMD_DOMAIN_NODES:
		/* queries belong on the query socket/thread */
		log_error("process_connection query on wrong socket");
		break;
	default:
		log_error("process_connection %d unknown command %d",
			  ci, h.command);
	}
 out:
	if (extra)
		free(extra);
	client_dead(ci);
}
static void process_listener(int ci)
{
int fd, i;
fd = accept(client[ci].fd, NULL, NULL);
if (fd < 0) {
log_error("process_listener: accept error %d %d", fd, errno);
return;
}
i = client_add(fd, process_connection, NULL);
log_debug("client connection %d fd %d", i, fd);
}
/* Create a listening stream socket bound to sock_path in the Linux
   abstract unix namespace (sun_path[0] stays '\0', so no filesystem
   entry).  Returns the listening fd or a negative error. */
static int setup_listener(char *sock_path)
{
	struct sockaddr_un addr;
	socklen_t addrlen;
	int rv, s;

	/* we listen for new client connections on socket s */

	s = socket(AF_LOCAL, SOCK_STREAM, 0);
	if (s < 0) {
		log_error("socket error %d %d", s, errno);
		return s;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_LOCAL;
	/* name goes after the leading NUL: abstract socket address */
	strcpy(&addr.sun_path[1], sock_path);
	addrlen = sizeof(sa_family_t) + strlen(addr.sun_path+1) + 1;

	rv = bind(s, (struct sockaddr *) &addr, addrlen);
	if (rv < 0) {
		log_error("bind error %d %d", rv, errno);
		close(s);
		return rv;
	}

	rv = listen(s, 5);
	if (rv < 0) {
		log_error("listen error %d %d", rv, errno);
		close(s);
		return rv;
	}
	return s;
}
/* Serialize the query thread against the main thread: held around all
   query handling and around the poll-loop handler calls. */
void query_lock(void)
{
	pthread_mutex_lock(&query_mutex);
}

void query_unlock(void)
{
	pthread_mutex_unlock(&query_mutex);
}
/* This is a thread, so we have to be careful, don't call log_ functions.
   We need a thread to process queries because the main thread will block
   for long periods when running fence agents. */

static void *process_queries(void *arg)
{
	struct fenced_header h;
	int s = *((int *)arg);
	int f, rv;

	for (;;) {
		f = accept(s, NULL, NULL);
		if (f < 0)
			continue;	/* was unchecked: avoid do_read/close on -1 */

		rv = do_read(f, &h, sizeof(h));
		if (rv < 0) {
			goto out;
		}

		if (h.magic != FENCED_MAGIC) {
			goto out;
		}

		if ((h.version & 0xFFFF0000) != (FENCED_VERSION & 0xFFFF0000)) {
			goto out;
		}

		/* exclude the main thread while reading shared state */
		pthread_mutex_lock(&query_mutex);

		switch (h.command) {
		case FENCED_CMD_DUMP_DEBUG:
			query_dump_debug(f);
			break;
		case FENCED_CMD_NODE_INFO:
			query_node_info(f, h.data);
			break;
		case FENCED_CMD_DOMAIN_INFO:
			query_domain_info(f);
			break;
		case FENCED_CMD_DOMAIN_NODES:
			query_domain_nodes(f, h.option, h.data);
			break;
		default:
			break;
		}
		pthread_mutex_unlock(&query_mutex);
 out:
		close(f);
	}
	return NULL;	/* not reached */
}
/* Create the query listener socket and start the query thread.
   Returns 0 on success or a negative error. */
static int setup_queries(void)
{
	/* static: the query thread dereferences this pointer after we
	   return; the old automatic 's' was dangling by then */
	static int query_sock;
	int rv;

	rv = setup_listener(FENCED_QUERY_SOCK_PATH);
	if (rv < 0)
		return rv;
	query_sock = rv;

	pthread_mutex_init(&query_mutex, NULL);

	/* pthread_create returns a positive errno value, never -1, so
	   the old "rv < 0" test could not detect failure and a positive
	   rv was returned as success */
	rv = pthread_create(&query_thread, NULL, process_queries, &query_sock);
	if (rv) {
		log_error("can't create query thread");
		close(query_sock);
		return -1;
	}
	return 0;
}
+/* A directory registered (-r) to be checked for leftover dlm/gfs
+   entries at startup. */
+struct controlled_entry {
+	struct list_head list;
+	char path[PATH_MAX+1];
+};
+
+/* Add path to the list checked by check_uncontrolled_entries();
+   silently ignored on allocation failure. */
+static void register_controlled_dir(char *path)
+{
+	struct controlled_entry *ce;
+
+	ce = malloc(sizeof(struct controlled_entry));
+	if (!ce)
+		return;
+	memset(ce, 0, sizeof(struct controlled_entry));
+	/* ce->path is PATH_MAX+1 bytes and zeroed, so strncpy with
+	   n=PATH_MAX always leaves a terminating NUL */
+	strncpy(ce->path, path, PATH_MAX);
+	list_add(&ce->list, &controlled_entries);
+}
+
+/* Return 1 if the gfs/gfs2 filesystem "table" under sysfs_dir uses
+   lock_nolock (local-only locking) and can be ignored by the
+   uncontrolled-entry check; 0 if it has a cluster lock module. */
+static int ignore_nolock(char *sysfs_dir, char *table)
+{
+	char path[PATH_MAX];
+	int fd;
+
+	memset(path, 0, PATH_MAX);
+
+	snprintf(path, PATH_MAX, "%s/%s/lock_module/proto_name",
+		 sysfs_dir, table);
+
+	/* lock_nolock doesn't create the "lock_module" dir at all,
+	   so we'll fail to open this */
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return 1;
+
+	close(fd);
+	return 0;
+}
+
+/* Count (and log) entries under path: each one is a dlm lockspace or
+   gfs mount left in the kernel, i.e. uncontrolled state.  Returns the
+   number found; 0 when the directory doesn't exist. */
+static int check_controlled_dir(char *path)
+{
+	DIR *d;
+	struct dirent *de;
+	int count = 0;
+
+	d = opendir(path);
+	if (!d)
+		return 0;
+
+	while ((de = readdir(d))) {
+		if (de->d_name[0] == '.')
+			continue;
+
+		/* local (lock_nolock) gfs mounts need no fencing */
+		if (strstr(path, "fs/gfs") && ignore_nolock(path, de->d_name))
+			continue;
+
+		log_error("found uncontrolled entry %s/%s", path, de->d_name);
+		count++;
+	}
+	closedir(d);
+
+	return count;
+}
+
+/* Joining the "fenced:daemon" cpg (in setup_cpg()) tells fenced on other
+ nodes that we are in a "clean state", and don't need fencing. So, if
+ we're a pending fence victim on another node, they'll skip fencing us
+ once we start fenced and join the "daemon" cpg (it's not the fence domain
+ cpg which we join when fence_tool join is run). This "daemon" cpg is just
+ to notify others that we have no uncontrolled gfs/dlm objects.
+ (Conceptually, we could use the fence domain cpg for this purpose instead,
+ but that would require processing domain membership changes during
+ fence_victims(), which would be a major change in the way the daemon works.)
+
+ So, if we (the local node) are *not* in a clean state, we don't join the
+ daemon cpg and we exit; we still need to be fenced. If we are starting
+ up and find that instances of gfs/dlm in the kernel have been previously
+ abandoned, that's an unclean, unreset state, and we still need fencing. */
+
+/* Return 0 if every controlled directory is empty (we are in a clean
+   state and may join the daemon cpg), -1 if uncontrolled dlm/gfs
+   entries exist and the daemon must not start. */
+static int check_uncontrolled_entries(void)
+{
+	struct controlled_entry *ce;
+	int count = 0;
+
+	/* a registered path of "-" means: skip the default directories.
+	   The old test, strncmp(ce->path, "-", 1) with no '!', was
+	   inverted -- any non-"-" path skipped the defaults while "-"
+	   itself did not */
+	list_for_each_entry(ce, &controlled_entries, list) {
+		if (!strncmp(ce->path, "-", 2))
+			goto skip_default;
+	}
+
+	/* the default dirs to check */
+	register_controlled_dir("/sys/kernel/dlm");
+	register_controlled_dir("/sys/fs/gfs2");
+	register_controlled_dir("/sys/fs/gfs");
+
+ skip_default:
+	list_for_each_entry(ce, &controlled_entries, list)
+		count += check_controlled_dir(ce->path);
+
+	if (count)
+		return -1;
+	return 0;
+}
+
/* Dead handler for cluster infrastructure fds (cman, groupd, daemon
   cpg): losing any of them means shutting the daemon down. */
void cluster_dead(int ci)
{
	log_error("cluster is down, exiting");
	daemon_quit = 1;
}
/* Main event loop: start the query thread and command listener,
   connect to cman/ccs, join the daemon cpg, then poll every registered
   fd and run its handler until shutdown is requested. */
static void loop(void)
{
	int rv, i;
	void (*workfn) (int ci);
	void (*deadfn) (int ci);

	rv = setup_queries();
	if (rv < 0)
		goto out;

	rv = setup_listener(FENCED_SOCK_PATH);
	if (rv < 0)
		goto out;
	client_add(rv, process_listener, NULL);

	rv = setup_cman();
	if (rv < 0)
		goto out;
	client_add(rv, process_cman, cluster_dead);

	rv = setup_ccs();
	if (rv < 0)
		goto out;

	setup_logging();

+	/* refuse to start while uncontrolled dlm/gfs kernel state exists;
+	   joining the daemon cpg below declares this node clean */
+	rv = check_uncontrolled_entries();
+	if (rv < 0)
+		goto out;
+
+	rv = setup_cpg();
+	if (rv < 0)
+		goto out;
+	client_add(rv, process_cpg, cluster_dead);
+
	group_mode = GROUP_LIBCPG;

	if (cfgd_groupd_compat) {
		rv = setup_groupd();
		if (rv < 0)
			goto out;
		client_add(rv, process_groupd, cluster_dead);
		group_mode = GROUP_LIBGROUP;

		if (cfgd_groupd_compat == 2)
			set_group_mode();
	}
	log_debug("group_mode %d compat %d", group_mode, cfgd_groupd_compat);

-	if (group_mode == GROUP_LIBCPG) {
-		/*
-		rv = setup_cpg();
-		if (rv < 0)
-			goto out;
-		client_add(rv, process_cpg, cluster_dead);
-		*/
-	}
-
	for (;;) {
		rv = poll(pollfd, client_maxi + 1, -1);
		if (rv == -1 && errno == EINTR) {
			/* SIGTERM path: quit once no domains remain */
			if (daemon_quit && list_empty(&domains))
				goto out;
			daemon_quit = 0;
			continue;
		}
		if (rv < 0) {
			log_error("poll errno %d", errno);
			goto out;
		}

		/* exclude the query thread while handlers run */
		pthread_mutex_lock(&query_mutex);
		for (i = 0; i <= client_maxi; i++) {
			if (client[i].fd < 0)
				continue;
			if (pollfd[i].revents & POLLIN) {
				workfn = client[i].workfn;
				workfn(i);
			}
			if (pollfd[i].revents & (POLLERR | POLLHUP | POLLNVAL)) {
				deadfn = client[i].deadfn;
				deadfn(i);
			}
		}
		pthread_mutex_unlock(&query_mutex);

		if (daemon_quit)
			break;
	}
 out:
	if (cfgd_groupd_compat)
		close_groupd();
+	close_cpg();
	close_logging();
	close_ccs();
	close_cman();

	if (!list_empty(&domains))
		log_error("domain abandoned");
}
/* Take an exclusive fcntl lock on /var/run/fenced.pid and write our
   pid into it; exits the process if another instance holds the lock
   or the file cannot be written. */
static void lockfile(void)
{
	int fd, error;
	struct flock lock;
	char buf[33];

	memset(buf, 0, 33);

	fd = open(LOCKFILE_NAME, O_CREAT|O_WRONLY,
		  S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
	if (fd < 0) {
		fprintf(stderr, "cannot open/create lock file %s\n",
			LOCKFILE_NAME);
		exit(EXIT_FAILURE);
	}

	lock.l_type = F_WRLCK;
	lock.l_start = 0;
	lock.l_whence = SEEK_SET;
	lock.l_len = 0;

	/* F_SETLK is non-blocking: fails at once if locked elsewhere */
	error = fcntl(fd, F_SETLK, &lock);
	if (error) {
		fprintf(stderr, "is already running\n");
		exit(EXIT_FAILURE);
	}

	error = ftruncate(fd, 0);
	if (error) {
		fprintf(stderr, "cannot clear lock file %s\n", LOCKFILE_NAME);
		exit(EXIT_FAILURE);
	}

	sprintf(buf, "%d\n", getpid());

	error = write(fd, buf, strlen(buf));
	if (error <= 0) {
		fprintf(stderr, "cannot write lock file %s\n", LOCKFILE_NAME);
		exit(EXIT_FAILURE);
	}
}
/* Print command-line help for -h; keep in sync with OPTION_STRING and
   read_arguments(). */
static void print_usage(void)
{
	printf("Usage:\n");
	printf("\n");
	printf("fenced [options]\n");
	printf("\n");
	printf("Options:\n");
	printf("\n");
	printf(" -D Enable debugging code and don't fork\n");
	printf(" -L <num> Enable (1) or disable (0) debugging to logsys (default %d)\n", DEFAULT_DEBUG_LOGSYS);
	printf(" -g <num> groupd compatibility mode, 0 off, 1 on, 2 detect (default %d)\n", DEFAULT_GROUPD_COMPAT);
	printf(" 0: use libcpg, no backward compat, best performance\n");
	printf(" 1: use libgroup for compat with cluster2/rhel5\n");
	printf(" 2: use groupd to detect old, or mode 1, nodes that\n"
	       " require compat, use libcpg if none found\n");
-	printf(" -c All nodes are in a clean state to start\n");
+	printf(" -r <path> Register a directory that needs to be empty for\n");
+	printf(" the daemon to start. \"-\" to skip default directories\n");
+	printf(" /sys/fs/gfs, /sys/fs/gfs2, /sys/kernel/dlm\n");
+	printf(" -c All nodes are in a clean state to start; do no startup fencing\n");
	printf(" -s Skip startup fencing of nodes with no defined fence methods\n");
	printf(" -j <secs> Post-join fencing delay (default %d)\n", DEFAULT_POST_JOIN_DELAY);
	printf(" -f <secs> Post-fail fencing delay (default %d)\n", DEFAULT_POST_FAIL_DELAY);
	printf(" -R <secs> Override time (default %d)\n", DEFAULT_OVERRIDE_TIME);
	printf(" -O <path> Override path (default %s)\n", DEFAULT_OVERRIDE_PATH);
	printf(" -h Print this help, then exit\n");
	printf(" -V Print program version information, then exit\n");
	printf("\n");
	printf("Command line values override those in " DEFAULT_CONFIG_DIR "/" DEFAULT_CONFIG_FILE ".\n");
	printf("For an unbounded delay use <secs> value of -1.\n");
	printf("\n");
}
-#define OPTION_STRING "L:g:cj:f:Dn:O:hVSs"
+/* must list every option read_arguments() handles; 'R' takes an
+   argument and was missing, so getopt rejected -R <secs> */
+#define OPTION_STRING "L:g:cj:f:Dn:O:R:hVSse:r:"
/* Parse command-line options into the optd_/cfgd_ config variables;
   command-line values override cluster.conf (see config.c).  The
   FENCED_DEBUG environment variable can also enable logsys debug. */
static void read_arguments(int argc, char **argv)
{
	int cont = 1;
	int optchar;

	while (cont) {
		optchar = getopt(argc, argv, OPTION_STRING);

		switch (optchar) {
		case 'D':
			daemon_debug_opt = 1;
			break;
		case 'L':
			optd_debug_logsys = 1;
			cfgd_debug_logsys = atoi(optarg);
			break;
		case 'g':
			optd_groupd_compat = 1;
			cfgd_groupd_compat = atoi(optarg);
			break;
		case 'c':
			optd_clean_start = 1;
			cfgd_clean_start = 1;
			break;
		case 's':
			optd_skip_undefined = 1;
			cfgd_skip_undefined = 1;
			break;
		case 'j':
			optd_post_join_delay = 1;
			cfgd_post_join_delay = atoi(optarg);
			break;
		case 'f':
			optd_post_fail_delay = 1;
			cfgd_post_fail_delay = atoi(optarg);
			break;
		case 'R':
			optd_override_time = 1;
			cfgd_override_time = atoi(optarg);
			/* clamp: agents need a few seconds to act */
			if (cfgd_override_time < 3)
				cfgd_override_time = 3;
			break;
		case 'O':
			optd_override_path = 1;
			cfgd_override_path = strdup(optarg);
			break;
+		case 'r':
+			register_controlled_dir(optarg);
+			break;
+
		case 'h':
			print_usage();
			exit(EXIT_SUCCESS);
			break;
		case 'V':
			printf("fenced %s (built %s %s)\n", RELEASE_VERSION,
				__DATE__, __TIME__);
			printf("%s\n", REDHAT_COPYRIGHT);
			exit(EXIT_SUCCESS);
			break;
		case ':':
		case '?':
			fprintf(stderr, "Please use '-h' for usage.\n");
			exit(EXIT_FAILURE);
			break;
		case EOF:
			cont = 0;
			break;
		default:
			/* NOTE(review): 'n' and 'e' appear in OPTION_STRING
			   but have no case here, so they land in this default
			   and abort -- confirm whether that is intended */
			fprintf(stderr, "unknown option: %c", optchar);
			exit(EXIT_FAILURE);
			break;
		};
	}

	if (!optd_debug_logsys && getenv("FENCED_DEBUG")) {
		optd_debug_logsys = 1;
		cfgd_debug_logsys = atoi(getenv("FENCED_DEBUG"));
	}
}
/* Lower this process's OOM-killer priority by writing val to
   /proc/self/oom_adj.  Silently a no-op when the file is unavailable. */
static void set_oom_adj(int val)
{
	FILE *fp = fopen("/proc/self/oom_adj", "w");

	if (fp == NULL)
		return;

	fprintf(fp, "%i", val);
	fclose(fp);
}
/* Entry point: parse options, daemonize unless -D, write the pid file,
   then run the poll loop until shutdown. */
int main(int argc, char **argv)
{
	INIT_LIST_HEAD(&domains);
+	INIT_LIST_HEAD(&controlled_entries);

	init_logging();

	read_arguments(argc, argv);

	if (!daemon_debug_opt) {
		if (daemon(0, 0) < 0) {
			perror("main: cannot fork");
			exit(EXIT_FAILURE);
		}
		umask(0);
	}

	/* lock AFTER daemon(): fcntl locks are not inherited across
	   fork, so locking before daemonizing left the daemon without
	   the lock and wrote the parent's pid into the pid file */
	lockfile();

	signal(SIGTERM, sigterm_handler);
	set_oom_adj(-16);

	loop();

	return 0;
}
/* Append the current daemon_debug_buf line to the dump ring buffer,
   wrapping (and marking dump_wrap) when the end is reached. */
void daemon_dump_save(void)
{
	int i, n;

	n = strlen(daemon_debug_buf);

	for (i = 0; i < n; i++) {
		dump_buf[dump_point++] = daemon_debug_buf[i];

		if (dump_point == FENCED_DUMP_SIZE) {
			dump_point = 0;
			dump_wrap = 1;
		}
	}
}
/* file-scope state shared across the daemon */
int daemon_debug_opt;		/* nonzero: foreground/debug mode (skips daemon() in main) */
int daemon_quit;		/* NOTE(review): presumably a quit flag set by sigterm_handler — confirm */
struct list_head domains;	/* fence domains; list head initialized in main() */
int cman_quorate;		/* NOTE(review): presumably tracks cluster quorum state — confirm */
int our_nodeid;			/* this node's id; used as the fencing actor in victim_done() */
char our_name[MAX_NODENAME_LEN+1];	/* NOTE(review): presumably this node's cluster name — confirm */
char daemon_debug_buf[256];	/* latest formatted debug message; source for daemon_dump_save() */
char dump_buf[FENCED_DUMP_SIZE];	/* circular debug-message buffer filled by daemon_dump_save() */
int dump_point;			/* next write index into dump_buf */
int dump_wrap;			/* set once dump_buf has wrapped around */
int group_mode;			/* GROUP_LIBGROUP (compat) vs GROUP_LIBCPG operation */
diff --git a/fence/fenced/recover.c b/fence/fenced/recover.c
index d70a87638..ecb13d51b 100644
--- a/fence/fenced/recover.c
+++ b/fence/fenced/recover.c
@@ -1,303 +1,305 @@
#include "fd.h"
#include "config.h"
/* Release every node entry linked on the given list, leaving it empty. */
void free_node_list(struct list_head *head)
{
	for (;;) {
		struct node *n;

		if (list_empty(head))
			break;
		n = list_entry(head->next, struct node, list);
		list_del(&n->list);
		free(n);
	}
}
/* Record nodeid on the domain's "complete" list. */
void add_complete_node(struct fd *fd, int nodeid)
{
	struct node *new_node = get_new_node(fd, nodeid);

	list_add(&new_node->list, &fd->complete);
}
int list_count(struct list_head *head)
{
struct list_head *tmp;
int count = 0;
list_for_each(tmp, head)
count++;
return count;
}
/* Return 1 if nodeid is currently on the domain's victims list,
 * 0 otherwise. */
int is_victim(struct fd *fd, int nodeid)
{
	struct node *v;
	int found = 0;

	list_for_each_entry(v, &fd->victims, list) {
		if (v->nodeid != nodeid)
			continue;
		found = 1;
		break;
	}
	return found;
}
/* Mark a victim as dealt with: record how/when it was fenced in the
 * node history and notify the other domain members.  A no-op in
 * libgroup compatibility mode. */
static void victim_done(struct fd *fd, int victim, int how)
{
	if (group_mode != GROUP_LIBGROUP) {
		node_history_fence(fd, victim, our_nodeid, how, time(NULL));
		send_victim_done(fd, victim);
	}
}
/* This routine should probe other indicators to check if victims
   can be reduced. Right now we just check if the victim has rejoined the
   cluster. */
static int reduce_victims(struct fd *fd)
{
	struct node *node, *safe;
	int num_victims;
	num_victims = list_count(&fd->victims);
	list_for_each_entry_safe(node, safe, &fd->victims, list) {
		/* a victim is only dropped once it is back in both the
		   cman cluster and the fence daemon's cpg membership */
-		if (is_cman_member(node->nodeid)) {
+		if (is_cman_member(node->nodeid) &&
+		    in_daemon_member_list(node->nodeid)) {
			log_debug("reduce victim %s", node->name);
			victim_done(fd, node->nodeid, VIC_DONE_MEMBER);
			list_del(&node->list);
			free(node);
			num_victims--;
		}
	}
	/* number of victims still pending after reduction */
	return num_victims;
}
/* Remove the override FIFO from the filesystem and close the read side,
 * setting *fd to -1 so it cannot be closed twice.  Tolerates a NULL fd
 * pointer and an already-closed (*fd < 0) descriptor. */
static inline void close_override(int *fd, char *path)
{
	unlink(path);
	if (fd == NULL)
		return;
	if (*fd >= 0)
		close(*fd);
	*fd = -1;
}
/* Create the override FIFO at path (user read/write only, regardless of
 * the caller's umask) and open it for non-blocking reads.  Returns the
 * open descriptor, or -1 if the FIFO could not be created. */
static int open_override(char *path)
{
	mode_t old_mask = umask(077);
	int rv = mkfifo(path, S_IRUSR | S_IWUSR);

	umask(old_mask);
	if (rv < 0)
		return -1;

	return open(path, O_RDONLY | O_NONBLOCK);
}
/* Wait up to timeout seconds for an administrator to write a node name
 * into the override FIFO.  Returns 1 when the name read matches nodename
 * (case-insensitively), 0 on timeout or mismatch, -1 on a select/read
 * error.  With no usable fd or name, just sleeps out the timeout. */
static int check_override(int ofd, char *nodename, int timeout)
{
	struct timeval tv = {0, 0};
	fd_set rfds;
	char buf[128];
	int rv, i;

	if (ofd < 0 || !nodename || !strlen(nodename)) {
		sleep(timeout);
		return 0;
	}

	FD_ZERO(&rfds);
	FD_SET(ofd, &rfds);
	tv.tv_usec = 0;
	tv.tv_sec = timeout;

	rv = select(ofd + 1, &rfds, NULL, NULL, &tv);
	if (rv < 0) {
		log_debug("check_override select: %s", strerror(errno));
		return -1;
	}
	if (!rv)
		return 0;

	memset(buf, 0, sizeof(buf));
	rv = read(ofd, buf, sizeof(buf) - 1);
	if (rv < 0) {
		log_debug("check_override read: %s", strerror(errno));
		return -1;
	}

	/* terminate the input at the first control character
	   (typically the trailing newline) */
	for (i = 0; i < rv; i++) {
		if (buf[i] < 0x20) {
			buf[i] = 0;
			break;
		}
	}

	/* case-insensitive match against the victim's node name */
	if (!strcasecmp(nodename, buf))
		return 1;

	return 0;
}
/* If there are victims after a node has joined, it's a good indication that
   they may be joining the cluster shortly. If we delay a bit they might
   become members and we can avoid fencing them. This is only really an issue
   when the fencing method reboots the victims. Otherwise, the nodes should
   unfence themselves when they start up. */
void delay_fencing(struct fd *fd, int node_join)
{
	struct timeval first, last, start, now;
	int victim_count, last_count = 0, delay = 0;
	struct node *node;
	char *delay_type;
	if (list_empty(&fd->victims))
		return;
	/* the delay used depends on whether this change was a join or a
	   node failure */
	if (node_join) {
		delay = cfgd_post_join_delay;
		delay_type = "post_join_delay";
	} else {
		delay = cfgd_post_fail_delay;
		delay_type = "post_fail_delay";
	}
	if (delay == 0)
		goto out;
	gettimeofday(&first, NULL);
	gettimeofday(&start, NULL);
	/* drop the query lock while polling so status queries stay
	   responsive — NOTE(review): presumed purpose, confirm */
	query_unlock();
	for (;;) {
		/* poll once a second; reduce_victims() drops victims that
		   have rejoined and returns how many remain */
		sleep(1);
		victim_count = reduce_victims(fd);
		if (victim_count == 0)
			break;
		if (victim_count < last_count) {
			/* a victim rejoined: restart the timer, and widen
			   the window to post_join_delay if that is longer */
			gettimeofday(&start, NULL);
			if (delay > 0 && cfgd_post_join_delay > delay) {
				delay = cfgd_post_join_delay;
				delay_type = "post_join_delay (modified)";
			}
		}
		last_count = victim_count;
		/* negative delay means wait forever */
		if (delay == -1)
			continue;
		gettimeofday(&now, NULL);
		if (now.tv_sec - start.tv_sec >= delay)
			break;
	}
	query_lock();
	gettimeofday(&last, NULL);
	log_debug("delay of %ds leaves %d victims",
		  (int) (last.tv_sec - first.tv_sec), victim_count);
 out:
	/* any node still listed here will actually be fenced */
	list_for_each_entry(node, &fd->victims, list) {
		log_debug("%s not a cluster member after %d sec %s",
			  node->name, delay, delay_type);
	}
}
/* We are not the domain master, so the pending victims will be fenced
 * by the master node; just log who that is. */
void defer_fencing(struct fd *fd)
{
	if (list_empty(&fd->victims))
		return;

	log_level(LOG_INFO, "fencing deferred to %s",
		  nodeid_to_name(fd->master));
}
/* Fence every node on the domain's victims list.  A victim is skipped
   ("averted") when it has rejoined both the cman cluster and the fence
   daemon's cpg, or when it was already fenced externally.  Otherwise
   fence_node() is retried until it succeeds or an administrator writes
   the node's name into the override FIFO. */
void fence_victims(struct fd *fd)
{
	struct node *node;
	int error;
	int override = -1;
-	int member, fenced;
+	int cman_member, cpg_member, ext;
	while (!list_empty(&fd->victims)) {
		node = list_entry(fd->victims.next, struct node, list);
-		member = is_cman_member(node->nodeid);
+		cman_member = is_cman_member(node->nodeid);
+		cpg_member = in_daemon_member_list(node->nodeid);
		if (group_mode == GROUP_LIBCPG)
-			fenced = is_fenced_external(fd, node->nodeid);
+			ext = is_fenced_external(fd, node->nodeid);
		else
-			fenced = 0;
+			ext = 0;
		/* no fencing needed: the node came back (cluster AND cpg
		   membership) or was fenced for us externally */
-		if (member || fenced) {
+		if ((cman_member && cpg_member) || ext) {
			log_debug("averting fence of node %s "
-				  "member %d external %d",
-				  node->name, member, fenced);
-			victim_done(fd, node->nodeid, member ? VIC_DONE_MEMBER :
-							      VIC_DONE_EXTERNAL);
+				  "cman member %d cpg member %d external %d",
+				  node->name, cman_member, cpg_member, ext);
+			victim_done(fd, node->nodeid,
+				    ext ? VIC_DONE_EXTERNAL : VIC_DONE_MEMBER);
			list_del(&node->list);
			free(node);
			continue;
		}
		log_level(LOG_INFO, "fencing node \"%s\"", node->name);
		/* for queries */
		fd->current_victim = node->nodeid;
		/* drop the query lock around the (possibly long-running)
		   fence agent */
		query_unlock();
		error = fence_node(node->name);
		query_lock();
		fd->current_victim = 0;
		log_level(LOG_INFO, "fence \"%s\" %s", node->name,
			  error ? "failed" : "success");
		if (!error) {
			victim_done(fd, node->nodeid, VIC_DONE_AGENT);
			list_del(&node->list);
			free(node);
			continue;
		}
		/* agent failed and no override FIFO is configured:
		   back off briefly, then retry this victim */
		if (!cfgd_override_path) {
			query_unlock();
			sleep(5);
			query_lock();
			continue;
		}
		query_unlock();
		/* Check for manual intervention */
		override = open_override(cfgd_override_path);
		if (check_override(override, node->name,
				   cfgd_override_time) > 0) {
			log_level(LOG_WARNING, "fence \"%s\" overridden by "
				  "administrator intervention", node->name);
			victim_done(fd, node->nodeid, VIC_DONE_OVERRIDE);
			list_del(&node->list);
			free(node);
		}
		close_override(&override, cfgd_override_path);
		query_lock();
	}
}
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Sat, Nov 23, 11:06 AM (1 d, 12 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1018637
Default Alt Text
(75 KB)
Attached To
Mode
rR Resource Agents
Attached
Detach File
Event Timeline
Log In to Comment