diff --git a/tools/crm_report.in b/tools/crm_report.in
index c2d48d7b58..ebfdf73c0e 100755
--- a/tools/crm_report.in
+++ b/tools/crm_report.in
@@ -1,504 +1,505 @@
 #!/bin/sh
 
  # Copyright (C) 2010 Andrew Beekhof <andrew@beekhof.net>
  #
  # This program is free software; you can redistribute it and/or
  # modify it under the terms of the GNU General Public
  # License as published by the Free Software Foundation; either
  # version 2.1 of the License, or (at your option) any later version.
  #
  # This software is distributed in the hope that it will be useful,
  # but WITHOUT ANY WARRANTY; without even the implied warranty of
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  # General Public License for more details.
  #
  # You should have received a copy of the GNU General Public
  # License along with this library; if not, write to the Free Software
  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  #
 
 
 # Note the quotes around `$TEMP': they are essential!
 TEMP=`getopt				\
     -o hv?xl:f:t:n:T:L:p:c:dSACHu:D:MVse:	\
     --long help,cts:,cts-log:,dest:,node:,nodes:,from:,to:,sos-mode,logfile:,as-directory,single-node,cluster:,user:,max-depth:,version,features,rsh:	\
     -n 'crm_report' -- "$@"`
 eval set -- "$TEMP"
 
 rsh="ssh -T"
 times=""
 tests=""
 nodes=""
 compress=1
 cluster="any"
 ssh_user="root"
 search_logs=1
 report_data=`dirname $0`
+maxdepth=5
 
 extra_logs=""
 sanitize_patterns="passw.*"
 log_patterns="CRIT: ERROR:"
 
 usage() {
 
 cat<<EOF
 crm_report - Create a tarball containing everything needed when reporting cluster problems
 
 
 Usage: `basename $0` --from date-time [--to date-time] [additional options] [dest]
 
 Options:
   -V			increase verbosity
   -v, --version         software version
      --features         software features
   -f, --from time       time prior to problems beginning: "YYYY-M-D H:M:S"  (do not forget the quotes)
   -t, --to time	        time at which all problems were resolved: "YYYY-M-D H:M:S" (default: 'now')
   -T, --cts test	CTS test or set of tests to extract
       --cts-log		CTS master logfile
   -n, --nodes nodes	node names for this cluster
 			only needed if the cluster is not active on the current machine
 			accepts both -n "a b" and -n a -n b
   -l, --logfile file	log file to collect, normally this will be determined automatically
   -p patt		additional regular expression to match variables to be removed
 			(default: "passw.*")
   -L patt		additional regular expression to match in log files for analysis
 			(default: $log_patterns)
   -M			collect only the logs specified by -l
   -S, --single-node	single node operation; don't try to start report collectors on other nodes
   -c, --cluster type	force the cluster type (corosync,openais,heartbeat,logmaster)
   -A, --openais		force the cluster type to be OpenAIS
   -C, --corosync	force the cluster type to be Corosync
   -H, --heartbeat	force the cluster type to be Heartbeat
   -u, --user user	ssh username for cluster nodes (default: root)
   -D, --max-depth       search depth to use when attempting to locate files
   -e, --rsh		specify the remote shell to use (default: ssh -T)
   --dest       		a custom destination directory/file
   dest			a custom destination directory/file
 
 Examples:
    crm_report -f "2011-12-14 13:05:00" unexplained-apache-failure
    crm_report -f 2011-12-14 -t 2011-12-15 something-that-took-multiple-days
    crm_report -f 13:05:00   -t 13:12:00   brief-outage
 EOF
 }
 
 case "$1" in
     -v|--version)   echo "@VERSION@ - @BUILD_VERSION@"; exit 0;;
     --features)     echo "@VERSION@ - @BUILD_VERSION@: @PCMK_FEATURES@"; exit 0;;
     --|-h|--help) usage; exit 0;;
 esac
 
 # Prefer helpers in the same directory if they exist, to simplify development
 if [ ! -f $report_data/report.common ]; then
     report_data=@datadir@/@PACKAGE@
 else
     echo "Using local helpers"
 fi
 
 . $report_data/report.common
 
 while true; do
     case "$1" in
 	-x) set -x; shift;;
 	-V) verbose=`expr $verbose + 1`; shift;;
 	-T|--cts) tests="$tests $2"; shift; shift;;
 	   --cts-log) ctslog="$2"; shift; shift;;
 	-f|--from) start_time=`get_time "$2"`; shift; shift;;
 	-t|--to) end_time=`get_time "$2"`; shift; shift;;
 	-n|--node|--nodes) nodes="$nodes $2"; shift; shift;;
 	-S|--single-node) nodes="$host"; shift;;
 	-E|-l|--logfile) extra_logs="$extra_logs $2"; shift; shift;;
 	-p) sanitize_patterns="$sanitize_patterns $2"; shift; shift;;
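 	# The sed turns spaces into "\W" so a multi-word pattern stays a
 	# single token when $log_patterns is later split on whitespace.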
 	-L) log_patterns="$log_patterns `echo $2 | sed 's/ /\\\W/g'`"; shift; shift;;
 	-d|--as-directory) compress=0; shift;;
 	-A|--openais)   cluster="openais";   shift;;
 	-C|--corosync)  cluster="corosync";  shift;;
 	-H|--heartbeat) cluster="heartbeat"; shift;;
 	-c|--cluster)   cluster="$2"; shift; shift;;
 	-e|--rsh)       rsh="$2";     shift; shift;;
 	-u|--user)      ssh_user="$2"; shift; shift;;
         -D|--max-depth)     maxdepth="$2"; shift; shift;;
 	-M) search_logs=0; shift;;
         --sos-mode) search_logs=0; extra_logs="/var/log/pacemaker.log /var/log/cluster/corosync.log"; nodes="$host"; shift;;
 	--dest) DESTDIR=$2; shift; shift;;
 	--) if [ ! -z "$2" ]; then DESTDIR="$2"; fi; break;;
 	-h|--help) usage; exit 0;;
 	# Options for compatibility with hb_report
 	-s) shift;;
 
 	*) echo "Unknown argument: $1"; usage; exit 1;;
     esac
 done
 
 
 collect_data() {
     label="$1"
     start=`expr $2 - 10`
     end=`expr $3 + 10`
     masterlog=$4
 
     if [ "x$DESTDIR" != x ]; then
 	echo $DESTDIR | grep -e "^/" -qs
 	if [ $? = 0 ]; then
 	    l_base=$DESTDIR
 	else
 	    l_base="`pwd`/$DESTDIR"
 	fi
 	debug "Using custom scratch dir: $l_base"
 	r_base=`basename $l_base`
     else
 	l_base=$HOME/$label
 	r_base=$label
     fi
 
     if [ -e $l_base ]; then
 	fatal "Output directory $l_base already exists, specify an alternate name with --dest"
     fi
     mkdir -p $l_base
 
     if [ "x$masterlog" != "x" ]; then
 	dumplogset "$masterlog" $start $end > "$l_base/$HALOG_F"
     fi
 
     for node in $nodes; do
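 	# Build a self-contained collector script for each node: an .env
 	# preamble carrying the collection parameters, followed by
 	# report.common and report.collector.  It runs directly for the
 	# local host; for remote nodes it is piped through $rsh and the
 	# results stream back as a tar archive.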
 	cat <<EOF >$l_base/.env
 LABEL="$label"
 REPORT_HOME="$r_base"
 REPORT_MASTER="$host"
 REPORT_TARGET="$node"
 LOG_START=$start
 LOG_END=$end
 REMOVE=1
 SANITIZE="$sanitize_patterns"
 CLUSTER=$cluster
 LOG_PATTERNS="$log_patterns"
 EXTRA_LOGS="$extra_logs"
 SEARCH_LOGS=$search_logs
 verbose=$verbose
 maxdepth=$maxdepth
 EOF
 
 	if [ $host = $node ]; then
 	    cat <<EOF >>$l_base/.env
 REPORT_HOME="$l_base"
 EOF
 	    cat $l_base/.env $report_data/report.common $report_data/report.collector > $l_base/collector
 	    bash $l_base/collector
 	else
 	    cat $l_base/.env $report_data/report.common $report_data/report.collector \
 		| $rsh -l $ssh_user $node -- "mkdir -p $r_base; cat > $r_base/collector; bash $r_base/collector" | (cd $l_base && tar mxf -)
 	fi
     done
 
     analyze $l_base > $l_base/$ANALYSIS_F
     if [ -f $l_base/$HALOG_F ]; then
 	node_events $l_base/$HALOG_F > $l_base/$EVENTS_F
     fi
 
     for node in $nodes; do
 	cat $l_base/$node/$ANALYSIS_F >> $l_base/$ANALYSIS_F
 	if [ -s $l_base/$node/$EVENTS_F ]; then
 	    cat $l_base/$node/$EVENTS_F >> $l_base/$EVENTS_F
 	elif [ -s $l_base/$HALOG_F ]; then
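 	    # No per-node events file; extract this node's entries
 	    # (field 4 is the node name) from the combined events file.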
 	    awk "\$4==\"$nodes\"" $l_base/$EVENTS_F >> $l_base/$n/$EVENTS_F
 	fi
     done
 
     log " "
     if [ $compress = 1 ]; then
 	fname=`shrink $l_base`
 	rm -rf $l_base
 	log "Collected results are available in $fname"
 	log " "
 	log "Please create a bug entry at"
 	log "    http://developerbugs.linux-foundation.org/enter_bug.cgi?product=Pacemaker"
 	log "Include a description of your problem and attach this tarball"
 	log " "
 	log "Thank you for taking time to create this report."
     else
 	log "Collected results are available in $l_base"
     fi
     log " "
 }
 
 #
 # check if files have same content in the cluster
 #
 cibdiff() {
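     # Only compare CIBs captured with the cluster in the same state;
     # a RUNNING or STOPPED marker file is left beside each copy
     # during collection.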
     d1=`dirname $1`
     d2=`dirname $2`
     if [ -f $d1/RUNNING -a -f $d2/RUNNING ] ||
 	[ -f $d1/STOPPED -a -f $d2/STOPPED ]; then
 	if which crm_diff > /dev/null 2>&1; then
 	    crm_diff -c -n $1 -o $2
 	else
 	    info "crm_diff(8) not found, cannot diff CIBs"
 	fi
     else
 	echo "can't compare cibs from running and stopped systems"
     fi
 }
 
 diffcheck() {
     [ -f "$1" ] || {
 	echo "$1 does not exist"
 	return 1
     }
     [ -f "$2" ] || {
 	echo "$2 does not exist"
 	return 1
     }
     case `basename $1` in
 	$CIB_F)  cibdiff $1 $2;;
 	$B_CONF) diff -u $1 $2;; # confdiff?
 	*)       diff -u $1 $2;;
     esac
 }
 
 #
 # remove duplicates if files are same, make links instead
 #
 consolidate() {
     for n in $NODES; do
 	if [ -f $1/$2 ]; then
 	    rm $1/$n/$2
 	else
 	    mv $1/$n/$2 $1
 	fi
 	ln -s ../$2 $1/$n
     done
 }
 
 analyze_one() {
     rc=0
     node0=""
     for n in $NODES; do
 	if [ "$node0" ]; then
 	    diffcheck $1/$node0/$2 $1/$n/$2
 	    rc=$(($rc+$?))
 	else
 	    node0=$n
 	fi
     done
     return $rc
 }
 
 analyze() {
     flist="$HOSTCACHE $MEMBERSHIP_F $CIB_F $CRM_MON_F $B_CONF logd.cf $SYSINFO_F"
     for f in $flist; do
 	printf "Diff $f... "
 	ls $1/*/$f >/dev/null 2>&1 || {
 	    echo "no $1/*/$f :/"
 	    continue
 	}
 	if analyze_one $1 $f; then
 	    echo "OK"
 	    [ "$f" != $CIB_F ] && consolidate $1 $f
 	else
 	    echo ""
 	fi
     done
 }
 
 do_cts() {
     test_sets=`echo $tests | tr ',' ' '`
     for test_set in $test_sets; do
 
 	start_time=0
 	start_test=`echo $test_set | tr '-' ' ' | awk '{print $1}'`
 
 	end_time=0
 	end_test=`echo $test_set | tr '-' ' ' | awk '{print $2}'`
 
 	if [ x$end_test = x ]; then
 	    msg="Extracting test $start_test"
 	    label="CTS-$start_test-`date +"%b-%d-%Y"`"
 	    end_test=`expr $start_test + 1`
 	else
 	    msg="Extracting tests $start_test to $end_test"
 	    label="CTS-$start_test-$end_test-`date +"%b-%d-%Y"`"
 	    end_test=`expr $end_test + 1`
 	fi
 
 	if [ $start_test = 0 ]; then
 	    start_pat="BEGINNING [0-9].* TESTS"
 	else
 	    start_pat="Running test.*\[ *$start_test\]"
 	fi
 
 	if [ x$ctslog = x ]; then
 	    ctslog=`findmsg 1 "$start_pat"`
 
 	    if [ x$ctslog = x ]; then
 		fatal "No CTS control file detected"
 	    else
 		log "Using CTS control file: $ctslog"
 	    fi
 	fi
 
 	line=`grep -n "$start_pat" $ctslog | tail -1 | sed 's/:.*//'`
 	if [ ! -z "$line" ]; then
 	    start_time=`linetime $ctslog $line`
 	fi
 
 	line=`grep -n "Running test.*\[ *$end_test\]" $ctslog | tail -1 | sed 's/:.*//'`
 	if [ ! -z "$line" ]; then
 	    end_time=`linetime $ctslog $line`
 	fi
 
 	if [ -z "$nodes" ]; then
 	    nodes=`grep CTS: $ctslog | grep -v debug: | grep " \* " | sed s:.*\\\*::g | sort -u  | tr '\\n' ' '`
 	    log "Calculated node list: $nodes"
 	fi
 
 	if [ $end_time -lt $start_time ]; then
 	    debug "Test didn't complete, grabbing everything up to now"
 	    end_time=`date +%s`
 	fi
 
 	if [ $start_time != 0 ];then
 	    log "$msg (`time2str $start_time` to `time2str $end_time`)"
 	    collect_data $label $start_time $end_time $ctslog
 	else
 	    fatal "$msg failed: not found"
 	fi
     done
 }
 
 node_names_from_xml() {
     awk '
       /uname/ {
             for( i=1; i<=NF; i++ )
                     if( $i~/^uname=/ ) {
                             sub("uname=.","",$i);
                             sub("\".*","",$i);
                             print $i;
                             next;
                     }
       }
     ' | tr '\n' ' '
 }
 
 getnodes() {
     cluster="$1"
 
     # 1. Live (cluster nodes or Pacemaker Remote nodes)
     # TODO: This will not detect Pacemaker Remote nodes unless they
     # have ever had a permanent node attribute set, because it only
     # searches the nodes section. It should also search the config
     # for resources that create Pacemaker Remote nodes.
     cib_nodes=$(cibadmin -Ql -o nodes 2>/dev/null)
     if [ $? -eq 0 ]; then
 	debug "Querying CIB for nodes"
         echo "$cib_nodes" | node_names_from_xml
         return
     fi
 
     # 2. Saved
     if [ -f "@CRM_CONFIG_DIR@/cib.xml" ]; then
 	debug "Querying on-disk CIB for nodes"
         grep "node " "@CRM_CONFIG_DIR@/cib.xml" | node_names_from_xml
         return
     fi
 
     # 3. hostcache
     if [ -z "$HA_STATE_DIR" ]; then
         HA_STATE_DIR=/var/lib/heartbeat
     fi
     if [ -f "$HA_STATE_DIR/hostcache" ]; then
 	debug "Reading nodes from $HA_STATE_DIR/hostcache"
         awk '{print $1}' "$HA_STATE_DIR/hostcache"
         return
     fi
 
     # 4. ha.cf
     if [ "x$cluster" = "xheartbeat" ]; then
         cluster_cf=$(find_cluster_cf $cluster)
 	debug "Reading nodes from $cluster_cf"
         getcfvar $cluster node "$cluster_cf"
         return
     fi
 
     # 5. logs
     # TODO: This has multiple issues:
     # * It looks for messages from crm_update_peer(), which is used only by
     #   heartbeat and legacy plugin clusters; it should work with CMAN and
     #   corosync2 clusters as well.
     # * It does a findmsg for "crm_update_peer" (which will hit
     #   "crm_update_peer_proc" etc.), but then greps for "crm_update_peer:".
     # * It always uses grep, even though $logfile might be compressed.
     #   For this reason and efficiency, it would be nice if findmsg could
     #   optionally print the matches instead of the file names.
     # * It would be nice to skip this step for Pacemaker Remote nodes since their
     #   logs will not have node names, but it is nontrivial to know that.
     #   Cluster nodes generally won't get here, but stopped Pacemaker Remote
     #   nodes will.
     logfile=$(findmsg 1 "crm_update_peer")
     debug "Looking for nodes in $logfile"
     if [ ! -z "$logfile" ]; then
         grep crm_update_peer: "$logfile" \
             | sed s/.*crm_update_peer// \
             | sed s/://g \
             | awk '{print $2}' \
             | grep -v "(null)" \
             | sort -u \
             | tr '\n' ' '
     fi
 }
 
 if [ "x$tests" != "x" ]; then
     do_cts
 
 elif [ "x$start_time" != "x" ]; then
     masterlog=""
 
     if [ -z "$sanitize_patterns" ]; then
 	log "WARNING: The tarball produced by this program may contain"
 	log "         sensitive information such as passwords."
 	log ""
 	log "We will attempt to remove such information if you use the"
 	log "-p option. For example: -p \"pass.*\" -p \"user.*\""
 	log ""
 	log "However, doing this may reduce the ability for the recipients"
 	log "to diagnose issues and generally provide assistance."
 	log ""
 	log "IT IS YOUR RESPONSIBILITY TO PROTECT SENSITIVE DATA FROM EXPOSURE"
 	log ""
     fi
 
     # If user didn't specify a cluster stack, make a best guess if possible.
     if [ -z "$cluster" ] || [ "$cluster" = "any" ]; then
         cluster=$(get_cluster_type)
     fi
 
     # If user didn't specify node(s), make a best guess if possible.
     if [ -z "$nodes" ]; then
 	nodes=`getnodes $cluster`
         if [ -n "$nodes" ]; then
             log "Calculated node list: $nodes"
         else
             fatal "Cannot determine nodes; specify --nodes or --single-node"
         fi
     fi
 
     if
 	echo $nodes | grep -qs $host
     then
 	debug "We are a cluster node"
     else
 	debug "We are a log master"
 	masterlog=`findmsg 1 "crmd\\|CTS"`
     fi
 
 
     if [ -z $end_time ]; then
 	end_time=`perl -e 'print time()'`
     fi
     label="pcmk-`date +"%a-%d-%b-%Y"`"
     log "Collecting data from $nodes (`time2str $start_time` to `time2str $end_time`)"
     collect_data $label $start_time $end_time $masterlog
 else
     fatal "Not sure what to do, no tests or time ranges to extract"
 fi
 
 # vim: set expandtab tabstop=8 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/tools/report.common.in b/tools/report.common.in
index c3e4d0f812..bfc8e76733 100644
--- a/tools/report.common.in
+++ b/tools/report.common.in
@@ -1,853 +1,852 @@
  # Copyright (C) 2007 Dejan Muhamedagic <dmuhamedagic@suse.de>
  #			Almost everything as part of hb_report
  # Copyright (C) 2010 Andrew Beekhof <andrew@beekhof.net>
  #			Cleanups, refactoring, extensions
  #
  #
  # This program is free software; you can redistribute it and/or
  # modify it under the terms of the GNU General Public
  # License as published by the Free Software Foundation; either
  # version 2.1 of the License, or (at your option) any later version.
  #
  # This software is distributed in the hope that it will be useful,
  # but WITHOUT ANY WARRANTY; without even the implied warranty of
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  # General Public License for more details.
  #
  # You should have received a copy of the GNU General Public
  # License along with this library; if not, write to the Free Software
  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  #
 
 host=`uname -n`
 shorthost=`echo $host | sed s:\\\\..*::`
 if [ -z $verbose ]; then
     verbose=0
 fi
 
 # Target Files
 EVENTS_F=events.txt
 ANALYSIS_F=analysis.txt
 DESCRIPTION_F=description.txt
 HALOG_F=cluster-log.txt
 BT_F=backtraces.txt
 SYSINFO_F=sysinfo.txt
 SYSSTATS_F=sysstats.txt
 DLM_DUMP_F=dlm_dump.txt
 CRM_MON_F=crm_mon.txt
 MEMBERSHIP_F=members.txt
 HB_UUID_F=hb_uuid.txt
 HOSTCACHE=hostcache
 CRM_VERIFY_F=crm_verify.txt
 PERMISSIONS_F=permissions.txt
 CIB_F=cib.xml
 CIB_TXT_F=cib.txt
 
 EVENT_PATTERNS="
 state		do_state_transition
 membership	pcmk_peer_update.*(lost|memb):
 quorum		crmd.*crm_update_quorum|crmd.*ais.disp.*quorum.(lost|ac?quir)
 pause		Process.pause.detected
 resources	lrmd.*rsc:(start|stop)
 stonith		te_fence_node|stonith-ng.*log_oper.*report|stonithd.*(requests|(Succeeded|Failed).to.STONITH|result=)
 start_stop	shutdown.decision|Starting.heartbeat|Corosync.Cluster.Engine|corosync.*Initializing.transport|Executive.Service.RELEASE|crm_shutdown:.Requesting.shutdown|pcmk_shutdown:.Shutdown.complete
 "
 
 # superset of all packages of interest on all distros
 # (the package manager will be used to validate the installation
 # of any of these packages that are installed)
 PACKAGES="pacemaker pacemaker-libs pacemaker-cluster-libs libpacemaker3
 pacemaker-remote pacemaker-pygui pacemaker-pymgmt pymgmt-client
 openais libopenais2 libopenais3 corosync libcorosync4
 resource-agents cluster-glue-libs cluster-glue libglue2 ldirectord
 heartbeat heartbeat-common heartbeat-resources libheartbeat2
 ocfs2-tools ocfs2-tools-o2cb ocfs2console
 ocfs2-kmp-default ocfs2-kmp-pae ocfs2-kmp-xen ocfs2-kmp-debug ocfs2-kmp-trace
 drbd drbd-kmp-xen drbd-kmp-pae drbd-kmp-default drbd-kmp-debug drbd-kmp-trace
 drbd-heartbeat drbd-pacemaker drbd-utils drbd-bash-completion drbd-xen
 lvm2 lvm2-clvm cmirrord
 libdlm libdlm2 libdlm3
 hawk ruby lighttpd
 kernel-default kernel-pae kernel-xen
 glibc
 "
 
 # Potential locations of system log files
 SYSLOGS="
     /var/log/*
     /var/logs/*
     /var/syslog/*
     /var/adm/*
     /var/log/ha/*
     /var/log/cluster/*
 "
 
 #
 # keep the user posted
 #
 record() {
     if [ x != x"$REPORT_HOME" -a -d "${REPORT_HOME}/$shorthost" ]; then
         rec="${REPORT_HOME}/$shorthost/report.out"
 
     elif [ x != x"${l_base}" -a -d "${l_base}" ]; then
         rec="${l_base}/report.summary"
 
     else
         rec="/dev/null"
     fi
     printf "%-10s  $*\n" "$shorthost:" 2>&1 >> "${rec}"
 }
 
 log() {
     printf "%-10s  $*\n" "$shorthost:" 1>&2
     record "$*"
 }
 
 debug() {
     if [ $verbose -gt 0 ]; then
 	log "Debug: $*"
     else
         record "Debug: $*"
     fi
 }
 
 info() {
     log "$*"
 }
 
 warning() {
     log "WARN: $*"
 }
 
 fatal() {
     log "ERROR: $*"
     exit 1
 }
 
 is_running() {
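     # Bracket the first character of the pattern (e.g. "[c]orosync")
     # so the egrep below does not match its own ps entry.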
     ps -ef | egrep -qs $(echo "$1" | sed -e 's/^\(.\)/[\1]/')
 }
 
-detect_host() {
-    if [ -z "$maxdepth" ]; then
-        depth="-maxdepth 5"
+detect_daemon_dir() {
+    info "Searching for where Pacemaker daemons live... this may take a while"
+
+    for d in \
+        {/usr,/usr/local,/opt/local,@exec_prefix@}/{libexec,lib64,lib}/{pacemaker,heartbeat}
+    do
+        # Both the pacemaker and pacemaker-cts packages can install files
+        # to the daemon directory, so check for a file from each
+        if [ -e $d/pengine ] || [ -e $d/lrmd_test ]; then
+            echo $d
+            info "Found: $d"
+            return
+        fi
+    done
+
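+    # Fall back to a bounded filesystem search (limited to $maxdepth
+    # levels) before concluding the daemons are absent.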
+    for f in $(find / -maxdepth $maxdepth -type f \( -name pengine -o -name lrmd_test \)); do
+        d=$(dirname "$f")
+        echo $d
+        info "Found: $d"
+        return
+    done
+
+    # Pacemaker Remote nodes don't need to install daemons
+    # TODO: pacemaker_remoted might not be on path
+    if which pacemaker_remoted >/dev/null 2>&1; then
+        info "Not found (this appears to be a Pacemaker Remote node)"
     else
-        depth="-maxdepth $maxdepth"
+        fatal "Pacemaker daemons not found (nonstandard installation?)"
     fi
+}
 
+detect_host() {
     local_state_dir=@localstatedir@
 
     if [ -d $local_state_dir/run ]; then
 	CRM_STATE_DIR=$local_state_dir/run/crm
     else
         info "Searching for where Pacemaker keeps runtime data... this may take a while"
-	for d in `find / $depth -type d -name run`; do
+	for d in `find / -maxdepth $maxdepth -type d -name run`; do
 	    local_state_dir=`dirname $d`
 	    CRM_STATE_DIR=$d/crm
 	    break
 	done
 	info "Found: $CRM_STATE_DIR"
     fi
     debug "Machine runtime directory: $local_state_dir"
     debug "Pacemaker runtime data located in: $CRM_STATE_DIR"
 
-    CRM_DAEMON_DIR=
-    for p in /usr /usr/local /opt/local @exec_prefix@; do
-	for d in libexec lib64 lib; do
-	    if [ -e $p/$d/pacemaker/pengine ]; then
-		CRM_DAEMON_DIR=$p/$d/pacemaker
-		break
-	    elif [ -e $p/$d/heartbeat/pengine ]; then
-		CRM_DAEMON_DIR=$p/$d/heartbeat
-		break
-	    fi
-	done
-    done
-
-    if [ ! -d $CRM_DAEMON_DIR ]; then
-        info "Searching for where Pacemaker daemons live... this may take a while"
-	for f in `find / $depth -type f -name pengine`; do
-	    CRM_DAEMON_DIR=`dirname $f`
-	    break
-	done
-	info "Found: $CRM_DAEMON_DIR"
-    fi
-
-    if [ -z $CRM_DAEMON_DIR ]; then
-	fatal "Non-standard Pacemaker installation: daemons not found"
-    else
-	debug "Pacemaker daemons located under: $CRM_DAEMON_DIR"
-    fi
+    CRM_DAEMON_DIR=$(detect_daemon_dir) || exit 1
 
     CRM_CONFIG_DIR=
     for d in pacemaker/cib heartbeat/crm; do
 	if [ -f $local_state_dir/lib/$d/cib.xml ]; then
 	    CRM_CONFIG_DIR=$local_state_dir/lib/$d
 	    break
 	fi
     done
 
     if [ ! -d $CRM_CONFIG_DIR ]; then
         info "Detecting where Pacemaker keeps config information... this may take a while"
-	for f in `find / $depth -type f -name cib.xml`; do
+	for f in `find / -maxdepth $maxdepth -type f -name cib.xml`; do
 	    CRM_CONFIG_DIR=`dirname $f`
 	    break
 	done
 	info "Found: $CRM_CONFIG_DIR"
     fi
     if [ -z $CRM_CONFIG_DIR ]; then
 	warning "Non-standard Pacemaker installation: config not found"
     else
 	debug "Pacemaker config files located in: $CRM_CONFIG_DIR"
     fi
 
     # Assume new layout
     # $local_state_dir/lib/pacemaker/(cib,pengine,blackbox,cores)
     config_root=`dirname $CRM_CONFIG_DIR`
 
     # Older versions had none
     BLACKBOX_DIR=$config_root/blackbox
     debug "Pacemaker blackboxes (if any) located in: $BLACKBOX_DIR"
 
     PE_STATE_DIR=$config_root/pengine
     if [ ! -d $PE_STATE_DIR ]; then
 	info "Detecting where Pacemaker keeps Policy Engine inputs... this may take a while"
-	for d in `find / $depth -type d -name pengine`; do
+	for d in `find / -maxdepth $maxdepth -type d -name pengine`; do
 	    PE_STATE_DIR=$d
 	    break
 	done
 	info "Found: $PE_STATE_DIR"
     fi
     if [ -z $PE_STATE_DIR ]; then
 	fatal "Non-standard Pacemaker installation: Policy Engine directory not found"
     else
 	debug "PE files located in: $PE_STATE_DIR"
     fi
 
     HA_STATE_DIR=$local_state_dir/lib/heartbeat
     debug "Assuming Heartbeat state files, if any, are located in: $HA_STATE_DIR"
 
     CRM_CORE_DIRS=""
     for d in $config_root/cores $HA_STATE_DIR/cores $local_state_dir/lib/corosync $local_state_dir/lib/openais; do
 	if [ -d $d ]; then
 	    CRM_CORE_DIRS="$CRM_CORE_DIRS $d"
 	fi
     done
     debug "Core files located under: $CRM_CORE_DIRS"
 }
 
 time2str() {
 	perl -e "use POSIX; print strftime('%x %X',localtime($1));"
 }
 
 get_time() {
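 	# Convert a date/time string to epoch seconds via Date::Parse,
 	# falling back to Date::Manip; print nothing if unparsable.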
 	perl -e "\$time=\"$*\";" -e '
 	$unix_tm = 0;
 	eval "use Date::Parse";
 	if (index($time, ":") < 0) {
 	} elsif (!$@) {
 		$unix_tm = str2time($time);
 	} else {
 		eval "use Date::Manip";
 		if (!$@) {
 			$unix_tm = UnixDate(ParseDateString($time), "%s");
 		}
 	}
 	if ($unix_tm != "") {
 		print int($unix_tm);
 	} else {
 		print "";
 	}
 	'
 }
 
 get_time_() {
     warning "Unknown time format used by: $*"
 }
 
 get_time_syslog() {
     awk '{print $1,$2,$3}'
 }
 
 get_time_legacy() {
     awk '{print $2}' | sed 's/_/ /'
 }
 
 get_time_iso8601() {
     awk '{print $1}'
 }
 
 get_time_format_for_string() {
     l="$*"
     t=$(get_time `echo $l | get_time_syslog`)
     if [ "x$t" != x ]; then
 	echo syslog
 	return
     fi
 
     t=$(get_time `echo $l | get_time_iso8601`)
     if [ "x$t" != x ]; then
 	echo iso8601
 	return
     fi
 
     t=$(get_time `echo $l | get_time_legacy`)
     if [ "x$t" != x ]; then
 	echo legacy
 	return
     fi
 }
 
 get_time_format() {
     t=0 l="" func=""
     trycnt=10
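     # Sample up to 10 lines and report the first timestamp format
     # (syslog, iso8601 or legacy) that get_time can parse.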
     while [ $trycnt -gt 0 ] && read l; do
 	func=$(get_time_format_for_string $l)
 	if [ "x$func" != x ]; then
 	    break
 	fi
 	trycnt=$(($trycnt-1))
     done
     #debug "Logfile uses the $func time format"
     echo $func
 }
 
 get_first_time() {
     l=""
     format=$1
     while read l; do
 	t=$(echo $l | get_time_$format)
 	ts=$(get_time $t)
 	if [ "x$ts" != x ]; then
 	    echo "$ts"
 	    return
 	fi
     done
 }
 
 get_last_time() {
     l=""
     best=`date +%s` # Now
     format=$1
     while read l; do
 	t=$(echo $l | get_time_$format)
 	ts=$(get_time $t)
 	if [ "x$ts" != x ]; then
 	    best=$ts
 	fi
     done
     echo $best
 }
 
 linetime() {
     l=`tail -n +$2 $1 | grep -a ":[0-5][0-9]:" | head -n 1`
     format=`get_time_format_for_string $l`
     t=`echo $l | get_time_$format`
     get_time "$t"
 }
 
 #
 # findmsg <max> <pattern>
 #
 # Print the names of up to <max> system logs that contain <pattern>,
 # ordered by most recently modified.
 #
 findmsg() {
     max=$1
     pattern="$2"
     found=0
 
     # List all potential system logs ordered by most recently modified.
     candidates=$(ls -1td $SYSLOGS 2>/dev/null)
     if [ -z "$candidates" ]; then
         debug "No system logs found to search for pattern \'$pattern\'"
         return
     fi
 
     # Portable way to handle files with spaces in their names.
     SAVE_IFS=$IFS
     IFS="
 "
 
     # Check each log file for matches.
     logfiles=""
     for f in $candidates; do
         local cat=$(find_decompressor "$f")
         $cat "$f" 2>/dev/null | grep -q -e "$pattern"
         if [ $? -eq 0 ]; then
 
             # Add this file to the list of hits
             # (using newline as separator to handle spaces in names).
             if [ -z "$logfiles" ]; then
                 logfiles="$f"
             else
                 logfiles="$logfiles
 $f"
             fi
 
             # If we have enough hits, print them and return.
             found=$(($found+1))
             if [ $found -ge $max ]; then
                 debug "Pattern \'$pattern\' found in: [ $logfiles ]"
                 IFS=$SAVE_IFS
                 echo "$logfiles"
                 return
             fi
         fi
     done 2>/dev/null
     IFS=$SAVE_IFS
 
     debug "Pattern \'$pattern\' not found in any system logs"
 }
 
 node_events() {
   if [ -e $1 ]; then
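     # Join the pattern column of EVENT_PATTERNS into one alternation
     # regex; the trailing sed strips the leading "|".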
     Epatt=`echo "$EVENT_PATTERNS" |
       while read title p; do [ -n "$p" ] && echo -n "|$p"; done |
       sed 's/.//'
       `
     grep -E "$Epatt" $1
   fi
 }
 
 pickfirst() {
     for x; do
 	which $x >/dev/null 2>&1 && {
 	    echo $x
 	    return 0
 	}
     done
     return 1
 }
 
 shrink() {
     olddir=$PWD
     dir=`dirname $1`
     base=`basename $1`
 
     target=$1.tar
     tar_options="cf"
 
     variant=`pickfirst bzip2 gzip xz false`
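     # Prefer bzip2, then gzip, then xz; "false" guarantees pickfirst
     # succeeds and routes us to the no-compressor warning below.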
     case $variant in
 	bz*)
 	    tar_options="jcf"
 	    target="$target.bz2"
 	    ;;
 	gz*)
 	    tar_options="zcf"
 	    target="$target.gz"
 	    ;;
 	xz*)
 	    tar_options="Jcf"
 	    target="$target.xz"
 	    ;;
 	*)
 	    warning "Could not find a compression program, the resulting tarball may be huge"
 	    ;;
     esac
 
     if [ -e $target ]; then
 	fatal "Destination $target already exists, specify an alternate name with --dest"
     fi
 
     cd $dir  >/dev/null 2>&1
     tar $tar_options $target $base >/dev/null 2>&1
     cd $olddir  >/dev/null 2>&1
 
     echo $target
 }
 
 findln_by_time() {
     local logf=$1
     local tm=$2
     local first=1
 
     # Some logs can be massive (over 1,500,000,000 lines have been seen in the wild) 
     # Even just 'wc -l' on these files can take 10+ minutes 
 
     local fileSize=`ls -lh "$logf" | awk '{ print $5 }' | grep -ie G`
     if [ x$fileSize != x ]; then
         warning "$logf is ${fileSize} in size and could take many hours to process. Skipping."
         return
     fi
 
     local last=`wc -l < $logf`
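     # Binary search by timestamp: linetime() yields the time of the
     # first parsable entry at or after a given line, and we narrow
     # [first, last] until we hit the entry closest to $tm.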
     while [ $first -le $last ]; do
 	mid=$((($last+$first)/2))
 	trycnt=10
 	while [ $trycnt -gt 0 ]; do
 	    tmid=`linetime $logf $mid`
 	    [ "$tmid" ] && break
 	    warning "cannot extract time: $logf:$mid; will try the next one"
 	    trycnt=$(($trycnt-1))
 			# shift the whole first-last segment
 	    first=$(($first-1))
 	    last=$(($last-1))
 	    mid=$((($last+$first)/2))
 	done
 	if [ -z "$tmid" ]; then
 	    warning "giving up on log..."
 	    return
 	fi
 	if [ $tmid -gt $tm ]; then
 	    last=$(($mid-1))
 	elif [ $tmid -lt $tm ]; then
 	    first=$(($mid+1))
 	else
 	    break
 	fi
     done
     echo $mid
 }
 
 dumplog() {
     local logf=$1
     local from_line=$2
     local to_line=$3
     [ "$from_line" ] ||
     return
     tail -n +$from_line $logf |
     if [ "$to_line" ]; then
 	head -$(($to_line-$from_line+1))
     else
 	cat
     fi
 }
 
 #
 # find log/set of logs which are interesting for us
 #
 #
 # find log slices
 #
 
 find_decompressor() {
     case $1 in
         *bz2) echo "bzip2 -dc" ;;
         *gz)  echo "gzip -dc" ;;
         *xz)  echo "xz -dc" ;;
         *)    echo "cat" ;;
     esac
 }
 
 #
 # check if the log contains a piece of our segment
 #
 is_our_log() {
 	local logf=$1
 	local from_time=$2
 	local to_time=$3
 
 	local cat=`find_decompressor $logf`
 	local format=`$cat $logf | get_time_format`
 	local first_time=`$cat $logf | head -10 | get_first_time $format`
 	local last_time=`$cat $logf | tail -10 | get_last_time $format`
 
 	if [ x = "x$first_time" -o x = "x$last_time" ]; then
 	    warning "Skipping bad logfile '$1': Could not determine log dates"
 	    return 0 # skip (empty log?)
 	fi
 	if [ $from_time -gt $last_time ]; then
 		# we shouldn't get here anyway if the logs are in order
 		return 2 # we're past good logs; exit
 	fi
 	if [ $from_time -ge $first_time ]; then
 		return 3 # this is the last good log
 	fi
 	# have to go further back
 	if [ x = "x$to_time" -o $to_time -ge $first_time ]; then
 		return 1 # include this log
 	else
 		return 0 # don't include this log
 	fi
 }
 #
 # go through archived logs (timewise backwards) and see if there
 # are lines belonging to us
 # (we rely on untouched log files, i.e. that modify time
 # hasn't been changed)
 #
 arch_logs() {
 	local logf=$1
 	local from_time=$2
 	local to_time=$3
 
 	# look for files such as: ha-log-20090308 or
 	# ha-log-20090308.gz (.bz2) or ha-log.0, etc
 	ls -t $logf $logf*[0-9z] 2>/dev/null |
 	while read next_log; do
 		is_our_log $next_log $from_time $to_time
 		case $? in
 		0) ;;  # noop, continue
 		1) echo $next_log  # include log and continue
 			debug "Found log $next_log"
 			;;
 		2) break;; # don't go through older logs!
 		3) echo $next_log  # include log and continue
 			debug "Found log $next_log"
 			break
 			;; # don't go through older logs!
 		esac
 	done
 }
 
 #
 # print part of the log
 #
 drop_tmp_file() {
 	[ -z "$tmp" ] || rm -f "$tmp"
 }
 
 print_logseg() {
 	local logf=$1
 	local from_time=$2
 	local to_time=$3
 
 	# uncompress to a temp file (if necessary)
 	local cat=`find_decompressor $logf`
 	if [ "$cat" != "cat" ]; then
 		tmp=`mktemp`
 		$cat $logf > $tmp
 		trap drop_tmp_file 0
 		sourcef=$tmp
 	else
 		sourcef=$logf
 		tmp=""
 	fi
 
 	if [ "$from_time" = 0 ]; then
 		FROM_LINE=1
 	else
 		FROM_LINE=`findln_by_time $sourcef $from_time`
 	fi
 	if [ -z "$FROM_LINE" ]; then
 		warning "couldn't find line for time $from_time; corrupt log file?"
 		return
 	fi
 
 	TO_LINE=""
 	if [ "$to_time" != 0 ]; then
 		TO_LINE=`findln_by_time $sourcef $to_time`
 		if [ -z "$TO_LINE" ]; then
 			warning "couldn't find line for time $to_time; corrupt log file?"
 			return
 		fi
 		if [ $FROM_LINE -lt $TO_LINE ]; then
 		    dumplog $sourcef $FROM_LINE $TO_LINE
 		    log "Including segment [$FROM_LINE-$TO_LINE] from $logf"
 		else
 		    debug "Empty segment [$FROM_LINE-$TO_LINE] from $logf"
 		fi
 	else
 	    dumplog $sourcef $FROM_LINE $TO_LINE
 	    log "Including all logs after line $FROM_LINE from $logf"
 	fi
 	drop_tmp_file
 	trap "" 0
 }
 
 #
 # find log/set of logs which are interesting for us
 #
 dumplogset() {
 	local logf=$1
 	local from_time=$2
 	local to_time=$3
 
 	local logf_set=`arch_logs $logf $from_time $to_time`
 	if [ x = "x$logf_set" ]; then
 		return
 	fi
 
 	local num_logs=`echo "$logf_set" | wc -l`
 	local oldest=`echo $logf_set | awk '{print $NF}'`
 	local newest=`echo $logf_set | awk '{print $1}'`
 	local mid_logfiles=`echo $logf_set | awk '{for(i=NF-1; i>1; i--) print $i}'`
 
 	# the first logfile: from $from_time to $to_time (or end)
 	# logfiles in the middle: all
 	# the last logfile: from beginning to $to_time (or end)
 	case $num_logs in
 	1) print_logseg $newest $from_time $to_time;;
 	*)
 		print_logseg $oldest $from_time 0
 		for f in $mid_logfiles; do
 		    `find_decompressor $f` $f
 		    debug "including complete $f logfile"
 		done
 		print_logseg $newest 0 $to_time
 	;;
 	esac
 }
 
 # cut out a stanza
 getstanza() {
 	awk -v name="$1" '
 	!in_stanza && NF==2 && /^[a-z][a-z]*[[:space:]]*{/ { # stanza start
 		if ($1 == name)
 			in_stanza = 1
 	}
 	in_stanza { print }
 	in_stanza && NF==1 && $1 == "}" { exit }
 	'
 }
 # supply stanza in $1 and variable name in $2
 # (stanza is optional)
 getcfvar() {
     cf_type=$1; shift;
     cf_var=$1; shift;
     cf_file=$*
 
     [ -f "$cf_file" ] || return
     case $cf_type in
 	cman)
 	    grep $cf_var $cf_file | sed s/.*$cf_var=\"// | sed s/\".*//
 	    ;;
 	corosync|openais)
 	    sed 's/#.*//' < $cf_file |
 	        if [ $# -eq 2 ]; then
 			getstanza "$cf_var"
 			shift 1
 		else
 			cat
 		fi |
 		awk -v varname="$cf_var" '
 		NF==2 && match($1,varname":$")==1 { print $2; exit; }
 		'
 	;;
 	heartbeat)
 	    sed 's/#.*//' < $cf_file |
 		grep -w "^$cf_var" |
 		sed 's/^[^[:space:]]*[[:space:]]*//'
 
 	    ;;
 	logd)
 	    sed 's/#.*//' < $cf_file |
 		grep -w "^$cf_var" |
 		sed 's/^[^[:space:]]*[[:space:]]*//'
 
 	    ;;
     esac
 }
 
 #
 # figure out the cluster type, depending on the process list
 # and existence of configuration files
 #
 get_cluster_type() {
     if is_running corosync; then
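 	# corosync-objctl (corosync 1.x) or corosync-cmapctl (2.x) can
 	# report quorum.provider, which CMAN clusters set to quorum_cman.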
 	tool=`pickfirst corosync-objctl corosync-cmapctl`
 	case $tool in
 	    *objctl) quorum=`$tool -a | grep quorum.provider | sed 's/.*=\s*//'`;;
 	    *cmapctl) quorum=`$tool | grep quorum.provider | sed 's/.*=\s*//'`;;
 	esac
 	if [ x"$quorum" = x"quorum_cman" ]; then
 	    stack="cman"
 	else
 	    stack="corosync"
 	fi
 
     elif is_running aisexec; then
 	stack="openais"
 
     elif
 	ps -ef | grep -v -e grep -e "eartbeat/[clasp]" | egrep -qs '[h]eartbeat'
     then
 	stack="heartbeat"
 
     # Now we're guessing...
 
     elif [ -f /etc/cluster/cluster.conf ]; then
 	stack="cman"
 
     # TODO: Technically these could be anywhere :-/
     elif [ -f /etc/corosync/corosync.conf ]; then
 	stack="corosync"
 
     elif [ -f /etc/ais/openais.conf ]; then
 	stack="openais"
 
     elif [ -f /etc/ha.d/ha.cf ]; then
         stack="heartbeat"
 
     else
         # We still don't know. This might be a Pacemaker Remote node,
         # or the configuration might be in a nonstandard location.
         stack="any"
     fi
 
     debug "Detected the '$stack' cluster stack"
     echo $stack
 }
 
 find_cluster_cf() {
     case $1 in
 	cman) echo "/etc/cluster/cluster.conf";;
 	corosync)
 	    best_size=0
 	    best_file=""
 
 	    # TODO: Technically these could be anywhere :-/
 	    for cf in /etc/ais/openais.conf /etc/corosync/corosync.conf; do
 		if [ -f $cf ]; then
 		    size=`wc -l $cf | awk '{print $1}'`
 		    if [ $size -gt $best_size ]; then
 			best_size=$size
 			best_file=$cf
 		    fi
 		fi
 	    done
 	    if [ -z "$best_file" ]; then
 		debug "Looking for corosync configuration file. This may take a while..."
-		for f in `find / $depth -type f -name corosync.conf`; do
+		for f in `find / -maxdepth $maxdepth -type f -name corosync.conf`; do
 		    best_file=$f
 		    break
 		done
 	    fi
 	    debug "Located corosync config file: $best_file"
 	    echo "$best_file"
 	    ;;
 	openais)
 	    # TODO: Technically it could be anywhere :-/
 	    cf="/etc/ais/openais.conf"
 	    if [ -f $cf ]; then
 		echo "$cf"
 	    fi
 	    ;;
 	heartbeat)
 	    cf="/etc/ha.d/ha.cf"
 	    if [ -f $cf ]; then
 		echo "$cf"
 	    fi
 	    ;;
 	any)
 	    # Cluster type is undetermined. Don't complain, because this
 	    # might be a Pacemaker Remote node.
 	    ;;
 	*)
 	    warning "Unknown cluster type: $1"
 	    ;;
     esac
 }
 
 #
 # check for the major prereq for a) parameter parsing and b)
 # parsing logs
 #
 t=`get_time "12:00"`
 if [ "$t" = "" ]; then
 	fatal "please install the perl Date::Parse module (perl-DateTime-Format-DateParse on Fedora/Red Hat)"
 fi
 
 # vim: set expandtab tabstop=8 softtabstop=4 shiftwidth=4 textwidth=80: