diff --git a/TODO.markdown b/TODO.markdown
index 39072d304a..5ec118f6a9 100644
--- a/TODO.markdown
+++ b/TODO.markdown
@@ -1,53 +1,57 @@
 # Semi-random collection of tasks we'd like to get done
 
 ## Targeted for 1.2
 - Need a way to indicate when unfencing operations must be initiated from the host being unfenced
 - Remove all calls to uname() and replace with get_node_name(), which redirects to ${stack}_node_name()
  
 ## Targeted for 1.2.x
 
 - Support
   http://cgit.freedesktop.org/systemd/systemd/commit/?id=96342de68d0d6de71a062d984dafd2a0905ed9fe
 - Allow stonith_admin to optionally route fencing requests via the CIB (terminate=true)
 - Add corosync to ComponentFail cts test
 - Support 'yesterday' and 'thursday' and '24-04' as dates in crm_report 
 - Allow the N in 'give up after N failed fencing attempts' to be configurable 
 - Check for uppercase letters in node names, warn if found
 - Imply startup-failure-is-fatal from on-fail="restart" 
 - Show an English version of the config with crm_resource --rules
 - Convert cts/CIB.py into a supported Python API for the CIB
 - Reduce the amount of stonith-ng logging
 - Use dlopen for snmp in crm_mon
 - Re-implement no-quorum filter for cib updates?
 
 ## Targeted for 1.4
 
 - Support A colocated with (B || C || D)
 - Implement a truly atomic version of attrd
 - Support rolling average values in attrd
 - Support heartbeat with the mcp
 - Freeze/Thaw
 - Create Pacemaker plugin for snmpd - http://www.net-snmp.org/
 - Investigate using a DB as the back-end for the CIB
 - Decide whether to fully support or drop failover domains
 
 # Testing
 - Convert BandwidthTest CTS test into a Scenario wrapper
 - find_operations() is not covered by PE regression tests
 - no_quorum_policy==suicide is not covered by PE regression tests
 - parse_xml_duration() is not covered by PE regression tests
 - phase_of_the_moon() is not covered by PE regression tests
 - test_role_expression() is not covered by PE regression tests
 - native_parameter() is not covered by PE regression tests
 - clone_active() is not covered by PE regression tests
 - convert_non_atomic_task() in native.c is not covered by PE regression tests
 - group_rsc_colocation_lh() is not covered by PE regression tests
 - Test on-fail=standby
 
 # Documentation
 - Clusters from Scratch: Mail
 - Clusters from Scratch: MySQL
 - Document reload in Pacemaker Explained
 - Document advanced fencing logic in Pacemaker Explained
 - Use ann:defaultValue="..." instead of <optional> in the schema more often
-- Allow Clusters from Scratch to be built in two flavors - pcs and crm shell
+- Add an appendix to CFS detailing how to re-enable the firewall
+- Reference ocf:pacemaker resource agents instead of ocf:heartbeat resource agents in CFS
+- Document implicit operation creation in CFS once pcs supports it.
+- Document use of pcs resource move command in CFS once pcs supports it.
+- Make use of the --clone option of 'pcs resource create' for dlm in CFS once pcs fully supports that option.
diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
index 07b5a617d0..2061ea6538 100644
--- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
@@ -1,482 +1,498 @@
 [appendix]
 == Configuration Recap ==
 
 === Final Cluster Configuration ===
 
 ifdef::pcs[]
 
 [source,Bash]
 ----
 # pcs resource
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 pcmk-1 ]
  Clone Set: dlm-clone [dlm]
      Started: [ pcmk-2 pcmk-1 ]
  Clone Set: ClusterIP-clone [ClusterIP] (unique)
      ClusterIP:0	(ocf::heartbeat:IPaddr2) Started 
      ClusterIP:1	(ocf::heartbeat:IPaddr2) Started 
  Clone Set: WebFS-clone [WebFS]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: WebSite-clone [WebSite]
      Started: [ pcmk-1 pcmk-2 ]
 # pcs resource rsc defaults
 resource-stickiness: 100
 # pcs resource op defaults
 timeout: 240s
 # pcs stonith
  impi-fencing	(stonith:fence_ipmilan) Started
 # pcs property
 dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 cluster-infrastructure: corosync
 no-quorum-policy: ignore
 stonith-enabled: true
 # pcs constraint
 Location Constraints:
 Ordering Constraints:
   ClusterIP-clone then WebSite-clone
   WebDataClone then WebSite-clone
   WebFS-clone then WebSite-clone
 Colocation Constraints:
   WebSite-clone with ClusterIP-clone
   WebFS-clone with WebDataClone (with-rsc-role:Master)
   WebSite-clone with WebFS-clone
 #
 # pcs status
 
 Last updated: Fri Sep 14 13:45:34 2012
 Last change: Fri Sep 14 13:43:13 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 11 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 pcmk-1 ]
  Clone Set: dlm-clone [dlm]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: ClusterIP-clone [ClusterIP] (unique)
      ClusterIP:0	(ocf::heartbeat:IPaddr2):	Started pcmk-1
      ClusterIP:1	(ocf::heartbeat:IPaddr2):	Started pcmk-2
  Clone Set: WebFS-clone [WebFS]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: WebSite-clone [WebSite]
      Started: [ pcmk-1 pcmk-2 ]
  impi-fencing	(stonith:fence_ipmilan):	Started
 ----
 
 In XML, it should look similar to this:
 [source,XML]
 ----
 <cib admin_epoch="0" cib-last-written="Fri Sep 14 13:43:13 2012" crm_feature_set="3.0.6" dc-uuid="1" epoch="47" have-quorum="1" num_updates="50" update-client="cibadmin" update-origin="pcmk-1" validate-with="pacemaker-1.2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" type="normal" uname="pcmk-1"/>
       <node id="2" type="normal" uname="pcmk-2"/>
     </nodes>
     <resources>
       <master id="WebDataClone">
         <primitive class="ocf" id="WebData" provider="linbit" type="drbd">
           <instance_attributes id="WebData-instance_attributes">
             <nvpair id="WebData-instance_attributes-drbd_resource" name="drbd_resource" value="wwwdata"/>
           </instance_attributes>
           <operations>
             <op id="WebData-interval-60s" interval="60s" name="monitor"/>
           </operations>
         </primitive>
         <meta_attributes id="WebDataClone-meta_attributes">
           <nvpair id="WebDataClone-meta_attributes-master-node-max" name="master-node-max" value="1"/>
           <nvpair id="WebDataClone-meta_attributes-clone-max" name="clone-max" value="2"/>
           <nvpair id="WebDataClone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
           <nvpair id="WebDataClone-meta_attributes-notify" name="notify" value="true"/>
           <nvpair id="WebDataClone-meta_attributes-master-max" name="master-max" value="2"/>
         </meta_attributes>
       </master>
       <clone id="dlm-clone">
         <primitive class="ocf" id="dlm" provider="pacemaker" type="controld">
           <instance_attributes id="dlm-instance_attributes"/>
           <operations>
             <op id="dlm-interval-60s" interval="60s" name="monitor"/>
           </operations>
         </primitive>
         <meta_attributes id="dlm-clone-meta">
           <nvpair id="dlm-clone-max" name="clone-max" value="2"/>
           <nvpair id="dlm-clone-node-max" name="clone-node-max" value="1"/>
         </meta_attributes>
       </clone>
       <clone id="ClusterIP-clone">
         <primitive class="ocf" id="ClusterIP" provider="heartbeat" type="IPaddr2">
           <instance_attributes id="ClusterIP-instance_attributes">
             <nvpair id="ClusterIP-instance_attributes-ip" name="ip" value="192.168.0.120"/>
             <nvpair id="ClusterIP-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
             <nvpair id="ClusterIP-instance_attributes-clusterip_hash" name="clusterip_hash" value="sourceip"/>
           </instance_attributes>
           <operations>
             <op id="ClusterIP-interval-30s" interval="30s" name="monitor"/>
           </operations>
         </primitive>
         <meta_attributes id="ClusterIP-clone-meta">
           <nvpair id="ClusterIP-globally-unique" name="globally-unique" value="true"/>
           <nvpair id="ClusterIP-clone-max" name="clone-max" value="2"/>
           <nvpair id="ClusterIP-clone-node-max" name="clone-node-max" value="2"/>
         </meta_attributes>
       </clone>
       <clone id="WebFS-clone">
         <primitive class="ocf" id="WebFS" provider="heartbeat" type="Filesystem">
           <instance_attributes id="WebFS-instance_attributes">
             <nvpair id="WebFS-instance_attributes-device" name="device" value="/dev/drbd/by-res/wwwdata"/>
             <nvpair id="WebFS-instance_attributes-directory" name="directory" value="/var/www/html"/>
             <nvpair id="WebFS-instance_attributes-fstype" name="fstype" value="gfs2"/>
           </instance_attributes>
           <meta_attributes id="WebFS-meta_attributes"/>
         </primitive>
         <meta_attributes id="WebFS-clone-meta"/>
       </clone>
       <clone id="WebSite-clone">
         <primitive class="ocf" id="WebSite" provider="heartbeat" type="apache">
           <instance_attributes id="WebSite-instance_attributes">
             <nvpair id="WebSite-instance_attributes-configfile" name="configfile" value="/etc/httpd/conf/httpd.conf"/>
             <nvpair id="WebSite-instance_attributes-statusurl" name="statusurl" value="http://localhost/server-status"/>
           </instance_attributes>
           <operations>
             <op id="WebSite-interval-1min" interval="1min" name="monitor"/>
           </operations>
         </primitive>
         <meta_attributes id="WebSite-clone-meta"/>
       </clone>
       <primitive class="stonith" id="impi-fencing" type="fence_ipmilan">
         <instance_attributes id="impi-fencing-instance_attributes">
           <nvpair id="impi-fencing-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="pcmk-1 pcmk-2"/>
           <nvpair id="impi-fencing-instance_attributes-ipaddr" name="ipaddr" value="10.0.0.1"/>
           <nvpair id="impi-fencing-instance_attributes-login" name="login" value="testuser"/>
           <nvpair id="impi-fencing-instance_attributes-passwd" name="passwd" value="acd123"/>
         </instance_attributes>
         <operations>
           <op id="impi-fencing-interval-60s" interval="60s" name="monitor"/>
         </operations>
       </primitive>
     </resources>
     <constraints>
       <rsc_colocation id="colocation-WebSite-ClusterIP-INFINITY" rsc="WebSite-clone" score="INFINITY" with-rsc="ClusterIP-clone"/>
       <rsc_order first="ClusterIP-clone" first-action="start" id="order-ClusterIP-WebSite-mandatory" then="WebSite-clone" then-action="start"/>
       <rsc_colocation id="colocation-WebFS-WebDataClone-INFINITY" rsc="WebFS-clone" score="INFINITY" with-rsc="WebDataClone" with-rsc-role="Master"/>
       <rsc_colocation id="colocation-WebSite-WebFS-INFINITY" rsc="WebSite-clone" score="INFINITY" with-rsc="WebFS-clone"/>
       <rsc_order first="WebFS-clone" id="order-WebFS-WebSite-mandatory" then="WebSite-clone"/>
       <rsc_order first="WebDataClone" first-action="promote" id="order-WebDataClone-WebFS-mandatory" then="WebFS-clone" then-action="start"/>
     </constraints>
     <rsc_defaults>
       <meta_attributes id="rsc_defaults-options">
         <nvpair id="rsc_defaults-options-resource-stickiness" name="resource-stickiness" value="100"/>
       </meta_attributes>
     </rsc_defaults>
     <op_defaults>
       <meta_attributes id="op_defaults-options">
         <nvpair id="op_defaults-options-timeout" name="timeout" value="240s"/>
       </meta_attributes>
     </op_defaults>
   </configuration>
 </cib>
 ----
 endif::[]
 
 ifdef::crm[]
 .....
 # crm configure show
 node pcmk-1
 node pcmk-2
 primitive WebData ocf:linbit:drbd \
     params drbd_resource="wwwdata" \
     op monitor interval="60s"
 primitive WebFS ocf:heartbeat:Filesystem \
     params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 primitive WebSite ocf:heartbeat:apache \
     params configfile="/etc/httpd/conf/httpd.conf" \
     op monitor interval="1min"
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \
     op monitor interval="30s"
 primitive ipmi-fencing stonith::fence_ipmilan \
     params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123 \
     op monitor interval="60s"
 ms WebDataClone WebData \
     meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 clone WebFSClone WebFS
 clone WebIP ClusterIP \
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 clone WebSiteClone WebSite
 colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone
 colocation fs_on_drbd inf: WebFSClone WebDataClone:Master
 colocation website-with-ip inf: WebSiteClone WebIP
 order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start
 order WebSite-after-WebFS inf: WebFSClone WebSiteClone
 order apache-after-ip inf: WebIP WebSiteClone
 property $id="cib-bootstrap-options" \
     dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \
     cluster-infrastructure="openais" \
     expected-quorum-votes="2" \
     stonith-enabled="true" \
     no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
     resource-stickiness="100"
 .....
 endif::[]
 
 
 === Node List ===
 
 The list of cluster nodes is automatically populated by the cluster.
 
 ifdef::pcs[]
 .....
 Pacemaker Nodes:
  Online: [ pcmk-1 pcmk-2  ]
 .....
 endif::[]
 
 ifdef::crm[]
 .....
 node pcmk-1
 node pcmk-2
 .....
 endif::[]
 
 === Cluster Options ===
 
 This is where the cluster automatically stores some information about
 the cluster
 
 * dc-version - the version (including upstream source-code hash) of Pacemaker used on the DC
 
 * cluster-infrastructure - the cluster infrastructure being used (heartbeat, corosync or openais)
 
 * expected-quorum-votes - the maximum number of nodes expected to be part of the cluster
 
 and where the admin can set options that control the way the cluster
 operates
 
 * stonith-enabled=true - Make use of STONITH
 
 * no-quorum-policy=ignore - Ignore loss of quorum and continue to host resources.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs property
 dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 cluster-infrastructure: corosync
 no-quorum-policy: ignore
 stonith-enabled: true
 ----
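+
+Any of these options can be changed at any time. As a minimal sketch,
+using the same +pcs property set+ syntax used elsewhere in this guide,
+one of the options above could be updated like so:
+
+[source,Bash]
+----
+# pcs property set no-quorum-policy=ignore
+----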
 endif::[]
 
 ifdef::crm[]
 .....
 property $id="cib-bootstrap-options" \
     dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \
     cluster-infrastructure="openais" \
     expected-quorum-votes="2" \
     stonith-enabled="true" \
     no-quorum-policy="ignore"
 .....
 endif::[]
 
 === Resources ===
 
 
 ==== Default Options ====
 
 Here we configure cluster options that apply to every resource.
 
 ifdef::pcs[]
 * resource-stickiness - Specify the aversion to moving resources to other machines
 [source,Bash]
 ----
 # pcs resource rsc defaults
 resource-stickiness: 100
 ----
 endif::[]
 
 ifdef::crm[]
 * resource-stickiness - Specify the aversion to moving resources to other machines
 .....
 rsc_defaults $id="rsc-options" \
     resource-stickiness="100"
 .....
 endif::[]
 
 ==== Fencing ====
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs stonith show
  impi-fencing	(stonith:fence_ipmilan) Started
 # pcs stonith show impi-fencing
 Resource: impi-fencing
   pcmk_host_list: pcmk-1 pcmk-2
   ipaddr: 10.0.0.1
   login: testuser
   passwd: acd123
 ----
 endif::[]
 
 ifdef::crm[]
 .....
 primitive ipmi-fencing stonith::fence_ipmilan \
     params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123 \
     op monitor interval="60s"
 clone Fencing rsa-fencing
 .....
 endif::[]
 
 ==== Service Address ====
 
 Users of the services provided by the cluster require an unchanging
 address with which to access them. Additionally, we cloned the address so
 it will be active on both nodes. An iptables rule (created as part of the
 resource agent) is used to ensure that each request only gets processed by one
 of the two clone instances. The additional meta options tell the cluster
 that we want two instances of the clone (one "request bucket" for each
 node) and that if one node fails, then the remaining node should hold
 both.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource show ClusterIP-clone
 Resource: ClusterIP-clone
   ip: 192.168.0.120
   cidr_netmask: 32
   clusterip_hash: sourceip
   globally-unique: true
   clone-max: 2
   clone-node-max: 2
   op monitor interval=30s
 ----
 endif::[]
 
 ifdef::crm[]
 .....
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \
     op monitor interval="30s"
 clone WebIP ClusterIP
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 .....
 endif::[]
 
 [NOTE]
 =======
 TODO: The RA should check for globally-unique=true when cloned
 =======
 
 ==== DRBD - Shared Storage ====
 
 Here we define the DRBD service and specify which DRBD resource (from
 drbd.conf) it should manage. We make it a master/slave resource and, in
 order to have an active/active setup, allow both instances to be promoted
 by specifying master-max=2. We also set the notify option so that the
 cluster will tell the DRBD agent when its peer changes state.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource show WebDataClone
 Resource: WebDataClone
   drbd_resource: wwwdata
   master-node-max: 1
   clone-max: 2
   clone-node-max: 1
   notify: true
   master-max: 2
   op monitor interval=60s
+# pcs constraint ref WebDataClone
+Resource: WebDataClone
+  colocation-WebFS-WebDataClone-INFINITY
+  order-WebDataClone-WebFS-mandatory
 ----
 endif::[]
 
 ifdef::crm[]
 .....
 primitive WebData ocf:linbit:drbd \
     params drbd_resource="wwwdata" \
     op monitor interval="60s"
 ms WebDataClone WebData \
     meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 .....
 endif::[]
 
 
 ==== Cluster Filesystem ====
 
 The cluster filesystem ensures that files are read and written correctly.
 We need to specify the block device (provided by DRBD), where we want it
 mounted and that we are using GFS2. Again it is a clone because it is
 intended to be active on both nodes. The additional constraints ensure
 that it can only be started on nodes with active gfs-control and drbd
 instances.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource show WebFS-clone
 Resource: WebFS-clone
   device: /dev/drbd/by-res/wwwdata
   directory: /var/www/html
   fstype: gfs2
+# pcs constraint ref WebFS-clone
+Resource: WebFS-clone
+  colocation-WebFS-WebDataClone-INFINITY
+  colocation-WebSite-WebFS-INFINITY
+  order-WebFS-WebSite-mandatory
+  order-WebDataClone-WebFS-mandatory
 ----
 endif::[]
 
 ifdef::crm[]
 .....
 primitive WebFS ocf:heartbeat:Filesystem \
     params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 clone WebFSClone WebFS
 colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone
 colocation fs_on_drbd inf: WebFSClone WebDataClone:Master
 order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start
 order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone
 .....
 endif::[]
 
 ==== Apache ====
 
 Lastly, we have the actual service, Apache. We need only tell the cluster
 where to find its main configuration file and restrict it to running on
 nodes that have the required filesystem mounted and the IP address
 active.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource show WebSite-clone
 Resource: WebSite-clone
   configfile: /etc/httpd/conf/httpd.conf
   statusurl: http://localhost/server-status
   master-max: 2
   op monitor interval=1min
+# pcs constraint ref WebSite-clone
+Resource: WebSite-clone
+  colocation-WebSite-ClusterIP-INFINITY
+  colocation-WebSite-WebFS-INFINITY
+  order-ClusterIP-WebSite-mandatory
+  order-WebFS-WebSite-mandatory
 ----
 endif::[]
 
 ifdef::crm[]
 .....
 primitive WebSite ocf:heartbeat:apache \
     params configfile="/etc/httpd/conf/httpd.conf" \
     op monitor interval="1min"
 clone WebSiteClone WebSite
 colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone
 colocation website-with-ip inf: WebSiteClone WebIP
 order apache-after-ip inf: WebIP WebSiteClone
 order WebSite-after-WebFS inf: WebFSClone WebSiteClone
 .....
 endif::[]
 
diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt b/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt
index fa844bd000..4fa502a589 100644
--- a/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.txt
@@ -1,121 +1,121 @@
 [appendix]
 
 
 == Sample Corosync Configuration  ==
 
 ifdef::pcs[]
 .Sample corosync.conf for a two-node cluster using a node list.
 .....
 # Please read the corosync.conf.5 manual page
 totem {
 version: 2
 secauth: off
-cluster_name: pcmk
+cluster_name: mycluster
 transport: udpu
 }
 
 nodelist {
   node {
         ring0_addr: pcmk-1
         nodeid: 1
   }
   node {
         ring0_addr: pcmk-2
         nodeid: 2
   }
 }
 
 quorum {
   provider: corosync_votequorum
 }
 
 logging {
   to_syslog: yes
 }
 .....
 endif::[]
 
 ifdef::crm[]
 .Sample corosync.conf for a two-node cluster using multicast.
 .....
 # Please read the corosync.conf.5 manual page
 totem {
 	version: 2
 
 	# crypto_cipher and crypto_hash: Used for mutual node authentication.
 	# If you choose to enable this, then do remember to create a shared
 	# secret with "corosync-keygen".
 	crypto_cipher: none
 	crypto_hash: none
 
 	# interface: define at least one interface to communicate
 	# over. If you define more than one interface stanza, you must
 	# also set rrp_mode.
 	interface {
                 # Rings must be consecutively numbered, starting at 0.
 		ringnumber: 0
 		# This is normally the *network* address of the
 		# interface to bind to. This ensures that you can use
 		# identical instances of this configuration file
 		# across all your cluster nodes, without having to
 		# modify this option.
 bindnetaddr: 192.168.122.0
 		# However, if you have multiple physical network
 		# interfaces configured for the same subnet, then the
 		# network address alone is not sufficient to identify
 		# the interface Corosync should bind to. In that case,
 		# configure the *host* address of the interface
 		# instead:
 		# bindnetaddr: 192.168.1.1
 		# When selecting a multicast address, consider RFC
 		# 2365 (which, among other things, specifies that
 		# 239.255.x.x addresses are left to the discretion of
 		# the network administrator). Do not reuse multicast
 		# addresses across multiple Corosync clusters sharing
 		# the same network.
 mcastaddr: 239.255.1.1
 		# Corosync uses the port you specify here for UDP
 		# messaging, and also the immediately preceding
 		# port. Thus if you set this to 5405, Corosync sends
 		# messages over UDP ports 5405 and 5404.
 mcastport: 4000
 		# Time-to-live for cluster communication packets. The
 		# number of hops (routers) that this ring will allow
 		# itself to pass. Note that multicast routing must be
 		# specifically enabled on most network routers.
 		ttl: 1
 	}
 }
 
 logging {
 	# Log the source file and line where messages are being
 	# generated. When in doubt, leave off. Potentially useful for
 	# debugging.
 	fileline: off
 	# Log to standard error. When in doubt, set to no. Useful when
 	# running in the foreground (when invoking "corosync -f")
 	to_stderr: no
 	# Log to a log file. When set to "no", the "logfile" option
 	# must not be set.
 	to_logfile: yes
 	logfile: /var/log/cluster/corosync.log
 	# Log to the system log daemon. When in doubt, set to yes.
 	to_syslog: yes
 	# Log debug messages (very verbose). When in doubt, leave off.
 	debug: off
 	# Log messages with time stamps. When in doubt, set to on
 	# (unless you are only logging to syslog, where double
 	# timestamps can be annoying).
 	timestamp: on
 	logger_subsys {
 		subsys: QUORUM
 		debug: off
 	}
 }
 
 quorum {
            provider: corosync_votequorum
            expected_votes: 2
 }
 .....
 endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
index 156f8bf343..19c2014c8e 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
@@ -1,755 +1,755 @@
 = Conversion to Active/Active =
 
 == Requirements ==
 
 The primary requirement for an Active/Active cluster is that the data
 required for your services is available, simultaneously, on both
 machines. Pacemaker makes no requirement on how this is achieved; you
 could use a SAN if you had one available, but since DRBD supports
 multiple Primaries, we can also use that.
 
 The only hitch is that we need to use a cluster-aware filesystem. The
 one we used earlier with DRBD, ext4, is not one of those. Both OCFS2
 and GFS2 are supported; however, here we will use GFS2, which comes with
 Fedora 17.
 
 === Installing the required Software ===
 
 [source,Bash]
 -----
 # yum install -y gfs2-utils dlm kernel-modules-extra
 Loaded plugins: langpacks, presto, refresh-packagekit
 Resolving Dependencies
 --> Running transaction check
 ---> Package dlm.x86_64 0:3.99.4-1.fc17 will be installed
 ---> Package gfs2-utils.x86_64 0:3.1.4-3.fc17 will be installed
 ---> Package kernel-modules-extra.x86_64 0:3.4.4-3.fc17 will be installed
 --> Finished Dependency Resolution
 
 Dependencies Resolved
 
 ================================================================================
  Package                Arch       Version          Repository           Size
 ================================================================================
 Installing:
  dlm                    x86_64     3.99.4-1.fc17    updates              83 k
  gfs2-utils             x86_64     3.1.4-3.fc17     fedora              214 k
  kernel-modules-extra   x86_64     3.4.4-3.fc17     updates             1.7 M
 
 Transaction Summary
 ================================================================================
 Install  3 Packages
 
 Total download size: 1.9 M
 Installed size: 7.7 M
 Downloading Packages:
 (1/3): dlm-3.99.4-1.fc17.x86_64.rpm                         |  83 kB     00:00
 (2/3): gfs2-utils-3.1.4-3.fc17.x86_64.rpm                   | 214 kB     00:00
 (3/3): kernel-modules-extra-3.4.4-3.fc17.x86_64.rpm         | 1.7 MB     00:01
  -------------------------------------------------------------------------------
 Total                                              615 kB/s | 1.9 MB     00:03
 Running Transaction Check
 Running Transaction Test
 Transaction Test Succeeded
 Running Transaction
   Installing : kernel-modules-extra-3.4.4-3.fc17.x86_64                 1/3
   Installing : gfs2-utils-3.1.4-3.fc17.x86_64                           2/3
   Installing : dlm-3.99.4-1.fc17.x86_64                                 3/3
   Verifying  : dlm-3.99.4-1.fc17.x86_64                                 1/3
   Verifying  : gfs2-utils-3.1.4-3.fc17.x86_64                           2/3
   Verifying  : kernel-modules-extra-3.4.4-3.fc17.x86_64                 3/3
 
 Installed:
   dlm.x86_64 0:3.99.4-1.fc17
   gfs2-utils.x86_64 0:3.1.4-3.fc17
   kernel-modules-extra.x86_64 0:3.4.4-3.fc17
 
 Complete!
 -----
 
 == Create a GFS2 Filesystem ==
 
 [[GFS2_prep]]
 === Preparation ===
 
 Before we do anything to the existing partition, we need to make sure it
 is unmounted. We do this by telling the cluster to stop the WebFS resource.
 This will ensure that other resources (in our case, Apache) using WebFS
 are not only stopped, but stopped in the correct order.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource stop WebFS
 # pcs resource
  ClusterIP	(ocf::heartbeat:IPaddr2) Started 
  WebSite	(ocf::heartbeat:apache) Stopped 
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 ]
      Slaves: [ pcmk-1 ]
  WebFS	(ocf::heartbeat:Filesystem) Stopped 
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm resource stop WebFS
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 14:07:36 2012
 Last change: Tue Apr  3 14:07:15 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1702537408) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 5 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 ]
      Slaves: [ pcmk-1 ]
 -----
 endif::[]
 
 [NOTE]
 =======
 
 Note that both Apache and WebFS have been stopped.
 
 =======
 
 === Create and Populate a GFS2 Partition ===
 
 Now that the cluster stack and integration pieces are running smoothly,
 we can create a GFS2 partition.
 
 [WARNING]
 =========
 
 This will erase all previous content stored on the DRBD device. Ensure
 you have a copy of any important data.
 
 =========
 
 We need to specify a number of additional parameters when creating a
 GFS2 partition.
 
 First we must use the -p option to specify that we want to use the
 kernel's DLM. Next we use -j to indicate that it should reserve enough
 space for two journals (one per node accessing the filesystem).
 
 ifdef::pcs[]
 Lastly, we use -t to specify the lock table name. The format for this
 field is +clustername:fsname+. For the +clustername+, we need to use the same
 value as specified in 'corosync.conf' for +cluster_name+. If you set up
 corosync with the same cluster name we used in this tutorial, cluster
-name will be 'pcmk'.  If you are unsure what your cluster name is,
+name will be 'mycluster'.  If you are unsure what your cluster name is,
 open up /etc/corosync/corosync.conf, or execute the command
 'pcs cluster corosync pcmk-1' to view the corosync config.  The cluster
 name will be in the +totem+ block.
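+
+For example, with the sample 'corosync.conf' shown in the appendix, a
+quick way to confirm the name (a sketch, assuming pcs can reach pcmk-1)
+would be:
+
+[source,Bash]
+----
+# pcs cluster corosync pcmk-1 | grep cluster_name
+cluster_name: mycluster
+----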
 endif::[]
 
 ifdef::crm[]
 Lastly, we use -t to specify the lock table name. The format for this
 field is +clustername:fsname+. For the +clustername+, we need to use the same
 value as specified in 'corosync.conf' for +cluster_name+.  Just pick
 something unique and descriptive and add it somewhere inside the +totem+
 block.  For example:
 
 .....
 totem {
         version: 2
 
         # crypto_cipher and crypto_hash: Used for mutual node authentication.
         # If you choose to enable this, then do remember to create a shared
         # secret with "corosync-keygen".
         crypto_cipher: none
         crypto_hash: none
-        cluster_name: webtest
+        cluster_name: mycluster
   ...
 .....
 
 [IMPORTANT]
 ===========
 Do this on each node in the cluster and be sure to restart them before
 continuing.
 ===========
 endif::[]
 
 [IMPORTANT]
 ===========
 We must run the next command on whichever node last had '/dev/drbd'
 mounted.  Otherwise you will receive the message:
 
 -----
 /dev/drbd1: Read-only file system
 -----
 ===========
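+
+If you are not sure which node that is, one way to check (a sketch,
+assuming the DRBD resource name 'wwwdata' used in earlier chapters) is
+to ask DRBD which role each node currently holds:
+
+[source,Bash]
+-----
+# drbdadm role wwwdata
+Primary/Secondary
+-----
+
+The node whose local role (the first field) is 'Primary' has the device
+writable, and is where the 'mkfs.gfs2' command below should be run.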
 
 [source,Bash]
 -----
-# ssh pcmk-2 -- mkfs.gfs2 -p lock_dlm -j 2 -t webtest:web /dev/drbd1
+# ssh pcmk-2 -- mkfs.gfs2 -p lock_dlm -j 2 -t mycluster:web /dev/drbd1
 This will destroy any data on /dev/drbd1.
 It appears to contain: Linux rev 1.0 ext4 filesystem data, UUID=dc45fff3-c47a-4db2-96f7-a8049a323fe4 (extents) (large files) (huge files)
 Are you sure you want to proceed? [y/n]y
 Device:                    /dev/drbd1
 Blocksize:                 4096
 Device Size                0.97 GB (253935 blocks)
 Filesystem Size:           0.97 GB (253932 blocks)
 Journals:                  2
 Resource Groups:           4
 Locking Protocol:          "lock_dlm"
-Lock Table:                "webtest"
+Lock Table:                "mycluster"
 UUID:                      ed293a02-9eee-3fa3-ed1c-435ef1fd0116
 -----
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs cluster cib dlm_cfg
 # pcs -f dlm_cfg resource create dlm ocf:pacemaker:controld op monitor interval=60s
 # pcs -f dlm_cfg resource clone dlm clone-max=2 clone-node-max=1
 # pcs -f dlm_cfg resource show
  ClusterIP	(ocf::heartbeat:IPaddr2) Started 
  WebSite	(ocf::heartbeat:apache) Stopped 
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 ]
      Slaves: [ pcmk-1 ]
  WebFS	(ocf::heartbeat:Filesystem) Stopped 
  Clone Set: dlm-clone [dlm]
      Stopped: [ dlm:0 dlm:1 ]
 # pcs cluster push cib dlm_cfg
 CIB updated
 # pcs status
 
 Last updated: Fri Sep 14 12:54:50 2012
 Last change: Fri Sep 14 12:54:43 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 7 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
  WebSite	(ocf::heartbeat:apache):	Stopped 
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 ]
      Slaves: [ pcmk-1 ]
  WebFS	(ocf::heartbeat:Filesystem):	Stopped 
  Clone Set: dlm-clone [dlm]
      Started: [ pcmk-1 pcmk-2 ]
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm
 crm(live)# cib new dlm
 INFO: dlm shadow CIB created
 crm(dlm)# configure primitive dlm ocf:pacemaker:controld \
     op monitor interval=60s
 crm(dlm)# configure clone dlm_clone dlm meta clone-max=2 clone-node-max=1
 crm(dlm)# configure show
 node $id="1702537408" pcmk-1 \
 	attributes standby="off"
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebData ocf:linbit:drbd \
 	params drbd_resource="wwwdata" \
 	op monitor interval="60s"
 primitive WebFS ocf:heartbeat:Filesystem \
 	params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="ext4" \
 	meta target-role="Stopped"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 primitive dlm ocf:pacemaker:controld \
 	op monitor interval="60s"
 ms WebDataClone WebData \
 	meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 clone dlm_clone dlm \
 	meta clone-max="2" clone-node-max="1"
 location prefer-pcmk-1 WebSite 50: pcmk-1
 colocation WebSite-with-WebFS inf: WebSite WebFS
 colocation fs_on_drbd inf: WebFS WebDataClone:Master
 colocation website-with-ip inf: WebSite ClusterIP
 order WebFS-after-WebData inf: WebDataClone:promote WebFS:start
 order WebSite-after-WebFS inf: WebFS WebSite
 order apache-after-ip inf: ClusterIP WebSite
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore" \
 	last-lrm-refresh="1333446866"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 crm(dlm)# cib commit dlm
 INFO: commited 'dlm' shadow CIB to the cluster
 crm(dlm)# quit
 bye
 # crm_mon -1
 ============
 Last updated: Wed Apr  4 01:15:11 2012
 Last change: Wed Apr  4 00:50:11 2012 via crmd on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1702537408) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 7 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-1 ]
      Slaves: [ pcmk-2 ]
  Clone Set: dlm_clone [dlm]
      Started: [ pcmk-1 pcmk-2 ]
 -----
 endif::[]
 
 Then (re)populate the new filesystem with data (web pages). For now we'll
 create another variation on our home page.
 
 [source,Bash]
 -----
 # mount /dev/drbd1 /mnt/
 # cat <<-END >/mnt/index.html
 <html>
 <body>My Test Site - GFS2</body>
 </html>
 END
 # umount /dev/drbd1
 # drbdadm verify wwwdata
 -----
 
 == Reconfigure the Cluster for GFS2 ==
 
 
 ifdef::pcs[]
 
 With the WebFS resource stopped, let's update the configuration.
 
 [source,Bash]
 ----
 # pcs resource show WebFS
 Resource: WebFS
   device: /dev/drbd/by-res/wwwdata
   directory: /var/www/html
   fstype: ext4
   target-role: Stopped
 ----
 
 The fstype option needs to be updated to gfs2 instead of ext4.
 
 [source,Bash]
 ----
 # pcs resource update WebFS fstype=gfs2
 # pcs resource show WebFS
 Resource: WebFS
   device: /dev/drbd/by-res/wwwdata
   directory: /var/www/html
   fstype: gfs2
   target-role: Stopped
 CIB updated
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm
 crm(live) # cib new GFS2
 INFO: GFS2 shadow CIB created
 crm(GFS2) # configure delete WebFS
 crm(GFS2) # configure primitive WebFS ocf:heartbeat:Filesystem params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 -----
 
 Now that we've recreated the resource, we also need to recreate all the
 constraints that used it. This is because the shell will automatically
 remove any constraints that referenced WebFS.
 
 [source,Bash]
 -----
 crm(GFS2) # configure colocation WebSite-with-WebFS inf: WebSite WebFS
 crm(GFS2) # configure colocation fs_on_drbd inf: WebFS WebDataClone:Master
 crm(GFS2) # configure order WebFS-after-WebData inf: WebDataClone:promote WebFS:start
 crm(GFS2) # configure order WebSite-after-WebFS inf: WebFS WebSite
 crm(GFS2) # configure show
 node pcmk-1
 node pcmk-2
 primitive WebData ocf:linbit:drbd \
     params drbd_resource="wwwdata" \
     op monitor interval="60s"
 primitive WebFS ocf:heartbeat:Filesystem \
     params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 primitive WebSite ocf:heartbeat:apache \
     params configfile="/etc/httpd/conf/httpd.conf" \
     op monitor interval="1min"
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" \
     op monitor interval="30s"
 ms WebDataClone WebData \
     meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 colocation WebSite-with-WebFS inf: WebSite WebFS
 colocation fs_on_drbd inf: WebFS WebDataClone:Master
 colocation website-with-ip inf: WebSite ClusterIP
 order WebFS-after-WebData inf: WebDataClone:promote WebFS:start
 order WebSite-after-WebFS inf: WebFS WebSite
 order apache-after-ip inf: ClusterIP WebSite
 property $id="cib-bootstrap-options" \
     dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \
     cluster-infrastructure="openais" \
     expected-quorum-votes="2" \
     stonith-enabled="false" \
     no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
     resource-stickiness="100"
 -----
 
 Review the configuration before uploading it to the cluster, then quit
 the shell and watch the cluster's response.
 
 [source,Bash]
 -----
 crm(GFS2) # cib commit GFS2
 INFO: commited 'GFS2' shadow CIB to the cluster
 crm(GFS2) # quit
 bye
 # crm_mon
 ============
 Last updated: Thu Sep 3 20:49:54 2009
 Stack: openais
 Current DC: pcmk-2 - partition with quorum
 Version: 1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f
 2 Nodes configured, 2 expected votes
 6 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
 WebSite (ocf::heartbeat:apache):    Started pcmk-2
 Master/Slave Set: WebDataClone
     Masters: [ pcmk-1 ]
     Slaves: [ pcmk-2 ]
 ClusterIP    (ocf::heartbeat:IPaddr):    Started pcmk-2
 WebFS (ocf::heartbeat:Filesystem): Started pcmk-1
 -----
 endif::[]
 
 == Reconfigure Pacemaker for Active/Active ==
 
 Almost everything is in place. Recent versions of DRBD are capable of
 operating in Primary/Primary mode and the filesystem we're using is
 cluster aware. All we need to do now is reconfigure the cluster to take
 advantage of this.
 
 ifdef::pcs[]
 This will involve a number of changes, so we'll want to work with a
 local CIB file.
 
 [source,Bash]
 ----
 # pcs cluster cib active_cfg
 ----
 endif::[]
 
 ifdef::crm[]
 This will involve a number of changes, so we'll again use interactive
 mode.
 
 [source,Bash]
 -----
 # crm
 # cib new active
 -----
 endif::[]
 
 There's no point making the services active on both locations if we can't
 reach them, so let's first clone the IP address. Cloned IPaddr2 resources
 use an iptables rule to ensure that each request only gets processed by one of
 the two clone instances. The additional meta options tell the cluster how
 many instances of the clone we want (one "request bucket" for each node)
 and that if all other nodes fail, then the remaining node should hold all
 of them. Otherwise the requests would be simply discarded.
 
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs -f active_cfg resource clone ClusterIP \
      globally-unique=true clone-max=2 clone-node-max=2
 ----
 
 Notice that when ClusterIP becomes a clone, the constraints
 referencing ClusterIP now reference the clone.  This is
 done automatically by pcs.
 
 [source,Bash]
 ----
 # pcs -f active_cfg constraint
 Location Constraints:
 Ordering Constraints:
   start ClusterIP-clone then start WebSite
   WebFS then WebSite
   promote WebDataClone then start WebFS
 Colocation Constraints:
   WebSite with ClusterIP-clone
   WebFS with WebDataClone (with-rsc-role:Master)
   WebSite with WebFS
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # configure clone WebIP ClusterIP \
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 -----
 endif::[]
 
 Now we must tell the ClusterIP how to decide which requests are
 processed by which hosts. To do this we must specify the
 clusterip_hash parameter.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs -f active_cfg resource update ClusterIP clusterip_hash=sourceip
 ----
 endif::[]
 
 ifdef::crm[]
 Open the ClusterIP resource
 
 [source,Bash]
 -----
 # configure edit ClusterIP
 -----
 
 And add the following to the params line
 
 .....
 clusterip_hash="sourceip"
 .....
 
 So that the complete definition looks like:
 
 .....
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \
     op monitor interval="30s"
 .....
 
 Here is the full transcript
 
 [source,Bash]
 -----
 # crm
 crm(live)# cib new active
 INFO: active shadow CIB created
 crm(active) # configure clone WebIP ClusterIP \
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 crm(active) # configure show
 node pcmk-1
 node pcmk-2
 primitive WebData ocf:linbit:drbd \
     params drbd_resource="wwwdata" \
     op monitor interval="60s"
 primitive WebFS ocf:heartbeat:Filesystem \
     params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 primitive WebSite ocf:heartbeat:apache \
     params configfile="/etc/httpd/conf/httpd.conf" \
     op monitor interval="1min"
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \
     op monitor interval="30s"
 ms WebDataClone WebData \
     meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 clone WebIP ClusterIP \
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 colocation WebSite-with-WebFS inf: WebSite WebFS
 colocation fs_on_drbd inf: WebFS WebDataClone:Master
 colocation website-with-ip inf: WebSite WebIP
 order WebFS-after-WebData inf: WebDataClone:promote WebFS:start
 order WebSite-after-WebFS inf: WebFS WebSite
 order apache-after-ip inf: WebIP WebSite
 property $id="cib-bootstrap-options" \
     dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \
     cluster-infrastructure="openais" \
     expected-quorum-votes="2" \
     stonith-enabled="false" \
     no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
     resource-stickiness="100"
 -----
 
 Notice how any constraints that referenced ClusterIP have been updated
 to use WebIP instead. This is an additional benefit of using the crm
 shell.
 endif::[]
 
 Next we need to convert the filesystem and Apache resources into
 clones.
 
 ifdef::pcs[]
 Notice how pcs automatically updates the relevant constraints again.
 [source,Bash]
 ----
 # pcs -f active_cfg resource clone WebFS
 # pcs -f active_cfg resource clone WebSite
 # pcs -f active_cfg constraint
 Location Constraints:
 Ordering Constraints:
   start ClusterIP-clone then start WebSite-clone
   WebFS-clone then WebSite-clone
   promote WebDataClone then start WebFS-clone
 Colocation Constraints:
   WebSite-clone with ClusterIP-clone
   WebFS-clone with WebDataClone (with-rsc-role:Master)
   WebSite-clone with WebFS-clone
 ----
 endif::[]
 
 ifdef::crm[]
 Again, the shell will automatically update any relevant
 constraints.
 
 [source,Bash]
 -----
 crm(active) # configure clone WebFSClone WebFS
 crm(active) # configure clone WebSiteClone WebSite
 -----
 endif::[]
 
 The last step is to tell the cluster that it is now allowed to promote
 both instances to be Primary (aka. Master).
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs -f active_cfg resource update WebDataClone master-max=2 
 -----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 crm(active) # configure edit WebDataClone
 -----
 
 Change master-max to 2
 
 [source,Bash]
 -----
 crm(active) # configure show
 node pcmk-1
 node pcmk-2
 primitive WebData ocf:linbit:drbd \
     params drbd_resource="wwwdata" \
     op monitor interval="60s"
 primitive WebFS ocf:heartbeat:Filesystem \
     params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 primitive WebSite ocf:heartbeat:apache \
     params configfile="/etc/httpd/conf/httpd.conf" \
     op monitor interval="1min"
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \
     op monitor interval="30s"
 ms WebDataClone WebData \
     meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 clone WebFSClone WebFS
 clone WebIP ClusterIP \
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 clone WebSiteClone WebSite
 colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone
 colocation fs_on_drbd inf: WebFSClone WebDataClone:Master
 colocation website-with-ip inf: WebSiteClone WebIP
 order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start
 order WebSite-after-WebFS inf: WebFSClone WebSiteClone
 order apache-after-ip inf: WebIP WebSiteClone
 property $id="cib-bootstrap-options" \
     dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \
     cluster-infrastructure="openais" \
     expected-quorum-votes="2" \
     stonith-enabled="false" \
     no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
     resource-stickiness="100"
 -----
 endif::[]
 
 Review the configuration before uploading it to the cluster, then quit
 the shell and watch the cluster's response.
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs cluster push cib active_cfg
 # pcs resource start WebFS
 -----
 
 After all the processes are started, the status should look
 similar to this.
 
 [source,Bash]
 -----
 # pcs resource
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-2 pcmk-1 ]
  Clone Set: dlm-clone [dlm]
      Started: [ pcmk-2 pcmk-1 ]
  Clone Set: ClusterIP-clone [ClusterIP] (unique)
      ClusterIP:0	(ocf::heartbeat:IPaddr2) Started 
      ClusterIP:1	(ocf::heartbeat:IPaddr2) Started 
  Clone Set: WebFS-clone [WebFS]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: WebSite-clone [WebSite]
      Started: [ pcmk-1 pcmk-2 ]
 -----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 crm(active) # cib commit active
 INFO: commited 'active' shadow CIB to the cluster
 crm(active) # quit
 bye
 # crm_mon
 ============
 Last updated: Thu Sep 3 21:37:27 2009
 Stack: openais
 Current DC: pcmk-2 - partition with quorum
 Version: 1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f
 2 Nodes configured, 2 expected votes
 6 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Master/Slave Set: WebDataClone
     Masters: [ pcmk-1 pcmk-2 ]
 Clone Set: WebIP
     Started: [ pcmk-1 pcmk-2 ]
 Clone Set: WebFSClone
     Started: [ pcmk-1 pcmk-2 ]
 Clone Set: WebSiteClone
     Started: [ pcmk-1 pcmk-2 ]
 Clone Set: dlm_clone
     Started: [ pcmk-1 pcmk-2 ]
 -----
 endif::[]
 
 === Testing Recovery ===
 
 [NOTE]
 =======
 TODO: Put one node into standby to demonstrate failover
 =======
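+
+A minimal sketch of such a test, using the same tools as the rest of
+this chapter (pcs shown here; the crm shell equivalents are +crm node
+standby+ and +crm node online+):
+
+[source,Bash]
+-----
+# pcs cluster standby pcmk-1    # take pcmk-1 out of service
+# pcs status                    # all resources should now be running on pcmk-2 only
+# pcs cluster unstandby pcmk-1  # bring pcmk-1 back into the cluster
+-----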
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
index 95e01eae88..7375dc8850 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
@@ -1,669 +1,669 @@
 = Creating an Active/Passive Cluster =
 
 == Exploring the Existing Configuration ==
 
 When Pacemaker starts up, it automatically records the number and details
 of the nodes in the cluster as well as which stack is being used and the
 version of Pacemaker being used.
 
 This is what the base configuration should look like.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs status
 Last updated: Fri Sep 14 10:12:01 2012
 Last change: Fri Sep 14 09:51:55 2012 via crmd on pcmk-2
 Stack: corosync
 Current DC: pcmk-1 (1) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 0 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync"
 ----
 endif::[]
 
-For those that are not of afraid of XML, you can see the raw
-configuration by appending "xml" to the previous command.
+For those who are not afraid of XML, you can see the raw cluster
+configuration and status by using the +pcs cluster cib+ command.
 
 .The last XML you'll see in this document
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs cluster cib
 <cib epoch="4" num_updates="19" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="pcmk-1" update-client="crmd" cib-last-written="Wed Aug  1 16:08:52 2012" have-quorum="1" dc-uuid="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" uname="pcmk-1" type="normal"/>
       <node id="2" uname="pcmk-2" type="normal"/>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="2" uname="pcmk-2" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="do_state_transition" shutdown="0">
       <lrm id="2">
         <lrm_resources/>
       </lrm>
       <transient_attributes id="2">
         <instance_attributes id="status-2">
           <nvpair id="status-2-probe_complete" name="probe_complete" value="true"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="1" uname="pcmk-1" ha="active" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="do_state_transition" shutdown="0">
       <lrm id="1">
         <lrm_resources/>
       </lrm>
       <transient_attributes id="1">
         <instance_attributes id="status-1">
           <nvpair id="status-1-probe_complete" name="probe_complete" value="true"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure show xml
 <?xml version="1.0" ?>
 <cib admin_epoch="0" cib-last-written="Tue Apr  3 09:26:21 2012" crm_feature_set="3.0.6" dc-uuid="1702537408" epoch="4" have-quorum="1" num_updates="14" update-client="crmd" update-origin="pcmk-1" validate-with="pacemaker-1.2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1719314624" type="normal" uname="pcmk-2"/>
       <node id="1702537408" type="normal" uname="pcmk-1"/>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
 </cib>
 ----
 endif::[]
 
 Before we make any changes, it's a good idea to check the validity of
 the configuration.
 
 [source,Bash]
 ----
 # crm_verify -L -V
    error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
    error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
    error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Errors found during check: config not valid
   -V may provide more details
 ----
 
 As you can see, the tool has found some errors.
 
 In order to guarantee the safety of your data
 footnote:[If the data is corrupt, there is little point in continuing to make it available]
 , the default for STONITH
 footnote:[A common node fencing mechanism. Used to ensure data integrity by powering off "bad" nodes]
 in Pacemaker is +enabled+.  However it also knows when no STONITH configuration has been
 supplied and reports this as a problem (since the cluster would not be
 able to make progress if a situation requiring node fencing arose).
 
 For now, we will disable this feature and configure it later in the
 Configuring STONITH section. It is important to note that the use of
 STONITH is highly encouraged; turning it off tells the cluster to
 simply pretend that failed nodes are safely powered off. Some vendors
 will even refuse to support clusters that have it disabled.
 
 To disable STONITH, we set the _stonith-enabled_ cluster option to
 false.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs property set stonith-enabled=false
 # crm_verify -L
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure property stonith-enabled=false
 # crm_verify -L
 ----
 endif::[]
 
 With the new cluster option set, the configuration is now valid.
 
 [WARNING]
 =========
 
 The use of stonith-enabled=false is completely inappropriate for a
 production cluster. We use it here to defer the discussion of its
 configuration which can differ widely from one installation to the
 next.  See  <<_what_is_stonith>> for information on why STONITH is important
 and details on how to configure it.
 
 =========
 
 == Adding a Resource ==
 
 The first thing we should do is configure an IP address. Regardless of
 where the cluster service(s) are running, we need a consistent address
 to contact them on. Here I will choose and add 192.168.122.120 as the
 floating address, give it the imaginative name ClusterIP and tell the
 cluster to check that it is running every 30 seconds.
 
 
 [IMPORTANT]
 ===========
 The chosen address must not be one already associated with
 a physical node
 ===========
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \
     ip=192.168.0.120 cidr_netmask=32 op monitor interval=30s
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \
      params ip=192.168.122.120 cidr_netmask=32 \
      op monitor interval=30s
 ----
 endif::[]
 
 The other important piece of information here is ocf:heartbeat:IPaddr2.
 
 This tells Pacemaker three things about the resource you want to
 add. The first field, ocf, is the standard to which the resource
 script conforms and where to find it.
 to OCF resources and tells the cluster which namespace to find the
 resource script in, in this case heartbeat. The last field indicates
 the name of the resource script.
 
 ifdef::pcs[]
 To obtain a list of the available resource standards (the ocf part of
 ocf:heartbeat:IPaddr2), run
 
 [source,Bash]
 ----
 # pcs resource standards
 ocf
 lsb
 service
 systemd
 stonith
 ----
 
 To obtain a list of the available ocf resource providers (the heartbeat
 part of ocf:heartbeat:IPaddr2), run
 
 [source,Bash]
 ----
 # pcs resource providers
 heartbeat
 linbit
 pacemaker
 redhat
 ----
 
 Finally, if you want to see all the resource agents available for
 a specific ocf provider (the IPaddr2 part of ocf:heartbeat:IPaddr2), run
 
 [source,Bash]
 ----
 # pcs resource agents ocf:heartbeat
 AoEtarget
 AudibleAlarm
 CTDB
 ClusterMon
 Delay
 Dummy
 .
 . (skipping lots of resources to save space)
 .
 IPaddr2
 .
 .
 .
 symlink
 syslog-ng
 tomcat
 vmware
 ----
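 
 If your version of pcs supports it, you can also ask for the full
 description of a specific agent, including the parameters it accepts
 (a handy reference when deciding which options to set):
 
 [source,Bash]
 ----
 # pcs resource describe ocf:heartbeat:IPaddr2
 ----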
 endif::[]
 
 ifdef::crm[]
 
 To obtain a list of the available resource classes, run
 
 [source,Bash]
 ----
 # crm ra classes
 heartbeat
 lsb
 ocf / heartbeat pacemaker
 stonith
 ----
 
 To then find all the OCF resource agents provided by Pacemaker and
 Heartbeat, run
 
 [source,Bash]
 ----
 # crm ra list ocf pacemaker
 ClusterMon    Dummy         HealthCPU     HealthSMART   Stateful      SysInfo
 SystemHealth  controld      o2cb          ping          pingd
 # crm ra list ocf heartbeat
 AoEtarget            AudibleAlarm         CTDB                 ClusterMon
 Delay                Dummy                EvmsSCC              Evmsd
 Filesystem           ICP                  IPaddr               IPaddr2
 IPsrcaddr            IPv6addr             LVM                  LinuxSCSI
 MailTo               ManageRAID           ManageVE             Pure-FTPd
 Raid1                Route                SAPDatabase          SAPInstance
 SendArp              ServeRAID            SphinxSearchDaemon   Squid
 Stateful             SysInfo              VIPArip              VirtualDomain
 WAS                  WAS6                 WinPopup             Xen
 Xinetd               anything             apache               conntrackd
 db2                  drbd                 eDir88               ethmonitor
 exportfs             fio                  iSCSILogicalUnit     iSCSITarget
 ids                  iscsi                jboss                ldirectord
 lxc                  mysql                mysql-proxy          nfsserver
 nginx                oracle               oralsnr              pgsql
 pingd                portblock            postfix              proftpd
 rsyncd               scsi2reservation     sfex                 symlink
 syslog-ng            tomcat               vmware
 ----
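 
 To see the full description of a specific agent, including the
 parameters it accepts, ask the shell for its metadata:
 
 [source,Bash]
 ----
 # crm ra info ocf:heartbeat:IPaddr2
 ----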
 endif::[]
 
 Now verify that the IP resource has been added and display the cluster's
 status to see that it is now active.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs status
 
 Last updated: Fri Sep 14 10:17:00 2012
 Last change: Fri Sep 14 10:15:48 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false"
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 09:56:50 2012
 Last change: Tue Apr  3 09:54:37 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1702537408) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
 ----
 endif::[]
 
 == Perform a Failover ==
 
 Since this is a high-availability cluster, we should test failover of our
 new resource before moving on.
 
 First, find the node on which the IP address is running.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs status
 
 Last updated: Fri Sep 14 10:17:00 2012
 Last change: Fri Sep 14 10:15:48 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm resource status ClusterIP
 resource ClusterIP is running on: pcmk-1
 ----
 endif::[]
 
 Shut down Pacemaker and Corosync on that machine.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs cluster stop pcmk-1
 Stopping Cluster...
 ----
 
 Once Corosync is no longer running, go to the other node and check the
 cluster status.
 
 [source,Bash]
 ----
 # pcs status
 
 Last updated: Fri Sep 14 10:31:01 2012
 Last change: Fri Sep 14 10:15:48 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (2) - partition WITHOUT quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 
 Online: [ pcmk-2 ]
 OFFLINE: [ pcmk-1 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Stopped 
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # ssh pcmk-1 -- service pacemaker stop
 # ssh pcmk-1 -- service corosync stop
 ----
 
 Once Corosync is no longer running, go to the other node and check the
 cluster status with crm_mon.
 
 [source,Bash]
 ----
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 10:01:28 2012
 Last change: Tue Apr  3 09:54:39 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (1719314624) - partition WITHOUT quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 ============
 
 Online: [ pcmk-2 ]
 OFFLINE: [ pcmk-1 ]
 ----
 endif::[]
 
 There are three things to notice about the cluster's current
 state. The first is that, as expected, +pcmk-1+ is now offline. However,
 we can also see that +ClusterIP+ isn't running anywhere!
 
 
 === Quorum and Two-Node Clusters ===
 
 This is because the cluster no longer has quorum, as can be seen by
 the text "partition WITHOUT quorum" in the status output.  In order
 to reduce the possibility of data corruption, Pacemaker's default
 behavior is to stop all resources if the cluster does not have quorum.
 
 A cluster is said to have quorum when more than half the known or
 expected nodes are online, or for the mathematically inclined,
 whenever the following equation is true:
 
 ....
 total_nodes < 2 * active_nodes
 ....
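 
 For example, with both of our nodes online we have +total_nodes = 2+ and
 +active_nodes = 2+, so +2 < 4+ holds and the cluster has quorum. With only
 one node left, +2 < 2+ is false and quorum is lost.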
 
 Therefore a two-node cluster only has quorum when both nodes are
 running, which is no longer the case for our cluster. This would
 normally make the creation of a two-node cluster pointless
 footnote:[Actually some would argue that two-node clusters are always pointless, but that is an argument for another time]
 ; however, it is possible to control how Pacemaker behaves when quorum
 is lost. In particular, we can tell the cluster to simply ignore
 quorum altogether.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs property set no-quorum-policy=ignore
 # pcs property
 dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 cluster-infrastructure: corosync
 stonith-enabled: false
 no-quorum-policy: ignore
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure property no-quorum-policy=ignore
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore"
 ----
 endif::[]
 
 After a few moments, the cluster will start the IP address on the
 remaining node. Note that the cluster still does not have quorum.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs status
 Last updated: Fri Sep 14 10:38:11 2012
 Last change: Fri Sep 14 10:37:53 2012 via cibadmin on pcmk-2
 Stack: corosync
 Current DC: pcmk-2 (2) - partition WITHOUT quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 
 Online: [ pcmk-2 ]
 OFFLINE: [ pcmk-1 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 10:02:46 2012
 Last change: Tue Apr  3 10:02:08 2012 via cibadmin on pcmk-2
 Stack: corosync
 Current DC: pcmk-2 (1719314624) - partition WITHOUT quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 ============
 
 Online: [ pcmk-2 ]
 OFFLINE: [ pcmk-1 ]
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
 ----
 endif::[]
 
 Now simulate node recovery by restarting the cluster stack on +pcmk-1+ and
 check the cluster's status. Note that if you get an authentication error with
 the 'pcs cluster start pcmk-1' command, you must authenticate on the node
 using the 'pcs cluster auth pcmk-1 pcmk-2' command discussed earlier.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs cluster start pcmk-1
 Starting Cluster...
 # pcs status
 
 Last updated: Fri Sep 14 10:42:56 2012
 Last change: Fri Sep 14 10:37:53 2012 via cibadmin on pcmk-2
 Stack: corosync
 Current DC: pcmk-2 (2) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 1 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # service corosync start
 Starting Corosync Cluster Engine (corosync): [ OK ]
 # service pacemaker start
 Starting Pacemaker Cluster Manager: [ OK ]
 # crm_mon
 ============
 Last updated: Fri Aug 28 15:32:13 2009
 Stack: openais
 Current DC: pcmk-2 - partition with quorum
 Version: 1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f
 2 Nodes configured, 2 expected votes
 1 Resources configured.
 ============
 Online: [ pcmk-1 pcmk-2 ]
 
 ClusterIP    (ocf::heartbeat:IPaddr):    Started pcmk-2
 ----
 endif::[]
 
 [NOTE]
 ======
 In the dark days, the cluster would have moved the IP back to its
 original location (+pcmk-1+). Usually this is no longer the case.
 ======
 
 === Prevent Resources from Moving after Recovery ===
 
 In most circumstances, it is highly desirable to prevent healthy
 resources from being moved around the cluster. Moving resources almost
 always requires a period of downtime. For complex services like Oracle
 databases, this period can be quite long.
 
 To address this, Pacemaker has the concept of resource stickiness
 which controls how much a service prefers to stay running where it
 is. You may like to think of it as the "cost" of any downtime. By
 default, Pacemaker assumes there is zero cost associated with moving
 resources and will do so to achieve "optimal"
 footnote:[It should be noted that Pacemaker's definition of
 optimal may not always agree with that of a human. The order in which
 Pacemaker processes lists of resources and nodes creates implicit
 preferences in situations where the administrator has not explicitly
 specified them]
 resource placement. We can specify a different stickiness for every
 resource, but it is often sufficient to change the default.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs resource rsc defaults resource-stickiness=100
 # pcs resource rsc defaults
 resource-stickiness: 100
 ----
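 
 Should a particular resource ever need a different value, the stickiness
 can also be set on the resource itself rather than via the default. As a
 sketch, assuming your version of pcs supports setting meta attributes
 with 'pcs resource meta':
 
 [source,Bash]
 ----
 # pcs resource meta ClusterIP resource-stickiness=200
 ----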
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm configure rsc_defaults resource-stickiness=100
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 ----
 endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
index b87d76ea09..3cb8a83d58 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt
@@ -1,826 +1,790 @@
 = Apache - Adding More Services =
 
 == Foreword ==
 Now that we have a basic but functional active/passive two-node cluster,
 we're ready to add some real services. We're going to start with Apache
 because it's a feature of many clusters and relatively simple to
 configure.
 
 == Installation ==
 
 Before continuing, we need to make sure Apache is installed on both
-hosts.
+hosts. We also need the wget tool in order for the cluster to be able to check
+the status of the Apache server.
 
 [source,Bash]
 .....
-# yum install -y httpd
+# yum install -y httpd wget
 Loaded plugins: langpacks, presto, refresh-packagekit
 fedora/metalink                                               | 2.6 kB     00:00
 updates/metalink                                              | 3.2 kB     00:00
 updates-testing/metalink                                      |  41 kB     00:00
 Resolving Dependencies
 --> Running transaction check
 ---> Package httpd.x86_64 0:2.2.22-3.fc17 will be installed
 --> Processing Dependency: httpd-tools = 2.2.22-3.fc17 for package: httpd-2.2.22-3.fc17.x86_64
 --> Processing Dependency: apr-util-ldap for package: httpd-2.2.22-3.fc17.x86_64
 --> Processing Dependency: libaprutil-1.so.0()(64bit) for package: httpd-2.2.22-3.fc17.x86_64
 --> Processing Dependency: libapr-1.so.0()(64bit) for package: httpd-2.2.22-3.fc17.x86_64
 --> Running transaction check
 ---> Package apr.x86_64 0:1.4.6-1.fc17 will be installed
 ---> Package apr-util.x86_64 0:1.4.1-2.fc17 will be installed
 ---> Package apr-util-ldap.x86_64 0:1.4.1-2.fc17 will be installed
 ---> Package httpd-tools.x86_64 0:2.2.22-3.fc17 will be installed
 --> Finished Dependency Resolution
 
 Dependencies Resolved
 
 =====================================================================================
  Package             Arch         Version                Repository             Size
 =====================================================================================
 Installing:
  httpd               x86_64       2.2.22-3.fc17          updates-testing       823 k
+ wget                x86_64       1.13.4-2.fc17          fedora                495 k
 Installing for dependencies:
  apr                 x86_64       1.4.6-1.fc17           fedora                 99 k
  apr-util            x86_64       1.4.1-2.fc17           fedora                 78 k
  apr-util-ldap       x86_64       1.4.1-2.fc17           fedora                 17 k
  httpd-tools         x86_64       2.2.22-3.fc17          updates-testing        74 k
 
 Transaction Summary
 =====================================================================================
 Install  1 Package (+4 Dependent packages)
 
 Total download size: 1.1 M
 Installed size: 3.5 M
 Downloading Packages:
-(1/5): apr-1.4.6-1.fc17.x86_64.rpm                            |  99 kB     00:00
-(2/5): apr-util-1.4.1-2.fc17.x86_64.rpm                       |  78 kB     00:00
-(3/5): apr-util-ldap-1.4.1-2.fc17.x86_64.rpm                  |  17 kB     00:00
-(4/5): httpd-2.2.22-3.fc17.x86_64.rpm                         | 823 kB     00:01
-(5/5): httpd-tools-2.2.22-3.fc17.x86_64.rpm                   |  74 kB     00:00
+(1/6): apr-1.4.6-1.fc17.x86_64.rpm                            |  99 kB     00:00
+(2/6): apr-util-1.4.1-2.fc17.x86_64.rpm                       |  78 kB     00:00
+(3/6): apr-util-ldap-1.4.1-2.fc17.x86_64.rpm                  |  17 kB     00:00
+(4/6): httpd-2.2.22-3.fc17.x86_64.rpm                         | 823 kB     00:01
+(5/6): httpd-tools-2.2.22-3.fc17.x86_64.rpm                   |  74 kB     00:00
+(6/6): wget-1.13.4-2.fc17.x86_64.rpm                          | 495 kB     00:01
 -------------------------------------------------------------------------------------
 Total                                                238 kB/s | 1.1 MB     00:04
 Running Transaction Check
 Running Transaction Test
 Transaction Test Succeeded
 Running Transaction
-  Installing : apr-1.4.6-1.fc17.x86_64                                           1/5
-  Installing : apr-util-1.4.1-2.fc17.x86_64                                      2/5
-  Installing : apr-util-ldap-1.4.1-2.fc17.x86_64                                 3/5
-  Installing : httpd-tools-2.2.22-3.fc17.x86_64                                  4/5
-  Installing : httpd-2.2.22-3.fc17.x86_64                                        5/5
-  Verifying  : apr-util-ldap-1.4.1-2.fc17.x86_64                                 1/5
-  Verifying  : httpd-tools-2.2.22-3.fc17.x86_64                                  2/5
-  Verifying  : apr-util-1.4.1-2.fc17.x86_64                                      3/5
-  Verifying  : apr-1.4.6-1.fc17.x86_64                                           4/5
-  Verifying  : httpd-2.2.22-3.fc17.x86_64                                        5/5
+  Installing : apr-1.4.6-1.fc17.x86_64                                           1/6
+  Installing : apr-util-1.4.1-2.fc17.x86_64                                      2/6
+  Installing : apr-util-ldap-1.4.1-2.fc17.x86_64                                 3/6
+  Installing : httpd-tools-2.2.22-3.fc17.x86_64                                  4/6
+  Installing : httpd-2.2.22-3.fc17.x86_64                                        5/6
+  Installing : wget-1.13.4-2.fc17.x86_64                                         6/6
+  Verifying  : apr-util-ldap-1.4.1-2.fc17.x86_64                                 1/6
+  Verifying  : httpd-tools-2.2.22-3.fc17.x86_64                                  2/6
+  Verifying  : apr-util-1.4.1-2.fc17.x86_64                                      3/6
+  Verifying  : apr-1.4.6-1.fc17.x86_64                                           4/6
+  Verifying  : httpd-2.2.22-3.fc17.x86_64                                        5/6
+  Verifying  : wget-1.13.4-2.fc17.x86_64                                         6/6
 
 Installed:
-  httpd.x86_64 0:2.2.22-3.fc17
+  httpd.x86_64 0:2.2.22-3.fc17              wget.x86_64 0:1.13.4-2.fc17
 
 Dependency Installed:
   apr.x86_64 0:1.4.6-1.fc17                 apr-util.x86_64 0:1.4.1-2.fc17
   apr-util-ldap.x86_64 0:1.4.1-2.fc17       httpd-tools.x86_64 0:2.2.22-3.fc17
 
 Complete!
 .....
 
-Also, we need the wget tool in order for the cluster to be able to check
-the status of the Apache server.
-
-[source,Bash]
-.....
-# yum install -y wget
-Loaded plugins: langpacks, presto, refresh-packagekit
-Resolving Dependencies
---> Running transaction check
----> Package wget.x86_64 0:1.13.4-2.fc17 will be installed
---> Finished Dependency Resolution
-
-Dependencies Resolved
-
-=====================================================================================
- Package         Arch              Version                   Repository         Size
-=====================================================================================
-Installing:
- wget            x86_64            1.13.4-2.fc17             fedora            495 k
-
-Transaction Summary
-=====================================================================================
-Install  1 Package
-
-Total download size: 495 k
-Installed size: 1.8 M
-Downloading Packages:
-wget-1.13.4-2.fc17.x86_64.rpm                                 | 495 kB     00:01
-Running Transaction Check
-Running Transaction Test
-Transaction Test Succeeded
-Running Transaction
-  Installing : wget-1.13.4-2.fc17.x86_64                                         1/1
-  Verifying  : wget-1.13.4-2.fc17.x86_64                                         1/1
-
-Installed:
-  wget.x86_64 0:1.13.4-2.fc17
-
-Complete!
-.....
-
 == Preparation ==
 
 First we need to create a page for Apache to serve up. On Fedora the
 default Apache docroot is /var/www/html, so we'll create an index file
 there.
 
 [source,Bash]
 -----
 # cat <<-END >/var/www/html/index.html
  <html>
  <body>My Test Site - pcmk-1</body>
  </html>
 END
 -----
 
 For the moment, we will simplify things by serving up only a static site
 and manually syncing the data between the two nodes. So run the command
 again on pcmk-2.
 
 [source,Bash]
 -----
 [root@pcmk-2 ~]# cat <<-END >/var/www/html/index.html
  <html>
  <body>My Test Site - pcmk-2</body>
  </html>
 END
 -----
 
 == Enable the Apache status URL ==
 
 In order to monitor the health of your Apache instance, and recover it if
 it fails, the resource agent used by Pacemaker assumes the server-status
 URL is available. Look for the following in '/etc/httpd/conf/httpd.conf'
 and make sure it is not disabled or commented out:
 
 .....
 <Location /server-status>
    SetHandler server-status
    Order deny,allow
    Deny from all
    Allow from 127.0.0.1
 </Location>
 .....
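 
 A quick way to confirm that the block is present and has not been
 commented out is to search for it directly:
 
 [source,Bash]
 -----
 # grep -A 5 "<Location /server-status>" /etc/httpd/conf/httpd.conf
 -----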
 
 == Update the Configuration ==
 
 At this point, Apache is ready to go; all that needs to be done is to
 add it to the cluster. Let's call the resource WebSite. We need to use
 an OCF script called apache in the heartbeat namespace
 footnote:[Compare the key used here ocf:heartbeat:apache with the one we used earlier for the IP address: ocf:heartbeat:IPaddr2]
 ; the only required parameter is the path to the main Apache
 configuration file, and we'll tell the cluster to check once a
 minute that Apache is still running.
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs resource create WebSite ocf:heartbeat:apache  \
       configfile=/etc/httpd/conf/httpd.conf \
       statusurl="http://localhost/server-status" op monitor interval=1min
 -----
 
 By default, the operation timeout for a resource's start, stop, and monitor
 operations is 20 seconds. In many cases this timeout period is less than
 what the resource agent advises. For the purposes of this tutorial, we will
 adjust the global operation timeout default to 240 seconds.
 
 [source,Bash]
 -----
 # pcs resource op defaults timeout=240s
 # pcs resource op defaults
 timeout: 240s
 -----
 
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm configure primitive WebSite ocf:heartbeat:apache \
      params configfile=/etc/httpd/conf/httpd.conf \
      statusurl="http://localhost/server-status" \
      op monitor interval=1min
 WARNING: WebSite: default timeout 20s for start is smaller than the advised 40s
 WARNING: WebSite: default timeout 20s for stop is smaller than the advised 60s
 -----
 
 The easiest way to resolve this is to change the default:
 
 [source,Bash]
 -----
 # crm configure op_defaults timeout=240s
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 -----
 endif::[]
 
 After a short delay, we should see the cluster start Apache.
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs status
 
 Last updated: Fri Sep 14 10:51:27 2012
 Last change: Fri Sep 14 10:50:46 2012 via crm_attribute on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (2) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
  WebSite	(ocf::heartbeat:apache):	Started pcmk-1
 -----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 11:54:29 2012
 Last change: Tue Apr  3 11:54:26 2012 via crmd on pcmk-1
 Stack: corosync
 Current DC: pcmk-1 (1702537408)	- partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
 ClusterIP	(ocf:heartbeat:IPaddr2):	Started pcmk-2
 WebSite (ocf:heartbeat:apache):        Started pcmk-1
 -----
 endif::[]
 
 Wait a moment, the WebSite resource isn't running on the same host as our
 IP address!
 
 ifdef::pcs[]
 [NOTE]
 ======
 If, in the `pcs status` output, you see the WebSite resource has
 failed to start, then you've likely not enabled the status URL correctly.
 You can check if this is the problem by running:
 
 ....
 wget http://127.0.0.1/server-status
 ....
 
 If you see +Connection refused+ in the output, then this is indeed the
 problem.  Check to ensure that +Allow from 127.0.0.1+ is present for
 the +<Location /server-status>+ block.
 
 ======
 endif::[]
 
 ifdef::crm[]
 [NOTE]
 ======
 If, in the `crm_mon` output, you see:
 
 ....
 Failed actions:
     WebSite_start_0 (node=pcmk-2, call=301, rc=1, status=complete): unknown error
 ....
 
 Then you've likely not enabled the status URL correctly.
 You can check if this is the problem by running:
 
 ....
 wget http://127.0.0.1/server-status
 ....
 
 If you see +Connection refused+ in the output, then this is indeed the
 problem.  Check to ensure that +Allow from 127.0.0.1+ is present for
 the +<Location /server-status>+ block.
 
 ======
 endif::[]
 
 == Ensuring Resources Run on the Same Host ==
 
 To reduce the load on any one machine, Pacemaker will generally try to
 spread the configured resources across the cluster nodes. However, we
 can tell the cluster that two resources are related and need to run on
 the same host (or not at all). Here we instruct the cluster that
 WebSite can only run on the host that ClusterIP is active on.
 
 ifdef::pcs[]
 To achieve this we use a colocation constraint that indicates it is
 mandatory for WebSite to run on the same node as ClusterIP.  The
 "mandatory" part of the colocation constraint is indicated by using a
 score of INFINITY.  The INFINITY score also means that if ClusterIP is not
 active anywhere, WebSite will not be permitted to run.
 endif::[]
 
 ifdef::crm[]
 For the constraint, we need a name (choose something descriptive like
 website-with-ip), indicate that it's mandatory (so that if ClusterIP is
 not active anywhere, WebSite will not be permitted to run anywhere
 either) by specifying a score of INFINITY, and finally list the two
 resources.
 endif::[]
 
 [NOTE]
 =======
 If ClusterIP is not active anywhere, WebSite will not be permitted to run
 anywhere.
 =======
 
 [IMPORTANT]
 ===========
 
 Colocation constraints are "directional", in that they imply certain
 things about the order in which the two resources will have a location
 chosen. In this case we're saying +WebSite+ needs to be placed on the
 same machine as +ClusterIP+; this implies that we must know the
 location of +ClusterIP+ before choosing a location for +WebSite+.
 
 ===========
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs constraint colocation add WebSite ClusterIP INFINITY
 # pcs constraint
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
   WebSite with ClusterIP
 # pcs status
 
 Last updated: Fri Sep 14 11:00:44 2012
 Last change: Fri Sep 14 11:00:25 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (2) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
  WebSite	(ocf::heartbeat:apache):	Started pcmk-2
 -----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm configure colocation website-with-ip INFINITY: WebSite ClusterIP
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 colocation website-with-ip inf: WebSite ClusterIP
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore" \
 	last-lrm-refresh="1333446866"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 11:57:13 2012
 Last change: Tue Apr  3 11:56:10 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (1719314624) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf:heartbeat:IPaddr2):   Started pcmk-2
  WebSite	(ocf:heartbeat:apache):    Started pcmk-2
 -----
 endif::[]
 
 == Controlling Resource Start/Stop Ordering ==
 
 When Apache starts, it binds to the available IP addresses. It doesn't
 know about any addresses we add afterwards, so not only do they need to
 run on the same node, but we need to make sure ClusterIP is already
 active before we start WebSite. We do this by adding an ordering
 constraint.
 
 ifdef::pcs[]
 By default all order constraints are mandatory constraints unless
 otherwise configured.  This means that the recovery of ClusterIP will
 also trigger the recovery of WebSite.
 
 [source,Bash]
 -----
 # pcs constraint order ClusterIP then WebSite
 Adding ClusterIP WebSite (kind: Mandatory) (Options: first-action=start then-action=start)
 # pcs constraint
 Location Constraints:
 Ordering Constraints:
   start ClusterIP then start WebSite
 Colocation Constraints:
   WebSite with ClusterIP
 -----
 endif::[]
 
 ifdef::crm[]
 
 We need to give it a name (choose something descriptive like
 apache-after-ip), indicate that it's mandatory (so that any recovery for
 ClusterIP will also trigger recovery of WebSite), and list the two
 resources in the order we need them to start.
 
 [source,Bash]
 -----
 # crm configure order apache-after-ip mandatory: ClusterIP WebSite
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 colocation website-with-ip inf: WebSite ClusterIP
 order apache-after-ip inf: ClusterIP WebSite
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore" \
 	last-lrm-refresh="1333446866"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 -----
 endif::[]
 
 == Specifying a Preferred Location ==
 
 Pacemaker does not rely on any sort of hardware symmetry between nodes,
 so it may well be that one machine is more powerful than the other. In
 such cases it makes sense to host the resources on the more powerful
 node if it is available. To do this we create a location constraint.
 To do this we create a location constraint.
 
 ifdef::pcs[]
 In the location constraint below, we are saying the WebSite resource
 prefers the node pcmk-1 with a score of 50.  The score here indicates
 how strongly we'd like the resource to run on that node.
 
 [source,Bash]
 -----
 # pcs constraint location WebSite prefers pcmk-1=50
 # pcs constraint
 Location Constraints:
   Resource: WebSite
     Enabled on: pcmk-1 (score:50)
 Ordering Constraints:
   start ClusterIP then start WebSite
 Colocation Constraints:
   WebSite with ClusterIP
 # pcs status
 Last updated: Fri Sep 14 11:06:37 2012
 Last change: Fri Sep 14 11:06:26 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (2) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
  WebSite	(ocf::heartbeat:apache):	Started pcmk-2
 -----
 endif::[]
 
 ifdef::crm[]
 Again we give it a descriptive name (prefer-pcmk-1), specify the resource we
 want to run there (WebSite), how badly we'd like it to run there (we'll use
 50 for now, but in a two-node situation almost any value above 0 will do) and
 the host's name.
 
 [source,Bash]
 -----
 # crm configure location prefer-pcmk-1 WebSite 50: pcmk-1
 WARNING: prefer-pcmk-1: referenced node pcmk-1 does not exist
 -----
 
 This warning should be ignored.
 
 [source,Bash]
 -----
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 location prefer-pcmk-1 WebSite 50: pcmk-1
 colocation website-with-ip inf: WebSite ClusterIP
 order apache-after-ip inf: ClusterIP WebSite
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore" \
 	last-lrm-refresh="1333446866"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 12:02:14 2012
 Last change: Tue Apr  3 11:59:42 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (1719314624) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf:heartbeat:IPaddr2):	Started pcmk-2
  WebSite	(ocf:heartbeat:apache):	Started pcmk-2
 -----
 endif::[]
 
 Wait a minute, the resources are still on pcmk-2!
 
 Even though we now prefer pcmk-1 over pcmk-2, that preference is
 (intentionally) less than the resource stickiness (how much we
 preferred not to have unnecessary downtime).
 
 To see the current placement scores, you can use a tool called crm_simulate.
 
 [source,Bash]
 ----
 # crm_simulate -sL
 Current cluster status:
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf:heartbeat:IPaddr2):	Started pcmk-2
  WebSite	(ocf:heartbeat:apache):	Started pcmk-2
 
 Allocation scores:
 native_color: ClusterIP allocation score on pcmk-1: 50
 native_color: ClusterIP allocation score on pcmk-2: 200
 native_color: WebSite allocation score on pcmk-1: -INFINITY
 native_color: WebSite allocation score on pcmk-2: 100
 
 Transition Summary:
 ----
 
 == Manually Moving Resources Around the Cluster ==
 
 ifdef::pcs[]
 There are always times when an administrator needs to override the
 cluster and force resources to move to a specific location.  By
 updating our previous location constraint with a score of INFINITY,
 WebSite will be forced to move to pcmk-1.
 
 [source,Bash]
 -----
 # pcs constraint location WebSite prefers pcmk-1=INFINITY
 # pcs constraint all
 Location Constraints:
   Resource: WebSite
     Enabled on: pcmk-1 (score:INFINITY) (id:location-WebSite-pcmk-1-INFINITY)
 Ordering Constraints:
   start ClusterIP then start WebSite (Mandatory) (id:order-ClusterIP-WebSite-mandatory)
 Colocation Constraints:
   WebSite with ClusterIP (INFINITY) (id:colocation-WebSite-ClusterIP-INFINITY)
 # pcs status
 
 Last updated: Fri Sep 14 11:16:26 2012
 Last change: Fri Sep 14 11:16:18 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (2) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
  WebSite	(ocf::heartbeat:apache):	Started pcmk-1
 -----
 endif::[]
 
 ifdef::crm[]
 There are always times when an administrator needs to override the
 cluster and force resources to move to a specific location. Underneath we
 use location constraints like the one we created above; happily, you don't
 need to care. Just provide the name of the resource and the intended
 location, and we'll do the rest.
 
 [source,Bash]
 -----
 # crm resource move WebSite pcmk-1
 # crm_mon -1
 ============
 Last updated: Tue Apr  3 12:03:41 2012
 Last change: Tue Apr  3 12:03:37 2012 via crm_resource on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (1719314624) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf:heartbeat:IPaddr2):	Started pcmk-1
  WebSite	(ocf:heartbeat:apache):	Started pcmk-1
 -----
 
 Notice how the colocation rule we created has ensured that ClusterIP was also moved to pcmk-1.
 For the curious, we can see the effect of this command by examining the configuration
 
 [source,Bash]
 -----
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 location cli-prefer-WebSite WebSite \
 	rule $id="cli-prefer-rule-WebSite" inf: #uname eq pcmk-1
 location prefer-pcmk-1 WebSite 50: pcmk-1
 colocation website-with-ip inf: WebSite ClusterIP
 order apache-after-ip inf: ClusterIP WebSite
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore" \
 	last-lrm-refresh="1333446866"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 -----
 
 The automated constraint used to move the resources to +pcmk-1+ is the
 line beginning with +location cli-prefer-WebSite+.
 endif::[]
 
 === Giving Control Back to the Cluster ===
 
 Once we've finished whatever activity required us to move the
 resources to pcmk-1 (in our case nothing), we can then allow the cluster
 to resume normal operation with the unmove command. Since we previously
 configured a default stickiness, the resources will remain on pcmk-1.
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs constraint all
 Location Constraints:
   Resource: WebSite
     Enabled on: pcmk-1 (score:INFINITY) (id:location-WebSite-pcmk-1-INFINITY)
 Ordering Constraints:
   start ClusterIP then start WebSite (Mandatory) (id:order-ClusterIP-WebSite-mandatory)
 Colocation Constraints:
   WebSite with ClusterIP (INFINITY) (id:colocation-WebSite-ClusterIP-INFINITY)
 # pcs constraint rm location-WebSite-pcmk-1-INFINITY
 # pcs constraint
 Location Constraints:
 Ordering Constraints:
   start ClusterIP then start WebSite
 Colocation Constraints:
   WebSite with ClusterIP
 -----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm resource unmove WebSite
 # crm configure show
 node $id="1702537408" pcmk-1
 node $id="1719314624" pcmk-2
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
 	params ip="192.168.122.120" cidr_netmask="32" \
 	op monitor interval="30s"
 primitive WebSite ocf:heartbeat:apache \
 	params configfile="/etc/httpd/conf/httpd.conf" \
 	op monitor interval="1min"
 location prefer-pcmk-1 WebSite 50: pcmk-1
 colocation website-with-ip inf: WebSite ClusterIP
 order apache-after-ip inf: ClusterIP WebSite
 property $id="cib-bootstrap-options" \
 	dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \
 	cluster-infrastructure="corosync" \
 	stonith-enabled="false" \
 	no-quorum-policy="ignore" \
 	last-lrm-refresh="1333446866"
 rsc_defaults $id="rsc-options" \
 	resource-stickiness="100"
 op_defaults $id="op-options" \
 	timeout="240s"
 -----
 endif::[]
 
-Note that the automated constraint is now gone. If we check the cluster
+Note that the constraint is now gone. If we check the cluster
 status, we can also see that as expected the resources are still active
 on pcmk-1.
 
 ifdef::pcs[]
 [source,Bash]
 -----
 # pcs status
 
 Last updated: Fri Sep 14 11:57:12 2012
 Last change: Fri Sep 14 11:57:03 2012 via cibadmin on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (2) - partition with quorum
 Version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
  WebSite	(ocf::heartbeat:apache):	Started pcmk-1
 -----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 -----
 # crm_mon
 ============
 Last updated: Tue Apr  3 12:05:08 2012
 Last change: Tue Apr  3 12:03:37 2012 via crm_resource on pcmk-1
 Stack: corosync
 Current DC: pcmk-2 (1719314624) - partition with quorum
 Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff
 2 Nodes configured, unknown expected votes
 2 Resources configured.
 ============
 
 Online: [ pcmk-1 pcmk-2 ]
 
  ClusterIP	(ocf:heartbeat:IPaddr2):	Started pcmk-1
  WebSite	(ocf:heartbeat:apache):	Started pcmk-1
 -----
 endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
index 58af962602..987d2d736a 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt
@@ -1,1015 +1,1017 @@
 = Installation =
 
 == OS Installation ==
 
 Detailed instructions for installing Fedora are available at
 http://docs.fedoraproject.org/en-US/Fedora/17/html/Installation_Guide/ in a number of
 languages. The abbreviated version is as follows...
 
 Point your browser to http://fedoraproject.org/en/get-fedora-all,
 locate the +Install Media+ section and download the install DVD that
 matches your hardware.
 
 Burn the disk image to a DVD
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Burning_ISO_images_to_disc/index.html]
 and boot from it, or use the image to boot a virtual machine.
 
 After clicking through the welcome screen, select your language,
 keyboard layout
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-keyboard-x86.html]
 and storage type
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/Storage_Devices-x86.html]
 
 Assign your machine a host name.
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-Netconfig-x86.html]
 I happen to control the clusterlabs.org domain name, so I will use
 that here.
 
 [IMPORTANT]
 ===========
 Do not accept the default network settings.
 Cluster machines should never obtain an IP address via DHCP.
 
-Before clicking next, select +Configure Network+ to specify a fixed IPv4 address for +System eth0+.
-Here I will use the internal addresses for the clusterlab.org network.
+When you are presented with the +Configure Network+ advanced option, select that option
+before continuing with the installation process to specify a fixed IPv4 address for
++System eth0+. Be sure to also enter the +Routes+ section and add an entry for your
+default gateway.
 
 image::images/Network.png["Custom network settings",align="center"]
 
-Be sure to also enter the +Routes+ section and add an entry for your default gateway.
-
+If you miss this step, the network can easily be configured after installation. You will
+have to navigate to +system settings+ and select +network+. From there you can select
+which device to configure.
 ===========
 
 You will then be prompted to indicate the machine's physical location
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-timezone-x86.html]
 and to supply a root password.
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-account_configuration-x86.html]
 
 Now select where you want Fedora installed.
 footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-diskpartsetup-x86.html]
 As I don’t care about any existing data, I will accept the default and
 allow Fedora to use the complete drive.
 
 [IMPORTANT]
 ===========
 
 By default Fedora uses LVM for partitioning which allows us to
 dynamically change the amount of space allocated to a given partition.
 
 However, by default it also allocates all free space to the +/+
 (aka. +root+) partition, which cannot be dynamically _reduced_ in size
 (dynamic increases are fine, by the way).
 
 So if you plan on following the DRBD or GFS2 portions of this guide,
 you should reserve at least 1GB of space on each machine from which to
 create a shared volume.  To do so select the +Review and modify
 partitioning layout+ checkbox before clicking +Next+.  You will then
 be given an opportunity to reduce the size of the +root+ partition.
 
 ===========
 
 Next choose which software should be
 installed. footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-pkgselection-x86.html]
 Change the selection to Minimal so that we see everything that gets
 installed. Don't enable updates yet; we'll do that (and install any
 extra software we need) later. After you click next, Fedora will begin
 installing.
 
-Go grab something to drink, this may take a while
+Go grab something to drink, this may take a while.
 
 Once the node reboots, you'll see a (possibly mangled) login prompt on
 the console.  Login using +root+ and the password you created earlier.
 
 image::images/Console.png["Initial Console",align="center"]
 
 [NOTE]
 ======
 
-That was the last screenshot, from here on in we're going to be working
-exclusively from the terminal.
+From here on in we're going to be working exclusively from the terminal.
 
 ======
 
 == Post Installation Tasks ==
 
 === Networking ===
 
 Bring up the network and ensure it starts at boot
 
 [source,Bash]
 ....
 # service network start
 # chkconfig network on
 ....
 
 Check the machine has the static IP address you configured earlier
 
 [source,Bash]
 ....
 # ip addr
 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
     inet 127.0.0.1/8 scope host lo
     inet6 ::1/128 scope host
        valid_lft forever preferred_lft forever
 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
     link/ether 52:54:00:d7:d6:08 brd ff:ff:ff:ff:ff:ff
     inet 192.168.122.101/24 brd 192.168.122.255 scope global eth0
     inet6 fe80::5054:ff:fed7:d608/64 scope link
        valid_lft forever preferred_lft forever
 ....
 
 Now check the default route setting:
 
 [source,Bash]
 ....
 [root@pcmk-1 ~]# ip route
 default via 192.168.122.1 dev eth0
 192.168.122.0/24 dev eth0  proto kernel  scope link  src 192.168.122.101
 ....
 
 If there is no line beginning with +default via+, then you may need to add a line such as
 
   GATEWAY=192.168.122.1
 
 to '/etc/sysconfig/network' and restart the network.
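 
 For example, assuming the 192.168.122.1 gateway used throughout this guide:
 
 [source,Bash]
 ....
 # echo "GATEWAY=192.168.122.1" >> /etc/sysconfig/network
 # service network restart
 ....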
 
 Now check for connectivity to the outside world.  Start small by
 testing whether we can reach the gateway we configured.
 
 [source,Bash]
 ....
 # ping -c 1 192.168.122.1
 PING 192.168.122.1 (192.168.122.1) 56(84) bytes of data.
 64 bytes from 192.168.122.1: icmp_req=1 ttl=64 time=0.249 ms
 
 --- 192.168.122.1 ping statistics ---
 1 packets transmitted, 1 received, 0% packet loss, time 0ms
 rtt min/avg/max/mdev = 0.249/0.249/0.249/0.000 ms
 ....
 
 Now try something external; choose a location you know will be available.
 
 [source,Bash]
 ....
 # ping -c 1 www.google.com
 PING www.l.google.com (173.194.72.106) 56(84) bytes of data.
 64 bytes from tf-in-f106.1e100.net (173.194.72.106): icmp_req=1 ttl=41 time=167 ms
 
 --- www.l.google.com ping statistics ---
 1 packets transmitted, 1 received, 0% packet loss, time 0ms
 rtt min/avg/max/mdev = 167.618/167.618/167.618/0.000 ms
 ....
 
 === Leaving the Console ===
 
 The console isn't a very friendly place to work from, so we will now
 switch to accessing the machine remotely via SSH where we can
 use copy&paste etc.
 
 First we check that we can see the newly installed machine at all:
 
 [source,Bash]
 ....
 beekhof@f16 ~ # ping -c 1 192.168.122.101
 PING 192.168.122.101 (192.168.122.101) 56(84) bytes of data.
 64 bytes from 192.168.122.101: icmp_req=1 ttl=64 time=1.01 ms
 
 --- 192.168.122.101 ping statistics ---
 1 packets transmitted, 1 received, 0% packet loss, time 0ms
 rtt min/avg/max/mdev = 1.012/1.012/1.012/0.000 ms
 ....
 
 Next we log in via SSH
 
 [source,Bash]
 ....
 beekhof@f16 ~ # ssh -l root 192.168.122.11
 root@192.168.122.11's password:
 Last login: Fri Mar 30 19:41:19 2012 from 192.168.122.1
 [root@pcmk-1 ~]#
 ....
 
 === Security Shortcuts ===
 
 To simplify this guide and focus on the aspects directly connected to
 clustering, we will now disable the machine's firewall and SELinux
 enforcement.
 
 [WARNING]
 ===========
 Both of these actions create significant security issues
 and should not be performed on machines that will be exposed to the
 outside world.
 ===========
 
 [IMPORTANT]
 ===========
  TODO: Create an Appendix that deals with (at least) re-enabling the firewall.
 ===========
 
 [source,Bash]
 ----
 # setenforce 0
 # sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
 # systemctl disable iptables.service
 rm '/etc/systemd/system/basic.target.wants/iptables.service'
 # systemctl stop iptables.service
 ----
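 
 You can confirm that SELinux is now running in permissive mode with
 getenforce:
 
 [source,Bash]
 ----
 # getenforce
 ----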
 
 === Short Node Names ===
 
 During installation, we filled in the machine's fully qualified domain
 name (FQDN), which can be rather long when it appears in cluster logs and
 status output. See for yourself how the machine identifies itself:
 (((Nodes, short name)))
 
 [source,Bash]
 ----
 # uname -n
 pcmk-1.clusterlabs.org
 # dnsdomainname
 clusterlabs.org
 ----
 (((Nodes, Domain name (Query))))
 
 The output from the second command is fine, but we really don't need the
 domain name included in the basic host details. To address this, we need
 to update /etc/sysconfig/network. This is what it should look like before
 we start.
 
 [source,Bash]
 ----
 # cat /etc/sysconfig/network
 NETWORKING=yes
 HOSTNAME=pcmk-1.clusterlabs.org
 GATEWAY=192.168.122.1
 ----
 
 All we need to do now is strip off the domain name portion, which is
 stored elsewhere anyway.
 
 [source,Bash]
 ----
  # sed -i.sed 's/\.[a-z].*//g' /etc/sysconfig/network
 ----
 
 Now confirm the change was successful. The revised file contents should
 look something like this.
 
 [source,Bash]
 ----
 # cat /etc/sysconfig/network
 NETWORKING=yes
 HOSTNAME=pcmk-1
 GATEWAY=192.168.122.1
 ----
 
 However, we're not finished. The machine won't normally see the shortened
 host name until it reboots, but we can force it to update.
 
 [source,Bash]
 ----
 # source /etc/sysconfig/network
 # hostname $HOSTNAME
 ----
 (((Nodes, Domain name (Remove from host name))))
 
 Now check that the machine is using the correct names.
 
 [source,Bash]
 ----
 # uname -n
 pcmk-1
 # dnsdomainname
 clusterlabs.org
 ----
 
 === NTP ===
+
 It is highly recommended to enable NTP on your cluster nodes. Doing so
 ensures all nodes agree on the current time and makes reading log files
-significantly easier. Fedora Installation - Date and TimeFedora
-Installation: Enable NTP to keep the times on all your nodes consistent
+significantly easier. footnote:[http://docs.fedoraproject.org/en-US/Fedora/17/html-single/System_Administrators_Guide/index.html#ch-Configuring_the_Date_and_Time]
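+
+One way to do this on Fedora is to install and enable the +ntpd+ service
+(a sketch; package and service names may vary with your release):
+
+[source,Bash]
+----
+# yum install -y ntp
+# systemctl enable ntpd.service
+# systemctl start ntpd.service
+----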
 
 == Before You Continue ==
 
 Repeat the Installation steps so far, so that you have two Fedora
 nodes ready to have the cluster software installed.
 
 For the purposes of this document, the additional node is called
 pcmk-2 with address 192.168.122.102.
 
 === Finalize Networking ===
 
 Confirm that you can communicate between the two new nodes:
 
 [source,Bash]
 ----
 # ping -c 3 192.168.122.102
 PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.
 64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms
 64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms
 64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms
 
 --- 192.168.122.102 ping statistics ---
 3 packets transmitted, 3 received, 0% packet loss, time 2000ms
 rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms
 ----
 
 Now we need to make sure we can communicate with the machines by their
 names. If you have a DNS server, add additional entries for the two
 machines. Otherwise, you'll need to add the machines to '/etc/hosts'.
 Below are the entries for my cluster nodes:
 
 [source,Bash]
 ----
 # grep pcmk /etc/hosts
 192.168.122.101 pcmk-1.clusterlabs.org pcmk-1
 192.168.122.102 pcmk-2.clusterlabs.org pcmk-2
 ----
 
 We can now verify the setup by again using ping:
 
 [source,Bash]
 ----
 # ping -c 3 pcmk-2
 PING pcmk-2.clusterlabs.org (192.168.122.101) 56(84) bytes of data.
 64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=1 ttl=64 time=0.164 ms
 64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=2 ttl=64 time=0.475 ms
 64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=3 ttl=64 time=0.186 ms
 
 --- pcmk-2.clusterlabs.org ping statistics ---
 3 packets transmitted, 3 received, 0% packet loss, time 2001ms
 rtt min/avg/max/mdev = 0.164/0.275/0.475/0.141 ms
 ----
 
 === Configure SSH ===
 
 SSH is a convenient and secure way to copy files and perform commands
 remotely. For the purposes of this guide, we will create a key without a
 password (using the -N option) so that we can perform remote actions
 without being prompted.
 
 (((SSH)))
 
 [WARNING]
 =========
 Unprotected SSH keys, those without a password, are not recommended for servers exposed to the outside world.
 We use them here only to simplify the demo.
 =========
 
 Create a new key and allow anyone with that key to log in:
 
 .Creating and Activating a new SSH Key
 [source,Bash]
 ----
 # ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""
 Generating public/private dsa key pair.
 Your identification has been saved in /root/.ssh/id_dsa.
 Your public key has been saved in /root/.ssh/id_dsa.pub.
 The key fingerprint is:
 91:09:5c:82:5a:6a:50:08:4e:b2:0c:62:de:cc:74:44 root@pcmk-1.clusterlabs.org
 
 The key's randomart image is:
 +--[ DSA 1024]----+
 |==.ooEo..        |
 |X O + .o o       |
 | * A    +        |
 |  +      .       |
 | .      S        |
 |                 |
 |                 |
 |                 |
 |                 |
 +-----------------+
 
 # cp .ssh/id_dsa.pub .ssh/authorized_keys
 ----
 (((Creating and Activating a new SSH Key)))
 
 Install the key on the other nodes and test that you can now run commands
 remotely, without being prompted.
 
 .Installing the SSH Key on Another Host
 [source,Bash]
 ----
 # scp -r .ssh pcmk-2:
 The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established.
 RSA key fingerprint is b1:2b:55:93:f1:d9:52:2b:0f:f2:8a:4e:ae:c6:7c:9a.
 Are you sure you want to continue connecting (yes/no)? yes
 Warning: Permanently added 'pcmk-2,192.168.122.102' (RSA) to the list of known hosts.root@pcmk-2's password:
 id_dsa.pub                           100%  616     0.6KB/s   00:00
 id_dsa                               100%  672     0.7KB/s   00:00
 known_hosts                          100%  400     0.4KB/s   00:00
 authorized_keys                      100%  616     0.6KB/s   00:00
 # ssh pcmk-2 -- uname -n
 pcmk-2
 #
 ----
 
 == Cluster Software Installation ==
 
 === Install the Cluster Software ===
 
 Since version 12, Fedora comes with recent versions of everything you
 need, so simply fire up the shell and run:
 
 [source,Bash]
 ----
 # yum install -y pacemaker corosync
 fedora/metalink                                  |  38 kB     00:00
 fedora                                           | 4.2 kB     00:00
 fedora/primary_db                                |  14 MB     00:21
 updates/metalink                                 | 2.7 kB     00:00
 updates                                          | 2.6 kB     00:00
 updates/primary_db                               | 1.2 kB     00:00
 updates-testing/metalink                         |  28 kB     00:00
 updates-testing                                  | 4.5 kB     00:00
 updates-testing/primary_db                       | 4.5 MB     00:12
 Setting up Install Process
 Resolving Dependencies
 --> Running transaction check
 ---> Package corosync.x86_64 0:1.99.9-1.fc17 will be installed
 --> Processing Dependency: corosynclib = 1.99.9-1.fc17 for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libxslt for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libvotequorum.so.5(COROSYNC_VOTEQUORUM_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libquorum.so.5(COROSYNC_QUORUM_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcpg.so.4(COROSYNC_CPG_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcmap.so.4(COROSYNC_CMAP_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcfg.so.6(COROSYNC_CFG_0.82)(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libvotequorum.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libtotem_pg.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libquorum.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libqb.so.0()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libnetsnmp.so.30()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcpg.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcorosync_common.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcmap.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libcfg.so.6()(64bit) for package: corosync-1.99.9-1.fc17.x86_64
 ---> Package pacemaker.x86_64 0:1.1.7-2.fc17 will be installed
 --> Processing Dependency: pacemaker-libs = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: pacemaker-cluster-libs = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: pacemaker-cli = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: resource-agents for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: perl(Getopt::Long) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libgnutls.so.26(GNUTLS_1_4)(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: cluster-glue for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: /usr/bin/perl for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libstonith.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libpils.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libpe_status.so.3()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libltdl.so.7()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libgnutls.so.26()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64
 --> Running transaction check
 ---> Package cluster-glue.x86_64 0:1.0.6-9.fc17.1 will be installed
 --> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0.6-9.fc17.1.x86_64
 --> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64
 --> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64
 --> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64
 ---> Package cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1 will be installed
 ---> Package corosynclib.x86_64 0:1.99.9-1.fc17 will be installed
 --> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64
 --> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64
 --> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64
 ---> Package gnutls.x86_64 0:2.12.17-1.fc17 will be installed
 --> Processing Dependency: libtasn1.so.3(LIBTASN1_0_3)(64bit) for package: gnutls-2.12.17-1.fc17.x86_64
 --> Processing Dependency: libtasn1.so.3()(64bit) for package: gnutls-2.12.17-1.fc17.x86_64
 --> Processing Dependency: libp11-kit.so.0()(64bit) for package: gnutls-2.12.17-1.fc17.x86_64
 ---> Package libqb.x86_64 0:0.11.1-1.fc17 will be installed
 ---> Package libtool-ltdl.x86_64 0:2.4.2-3.fc17 will be installed
 ---> Package libxslt.x86_64 0:1.1.26-9.fc17 will be installed
 ---> Package net-snmp-libs.x86_64 1:5.7.1-4.fc17 will be installed
 ---> Package pacemaker-cli.x86_64 0:1.1.7-2.fc17 will be installed
 ---> Package pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17 will be installed
 ---> Package pacemaker-libs.x86_64 0:1.1.7-2.fc17 will be installed
 ---> Package perl.x86_64 4:5.14.2-211.fc17 will be installed
 --> Processing Dependency: perl-libs = 4:5.14.2-211.fc17 for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(threads::shared) >= 1.21 for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Socket) >= 1.3 for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Scalar::Util) >= 1.10 for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(File::Spec) >= 0.8 for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl-macros for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl-libs for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(threads::shared) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(threads) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Socket) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Scalar::Util) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Pod::Simple) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Module::Pluggable) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(List::Util) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(File::Spec::Unix) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(File::Spec::Functions) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(File::Spec) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Cwd) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: perl(Carp) for package: 4:perl-5.14.2-211.fc17.x86_64
 --> Processing Dependency: libperl.so()(64bit) for package: 4:perl-5.14.2-211.fc17.x86_64
 ---> Package resource-agents.x86_64 0:3.9.2-2.fc17.1 will be installed
 --> Processing Dependency: /usr/sbin/rpc.nfsd for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /usr/sbin/rpc.mountd for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /usr/sbin/ethtool for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/rpc.statd for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/quotaon for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/quotacheck for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/mount.nfs4 for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/mount.nfs for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/mount.cifs for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: /sbin/fsck.xfs for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Processing Dependency: libnet.so.1()(64bit) for package: resource-agents-3.9.2-2.fc17.1.x86_64
 --> Running transaction check
 ---> Package OpenIPMI-libs.x86_64 0:2.0.18-13.fc17 will be installed
 ---> Package cifs-utils.x86_64 0:5.3-2.fc17 will be installed
 --> Processing Dependency: libtalloc.so.2(TALLOC_2.0.2)(64bit) for package: cifs-utils-5.3-2.fc17.x86_64
 --> Processing Dependency: keyutils for package: cifs-utils-5.3-2.fc17.x86_64
 --> Processing Dependency: libwbclient.so.0()(64bit) for package: cifs-utils-5.3-2.fc17.x86_64
 --> Processing Dependency: libtalloc.so.2()(64bit) for package: cifs-utils-5.3-2.fc17.x86_64
 ---> Package ethtool.x86_64 2:3.2-2.fc17 will be installed
 ---> Package libibverbs.x86_64 0:1.1.6-2.fc17 will be installed
 ---> Package libnet.x86_64 0:1.1.5-3.fc17 will be installed
 ---> Package librdmacm.x86_64 0:1.0.15-1.fc17 will be installed
 ---> Package libtasn1.x86_64 0:2.12-1.fc17 will be installed
 ---> Package nfs-utils.x86_64 1:1.2.5-12.fc17 will be installed
 --> Processing Dependency: rpcbind for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libtirpc for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libnfsidmap for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libgssglue.so.1(libgssapi_CITI_2)(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libgssglue for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libevent for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libtirpc.so.1()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libnfsidmap.so.0()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libgssglue.so.1()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 --> Processing Dependency: libevent-2.0.so.5()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64
 ---> Package p11-kit.x86_64 0:0.12-1.fc17 will be installed
 ---> Package perl-Carp.noarch 0:1.22-2.fc17 will be installed
 ---> Package perl-Module-Pluggable.noarch 1:3.90-211.fc17 will be installed
 ---> Package perl-PathTools.x86_64 0:3.33-211.fc17 will be installed
 ---> Package perl-Pod-Simple.noarch 1:3.16-211.fc17 will be installed
 --> Processing Dependency: perl(Pod::Escapes) >= 1.04 for package: 1:perl-Pod-Simple-3.16-211.fc17.noarch
 ---> Package perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17 will be installed
 ---> Package perl-Socket.x86_64 0:2.001-1.fc17 will be installed
 ---> Package perl-TimeDate.noarch 1:1.20-6.fc17 will be installed
 ---> Package perl-libs.x86_64 4:5.14.2-211.fc17 will be installed
 ---> Package perl-macros.x86_64 4:5.14.2-211.fc17 will be installed
 ---> Package perl-threads.x86_64 0:1.86-2.fc17 will be installed
 ---> Package perl-threads-shared.x86_64 0:1.40-2.fc17 will be installed
 ---> Package quota.x86_64 1:4.00-3.fc17 will be installed
 --> Processing Dependency: quota-nls = 1:4.00-3.fc17 for package: 1:quota-4.00-3.fc17.x86_64
 --> Processing Dependency: tcp_wrappers for package: 1:quota-4.00-3.fc17.x86_64
 ---> Package xfsprogs.x86_64 0:3.1.8-1.fc17 will be installed
 --> Running transaction check
 ---> Package keyutils.x86_64 0:1.5.5-2.fc17 will be installed
 ---> Package libevent.x86_64 0:2.0.14-2.fc17 will be installed
 ---> Package libgssglue.x86_64 0:0.3-1.fc17 will be installed
 ---> Package libnfsidmap.x86_64 0:0.25-1.fc17 will be installed
 ---> Package libtalloc.x86_64 0:2.0.7-4.fc17 will be installed
 ---> Package libtirpc.x86_64 0:0.2.2-2.1.fc17 will be installed
 ---> Package libwbclient.x86_64 1:3.6.3-81.fc17.1 will be installed
 ---> Package perl-Pod-Escapes.noarch 1:1.04-211.fc17 will be installed
 ---> Package quota-nls.noarch 1:4.00-3.fc17 will be installed
 ---> Package rpcbind.x86_64 0:0.2.0-16.fc17 will be installed
 ---> Package tcp_wrappers.x86_64 0:7.6-69.fc17 will be installed
 --> Finished Dependency Resolution
 
 Dependencies Resolved
 
  ==============================================================================================
  Package                        Arch      Version             Repository            Size
 =====================================================================================
 Installing:
  corosync                       x86_64    1.99.9-1.fc17       updates-testing   159 k
  pacemaker                      x86_64    1.1.7-2.fc17        updates-testing   362 k
 Installing for dependencies:
  OpenIPMI-libs                  x86_64    2.0.18-13.fc17      fedora            466 k
  cifs-utils                     x86_64    5.3-2.fc17          updates-testing    66 k
  cluster-glue                   x86_64    1.0.6-9.fc17.1      fedora            229 k
  cluster-glue-libs              x86_64    1.0.6-9.fc17.1      fedora            121 k
  corosynclib                    x86_64    1.99.9-1.fc17       updates-testing    96 k
  ethtool                        x86_64    2:3.2-2.fc17        fedora             94 k
  gnutls                         x86_64    2.12.17-1.fc17      fedora            385 k
  keyutils                       x86_64    1.5.5-2.fc17        fedora             49 k
  libevent                       x86_64    2.0.14-2.fc17       fedora            160 k
  libgssglue                     x86_64    0.3-1.fc17          fedora             24 k
  libibverbs                     x86_64    1.1.6-2.fc17        fedora             44 k
  libnet                         x86_64    1.1.5-3.fc17        fedora             54 k
  libnfsidmap                    x86_64    0.25-1.fc17         fedora             34 k
  libqb                          x86_64    0.11.1-1.fc17       updates-testing    68 k
  librdmacm                      x86_64    1.0.15-1.fc17       fedora             27 k
  libtalloc                      x86_64    2.0.7-4.fc17        fedora             22 k
  libtasn1                       x86_64    2.12-1.fc17         updates-testing   319 k
  libtirpc                       x86_64    0.2.2-2.1.fc17      fedora             78 k
  libtool-ltdl                   x86_64    2.4.2-3.fc17        fedora             45 k
  libwbclient                    x86_64    1:3.6.3-81.fc17.1   updates-testing    68 k
  libxslt                        x86_64    1.1.26-9.fc17       fedora            416 k
  net-snmp-libs                  x86_64    1:5.7.1-4.fc17      fedora            713 k
  nfs-utils                      x86_64    1:1.2.5-12.fc17     fedora            311 k
  p11-kit                        x86_64    0.12-1.fc17         updates-testing    36 k
  pacemaker-cli                  x86_64    1.1.7-2.fc17        updates-testing   368 k
  pacemaker-cluster-libs         x86_64    1.1.7-2.fc17        updates-testing    77 k
  pacemaker-libs                 x86_64    1.1.7-2.fc17        updates-testing   322 k
  perl                           x86_64    4:5.14.2-211.fc17   fedora             10 M
  perl-Carp                      noarch    1.22-2.fc17         fedora             17 k
  perl-Module-Pluggable          noarch    1:3.90-211.fc17     fedora             47 k
  perl-PathTools                 x86_64    3.33-211.fc17       fedora            105 k
  perl-Pod-Escapes               noarch    1:1.04-211.fc17     fedora             40 k
  perl-Pod-Simple                noarch    1:3.16-211.fc17     fedora            223 k
  perl-Scalar-List-Utils         x86_64    1.25-1.fc17         updates-testing    33 k
  perl-Socket                    x86_64    2.001-1.fc17        updates-testing    44 k
  perl-TimeDate                  noarch    1:1.20-6.fc17       fedora             43 k
  perl-libs                      x86_64    4:5.14.2-211.fc17   fedora            628 k
  perl-macros                    x86_64    4:5.14.2-211.fc17   fedora             32 k
  perl-threads                   x86_64    1.86-2.fc17         fedora             47 k
  perl-threads-shared            x86_64    1.40-2.fc17         fedora             36 k
  quota                          x86_64    1:4.00-3.fc17       fedora            160 k
  quota-nls                      noarch    1:4.00-3.fc17       fedora             74 k
  resource-agents                x86_64    3.9.2-2.fc17.1      fedora            466 k
  rpcbind                        x86_64    0.2.0-16.fc17       fedora             52 k
  tcp_wrappers                   x86_64    7.6-69.fc17         fedora             72 k
  xfsprogs                       x86_64    3.1.8-1.fc17        updates-testing   715 k
 
 Transaction Summary
 =====================================================================================
 Install  2 Packages (+46 Dependent packages)
 
 Total download size: 18 M
 Installed size: 59 M
 Downloading Packages:
 (1/48): OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm                       | 466 kB     00:00
 warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID 1aca3465: NOKEY
 Public key for OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm is not installed
 (2/48): cifs-utils-5.3-2.fc17.x86_64.rpm                              |  66 kB     00:01
 Public key for cifs-utils-5.3-2.fc17.x86_64.rpm is not installed
 (3/48): cluster-glue-1.0.6-9.fc17.1.x86_64.rpm                        | 229 kB     00:00
 (4/48): cluster-glue-libs-1.0.6-9.fc17.1.x86_64.rpm                   | 121 kB     00:00
 (5/48): corosync-1.99.9-1.fc17.x86_64.rpm                             | 159 kB     00:01
 (6/48): corosynclib-1.99.9-1.fc17.x86_64.rpm                          |  96 kB     00:00
 (7/48): ethtool-3.2-2.fc17.x86_64.rpm                                 |  94 kB     00:00
 (8/48): gnutls-2.12.17-1.fc17.x86_64.rpm                              | 385 kB     00:00
 (9/48): keyutils-1.5.5-2.fc17.x86_64.rpm                              |  49 kB     00:00
 (10/48): libevent-2.0.14-2.fc17.x86_64.rpm                            | 160 kB     00:00
 (11/48): libgssglue-0.3-1.fc17.x86_64.rpm                             |  24 kB     00:00
 (12/48): libibverbs-1.1.6-2.fc17.x86_64.rpm                           |  44 kB     00:00
 (13/48): libnet-1.1.5-3.fc17.x86_64.rpm                               |  54 kB     00:00
 (14/48): libnfsidmap-0.25-1.fc17.x86_64.rpm                           |  34 kB     00:00
 (15/48): libqb-0.11.1-1.fc17.x86_64.rpm                               |  68 kB     00:01
 (16/48): librdmacm-1.0.15-1.fc17.x86_64.rpm                           |  27 kB     00:00
 (17/48): libtalloc-2.0.7-4.fc17.x86_64.rpm                            |  22 kB     00:00
 (18/48): libtasn1-2.12-1.fc17.x86_64.rpm                              | 319 kB     00:02
 (19/48): libtirpc-0.2.2-2.1.fc17.x86_64.rpm                           |  78 kB     00:00
 (20/48): libtool-ltdl-2.4.2-3.fc17.x86_64.rpm                         |  45 kB     00:00
 (21/48): libwbclient-3.6.3-81.fc17.1.x86_64.rpm                       |  68 kB     00:00
 (22/48): libxslt-1.1.26-9.fc17.x86_64.rpm                             | 416 kB     00:00
 (23/48): net-snmp-libs-5.7.1-4.fc17.x86_64.rpm                        | 713 kB     00:01
 (24/48): nfs-utils-1.2.5-12.fc17.x86_64.rpm                           | 311 kB     00:00
 (25/48): p11-kit-0.12-1.fc17.x86_64.rpm                               |  36 kB     00:01
 (26/48): pacemaker-1.1.7-2.fc17.x86_64.rpm                            | 362 kB     00:02
 (27/48): pacemaker-cli-1.1.7-2.fc17.x86_64.rpm                        | 368 kB     00:02
 (28/48): pacemaker-cluster-libs-1.1.7-2.fc17.x86_64.rpm               |  77 kB     00:00
 (29/48): pacemaker-libs-1.1.7-2.fc17.x86_64.rpm                       | 322 kB     00:01
 (30/48): perl-5.14.2-211.fc17.x86_64.rpm                              |  10 MB     00:15
 (31/48): perl-Carp-1.22-2.fc17.noarch.rpm                             |  17 kB     00:00
 (32/48): perl-Module-Pluggable-3.90-211.fc17.noarch.rpm               |  47 kB     00:00
 (33/48): perl-PathTools-3.33-211.fc17.x86_64.rpm                      | 105 kB     00:00
 (34/48): perl-Pod-Escapes-1.04-211.fc17.noarch.rpm                    |  40 kB     00:00
 (35/48): perl-Pod-Simple-3.16-211.fc17.noarch.rpm                     | 223 kB     00:00
 (36/48): perl-Scalar-List-Utils-1.25-1.fc17.x86_64.rpm                |  33 kB     00:01
 (37/48): perl-Socket-2.001-1.fc17.x86_64.rpm                          |  44 kB     00:00
 (38/48): perl-TimeDate-1.20-6.fc17.noarch.rpm                         |  43 kB     00:00
 (39/48): perl-libs-5.14.2-211.fc17.x86_64.rpm                         | 628 kB     00:00
 (40/48): perl-macros-5.14.2-211.fc17.x86_64.rpm                       |  32 kB     00:00
 (41/48): perl-threads-1.86-2.fc17.x86_64.rpm                          |  47 kB     00:00
 (42/48): perl-threads-shared-1.40-2.fc17.x86_64.rpm                   |  36 kB     00:00
 (43/48): quota-4.00-3.fc17.x86_64.rpm                                 | 160 kB     00:00
 (44/48): quota-nls-4.00-3.fc17.noarch.rpm                             |  74 kB     00:00
 (45/48): resource-agents-3.9.2-2.fc17.1.x86_64.rpm                    | 466 kB     00:00
 (46/48): rpcbind-0.2.0-16.fc17.x86_64.rpm                             |  52 kB     00:00
 (47/48): tcp_wrappers-7.6-69.fc17.x86_64.rpm                          |  72 kB     00:00
 (48/48): xfsprogs-3.1.8-1.fc17.x86_64.rpm                             | 715 kB     00:03
  ---------------------------------------------------------------------------------------
 Total                                                        333 kB/s |  18 MB     00:55
 Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64
 Importing GPG key 0x1ACA3465:
  Userid     : "Fedora (17) <fedora@fedoraproject.org>"
  Fingerprint: cac4 3fb7 74a4 a673 d81c 5de7 50e9 4c99 1aca 3465
  Package    : fedora-release-17-0.8.noarch (@anaconda-0)
  From       : /etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64
 Running Transaction Check
 Running Transaction Test
 Transaction Test Succeeded
 Running Transaction
   Installing : libqb-0.11.1-1.fc17.x86_64                                         1/48
   Installing : libtool-ltdl-2.4.2-3.fc17.x86_64                                   2/48
   Installing : cluster-glue-libs-1.0.6-9.fc17.1.x86_64                            3/48
   Installing : libxslt-1.1.26-9.fc17.x86_64                                       4/48
   Installing : 1:perl-Pod-Escapes-1.04-211.fc17.noarch                            5/48
   Installing : perl-threads-1.86-2.fc17.x86_64                                    6/48
   Installing : 4:perl-macros-5.14.2-211.fc17.x86_64                               7/48
   Installing : 1:perl-Pod-Simple-3.16-211.fc17.noarch                             8/48
   Installing : perl-Socket-2.001-1.fc17.x86_64                                    9/48
   Installing : perl-Carp-1.22-2.fc17.noarch                                      10/48
   Installing : 4:perl-libs-5.14.2-211.fc17.x86_64                                11/48
   Installing : perl-threads-shared-1.40-2.fc17.x86_64                            12/48
   Installing : perl-Scalar-List-Utils-1.25-1.fc17.x86_64                         13/48
   Installing : 1:perl-Module-Pluggable-3.90-211.fc17.noarch                      14/48
   Installing : perl-PathTools-3.33-211.fc17.x86_64                               15/48
   Installing : 4:perl-5.14.2-211.fc17.x86_64                                     16/48
   Installing : libibverbs-1.1.6-2.fc17.x86_64                                    17/48
   Installing : keyutils-1.5.5-2.fc17.x86_64                                      18/48
   Installing : libgssglue-0.3-1.fc17.x86_64                                      19/48
   Installing : libtirpc-0.2.2-2.1.fc17.x86_64                                    20/48
   Installing : 1:net-snmp-libs-5.7.1-4.fc17.x86_64                               21/48
   Installing : rpcbind-0.2.0-16.fc17.x86_64                                      22/48
   Installing : librdmacm-1.0.15-1.fc17.x86_64                                    23/48
   Installing : corosynclib-1.99.9-1.fc17.x86_64                                  24/48
   Installing : corosync-1.99.9-1.fc17.x86_64                                     25/48
 error reading information on service corosync: No such file or directory
   Installing : 1:perl-TimeDate-1.20-6.fc17.noarch                                26/48
   Installing : 1:quota-nls-4.00-3.fc17.noarch                                    27/48
   Installing : tcp_wrappers-7.6-69.fc17.x86_64                                   28/48
   Installing : 1:quota-4.00-3.fc17.x86_64                                        29/48
   Installing : libnfsidmap-0.25-1.fc17.x86_64                                    30/48
   Installing : 1:libwbclient-3.6.3-81.fc17.1.x86_64                              31/48
   Installing : libnet-1.1.5-3.fc17.x86_64                                        32/48
   Installing : 2:ethtool-3.2-2.fc17.x86_64                                       33/48
   Installing : libevent-2.0.14-2.fc17.x86_64                                     34/48
   Installing : 1:nfs-utils-1.2.5-12.fc17.x86_64                                  35/48
   Installing : libtalloc-2.0.7-4.fc17.x86_64                                     36/48
   Installing : cifs-utils-5.3-2.fc17.x86_64                                      37/48
   Installing : libtasn1-2.12-1.fc17.x86_64                                       38/48
   Installing : OpenIPMI-libs-2.0.18-13.fc17.x86_64                               39/48
   Installing : cluster-glue-1.0.6-9.fc17.1.x86_64                                40/48
   Installing : p11-kit-0.12-1.fc17.x86_64                                        41/48
   Installing : gnutls-2.12.17-1.fc17.x86_64                                      42/48
   Installing : pacemaker-libs-1.1.7-2.fc17.x86_64                                43/48
   Installing : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64                        44/48
   Installing : pacemaker-cli-1.1.7-2.fc17.x86_64                                 45/48
   Installing : xfsprogs-3.1.8-1.fc17.x86_64                                      46/48
   Installing : resource-agents-3.9.2-2.fc17.1.x86_64                             47/48
   Installing : pacemaker-1.1.7-2.fc17.x86_64                                     48/48
   Verifying  : xfsprogs-3.1.8-1.fc17.x86_64                                       1/48
   Verifying  : 1:net-snmp-libs-5.7.1-4.fc17.x86_64                                2/48
   Verifying  : corosync-1.99.9-1.fc17.x86_64                                      3/48
   Verifying  : cluster-glue-1.0.6-9.fc17.1.x86_64                                 4/48
   Verifying  : perl-PathTools-3.33-211.fc17.x86_64                                5/48
   Verifying  : p11-kit-0.12-1.fc17.x86_64                                         6/48
   Verifying  : 1:perl-Pod-Simple-3.16-211.fc17.noarch                             7/48
   Verifying  : OpenIPMI-libs-2.0.18-13.fc17.x86_64                                8/48
   Verifying  : libtasn1-2.12-1.fc17.x86_64                                        9/48
   Verifying  : perl-threads-1.86-2.fc17.x86_64                                   10/48
   Verifying  : 1:perl-Pod-Escapes-1.04-211.fc17.noarch                           11/48
   Verifying  : pacemaker-1.1.7-2.fc17.x86_64                                     12/48
   Verifying  : 4:perl-5.14.2-211.fc17.x86_64                                     13/48
   Verifying  : gnutls-2.12.17-1.fc17.x86_64                                      14/48
   Verifying  : perl-threads-shared-1.40-2.fc17.x86_64                            15/48
   Verifying  : 4:perl-macros-5.14.2-211.fc17.x86_64                              16/48
   Verifying  : 1:perl-Module-Pluggable-3.90-211.fc17.noarch                      17/48
   Verifying  : 1:nfs-utils-1.2.5-12.fc17.x86_64                                  18/48
   Verifying  : cluster-glue-libs-1.0.6-9.fc17.1.x86_64                           19/48
   Verifying  : pacemaker-libs-1.1.7-2.fc17.x86_64                                20/48
   Verifying  : libtalloc-2.0.7-4.fc17.x86_64                                     21/48
   Verifying  : libevent-2.0.14-2.fc17.x86_64                                     22/48
   Verifying  : perl-Socket-2.001-1.fc17.x86_64                                   23/48
   Verifying  : libgssglue-0.3-1.fc17.x86_64                                      24/48
   Verifying  : perl-Carp-1.22-2.fc17.noarch                                      25/48
   Verifying  : libtirpc-0.2.2-2.1.fc17.x86_64                                    26/48
   Verifying  : 2:ethtool-3.2-2.fc17.x86_64                                       27/48
   Verifying  : 4:perl-libs-5.14.2-211.fc17.x86_64                                28/48
   Verifying  : libxslt-1.1.26-9.fc17.x86_64                                      29/48
   Verifying  : rpcbind-0.2.0-16.fc17.x86_64                                      30/48
   Verifying  : librdmacm-1.0.15-1.fc17.x86_64                                    31/48
   Verifying  : resource-agents-3.9.2-2.fc17.1.x86_64                             32/48
   Verifying  : 1:quota-4.00-3.fc17.x86_64                                        33/48
   Verifying  : 1:perl-TimeDate-1.20-6.fc17.noarch                                34/48
   Verifying  : perl-Scalar-List-Utils-1.25-1.fc17.x86_64                         35/48
   Verifying  : libtool-ltdl-2.4.2-3.fc17.x86_64                                  36/48
   Verifying  : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64                        37/48
   Verifying  : cifs-utils-5.3-2.fc17.x86_64                                      38/48
   Verifying  : libnet-1.1.5-3.fc17.x86_64                                        39/48
   Verifying  : corosynclib-1.99.9-1.fc17.x86_64                                  40/48
   Verifying  : libqb-0.11.1-1.fc17.x86_64                                        41/48
   Verifying  : 1:libwbclient-3.6.3-81.fc17.1.x86_64                              42/48
   Verifying  : libnfsidmap-0.25-1.fc17.x86_64                                    43/48
   Verifying  : tcp_wrappers-7.6-69.fc17.x86_64                                   44/48
   Verifying  : keyutils-1.5.5-2.fc17.x86_64                                      45/48
   Verifying  : libibverbs-1.1.6-2.fc17.x86_64                                    46/48
   Verifying  : 1:quota-nls-4.00-3.fc17.noarch                                    47/48
   Verifying  : pacemaker-cli-1.1.7-2.fc17.x86_64                                 48/48
 
 Installed:
   corosync.x86_64 0:1.99.9-1.fc17           pacemaker.x86_64 0:1.1.7-2.fc17
 
 Dependency Installed:
   OpenIPMI-libs.x86_64 0:2.0.18-13.fc17     cifs-utils.x86_64 0:5.3-2.fc17
   cluster-glue.x86_64 0:1.0.6-9.fc17.1      cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1
   corosynclib.x86_64 0:1.99.9-1.fc17        ethtool.x86_64 2:3.2-2.fc17
   gnutls.x86_64 0:2.12.17-1.fc17            keyutils.x86_64 0:1.5.5-2.fc17
   libevent.x86_64 0:2.0.14-2.fc17           libgssglue.x86_64 0:0.3-1.fc17
   libibverbs.x86_64 0:1.1.6-2.fc17          libnet.x86_64 0:1.1.5-3.fc17
   libnfsidmap.x86_64 0:0.25-1.fc17          libqb.x86_64 0:0.11.1-1.fc17
   librdmacm.x86_64 0:1.0.15-1.fc17          libtalloc.x86_64 0:2.0.7-4.fc17
   libtasn1.x86_64 0:2.12-1.fc17             libtirpc.x86_64 0:0.2.2-2.1.fc17
   libtool-ltdl.x86_64 0:2.4.2-3.fc17        libwbclient.x86_64 1:3.6.3-81.fc17.1
   libxslt.x86_64 0:1.1.26-9.fc17            net-snmp-libs.x86_64 1:5.7.1-4.fc17
   nfs-utils.x86_64 1:1.2.5-12.fc17          p11-kit.x86_64 0:0.12-1.fc17
   pacemaker-cli.x86_64 0:1.1.7-2.fc17       pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17
   pacemaker-libs.x86_64 0:1.1.7-2.fc17      perl.x86_64 4:5.14.2-211.fc17
   perl-Carp.noarch 0:1.22-2.fc17            perl-Module-Pluggable.noarch 1:3.90-211.fc17
   perl-PathTools.x86_64 0:3.33-211.fc17     perl-Pod-Escapes.noarch 1:1.04-211.fc17
   perl-Pod-Simple.noarch 1:3.16-211.fc17    perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17
   perl-Socket.x86_64 0:2.001-1.fc17         perl-TimeDate.noarch 1:1.20-6.fc17
   perl-libs.x86_64 4:5.14.2-211.fc17        perl-macros.x86_64 4:5.14.2-211.fc17
   perl-threads.x86_64 0:1.86-2.fc17         perl-threads-shared.x86_64 0:1.40-2.fc17
   quota.x86_64 1:4.00-3.fc17                quota-nls.noarch 1:4.00-3.fc17
   resource-agents.x86_64 0:3.9.2-2.fc17.1   rpcbind.x86_64 0:0.2.0-16.fc17
   tcp_wrappers.x86_64 0:7.6-69.fc17         xfsprogs.x86_64 0:3.1.8-1.fc17
 
 Complete!
 [root@pcmk-1 ~]#
 ----
 
 Now install the cluster software on the second node.
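 
 Since passwordless SSH is already set up, one convenient (and entirely
 optional) way to do that is to run the same command remotely from the
 first node, for example:
 
 [source,Bash]
 ----
 # ssh pcmk-2 -- yum install -y pacemaker corosync
 ----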
 
 ifdef::pcs[]
 === Install the Cluster Management Software ===
 The pcs cli command coupled with the pcs daemon creates a cluster
 management system capable of managing all aspects of the cluster stack
 across all nodes from a single location.
 
 [source,Bash]
 ----
 # yum install -y pcs
 ----
 
 Make sure to install the pcs packages on both nodes.
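 
 As with the other packages, you could do this from pcmk-1 over SSH; a minimal
 sketch (the loop is just a convenience, any way of running yum on both nodes
 is fine):
 
 [source,Bash]
 ----
 # for node in pcmk-1 pcmk-2; do ssh $node -- yum install -y pcs; done
 ----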
 endif::[]
 
 == Setup ==
 
 ifdef::pcs[]
 === Enable pcs Daemon ===
 
 Before the cluster can be configured, the pcs daemon must be started and enabled
 to start at boot on each node.  This daemon works with the pcs cli command to manage
 syncing the corosync configuration across all the nodes in the cluster.
 
 Start and enable the daemon by issuing the following commands on each node.
 
 [source,Bash]
 ----
 # systemctl start pcsd.service
 # systemctl enable pcsd.service
 ----
 
 Now set up a common pcs user account on each node in the cluster using
 the pcs_passwd command. In the example below, the user account 'pcmk'
 is created.  You will be asked to supply a password (or you can supply one
 with the -p option).  Make sure the username and password are
 consistent across all the nodes.
 
 [source,Bash]
 ----
 # pcs_passwd pcmk
 password:
 ----
 
 The pcs daemon account is required on each node to enable remote pcs command
 authentication.  While the pcs cli command can be used locally without setting
 up a pcs daemon user account, pcs features that require access to remote
 nodes (such as syncing the corosync config, or starting/stopping the cluster on
 remote nodes) will be unavailable.  This tutorial will make use of these remote
 access commands.
 
 endif::[]
 
 === Configuring Corosync ===
 
 ifdef::pcs[]
 In the past, at this point in the tutorial an explanation of how to configure
 and propagate corosync's /etc/corosync/corosync.conf file would be necessary.
 Using pcs with the pcs daemon greatly simplifies this process by generating
 corosync.conf on all the nodes in the cluster with a single command.  The only
 thing required to achieve this is to authenticate as the pcs user 'pcmk' on
 one of the nodes in the cluster, and then issue the 'pcs cluster setup' command
 with a list of all the node names in the cluster.
 
 [source,Bash]
 ----
 # pcs cluster auth pcmk-1 pcmk-2
 Username: pcmk
 Password: 
 pcmk-1: Authorized
 pcmk-2: Authorized
 
 # pcs cluster setup mycluster pcmk-1 pcmk-2
 pcmk-1: Succeeded
 pcmk-2: Succeeded
 ----
 
 That's it.  Corosync is configured across the cluster.  If you received an
 authorization error for either of those commands, make sure you set up the
 'pcmk' user account using the pcs_passwd command on every node in the cluster,
 with the same password.
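 
 As an optional sanity check, you can confirm that the configuration file was
 in fact generated on both nodes, for example:
 
 [source,Bash]
 ----
 # for node in pcmk-1 pcmk-2; do ssh $node -- ls -l /etc/corosync/corosync.conf; done
 ----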
 
 endif::[]
 
 ifdef::crm[]
 Choose a port number and multi-cast footnote:[http://en.wikipedia.org/wiki/Multicast] address. footnote:[http://en.wikipedia.org/wiki/Multicast_address]
 Be sure that the values you choose do not conflict with any existing clusters you might have.
 For advice on choosing a multi-cast address, see
 http://www.29west.com/docs/THPM/multicast-address-assignment.html
 For this document, I have chosen port 4000 and used 239.255.1.1 as the multi-cast address.
 
 
 [IMPORTANT]
 ===========
 The instructions below only apply for a machine with a single NIC. If you
 have a more complicated setup, you should edit the configuration
 manually.
 ===========
 
 [source,Bash]
 ----
 # export ais_port=4000
 # export ais_mcast=239.255.1.1
 ----
 
 Next we automatically determine the host's address. By not using the full
 address, we make the configuration suitable to be copied to other nodes.
 
 [source,Bash]
 ----
 # export ais_addr=`ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/`
 ----
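 
 In case that one-liner looks like magic: it takes the broadcast address from
 the last IPv4 entry reported by ip addr and replaces the 255 with 0, giving
 the network address. On a typical single-NIC host the intermediate results
 look roughly like this (the address and interface name are illustrative):
 
 [source,Bash]
 ----
 # ip addr | grep "inet " | tail -n 1
     inet 192.168.122.101/24 brd 192.168.122.255 scope global eth0
 # ip addr | grep "inet " | tail -n 1 | awk '{print $4}'
 192.168.122.255
 # ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/
 192.168.122.0
 ----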
 
 Display and verify the configuration options
 
 [source,Bash]
 ----
 # env | grep ais_
 ais_mcast=239.255.1.1
 ais_port=4000
 ais_addr=192.168.122.0
 ----
 
 Once you're happy with the chosen values, update the Corosync
 configuration
 
 [source,Bash]
 ----
 # cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
 # sed -i.bak "s/.*mcastaddr:.*/mcastaddr:\ $ais_mcast/g" /etc/corosync/corosync.conf
 # sed -i.bak "s/.*mcastport:.*/mcastport:\ $ais_port/g" /etc/corosync/corosync.conf
 # sed -i.bak "s/.*\tbindnetaddr:.*/bindnetaddr:\ $ais_addr/g" /etc/corosync/corosync.conf
 ----
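 
 Before moving on, it may be worth a quick check that the substitutions took
 effect; the output of the following command should include the port,
 multi-cast address and network address we exported above:
 
 [source,Bash]
 ----
 # grep -e mcastaddr -e mcastport -e bindnetaddr /etc/corosync/corosync.conf
 ----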
 
 Lastly, you'll need to enable quorum
 
 [source,Bash]
 ----
 cat << END >> /etc/corosync/corosync.conf
 quorum {
            provider: corosync_votequorum
            expected_votes: 2
 }
 END
 ----
 
 endif::[]
 
 The final /etc/corosync/corosync.conf configuration on each node should look
 something like the sample in Appendix B, Sample Corosync Configuration.
 
 
 [IMPORTANT]
 ===========
 Pacemaker used to obtain membership and quorum from a custom Corosync plugin.
 This plugin also had the capability to start Pacemaker automatically when Corosync was started.
 
 Neither behavior is possible with Corosync 2.0 and beyond as support for plugins was removed.
 
-Instead, Pacemaker must started as a separate job/initscript.
+Instead, Pacemaker must be started as a separate job/initscript.
 Also, since Pacemaker made use of the plugin for message routing, a node using the plugin (Corosync prior to 2.0) cannot talk to one that isn't (Corosync 2.0+).
 
 Rolling upgrades between these versions are therefore not possible and an alternate strategy footnote:[http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/ap-upgrade.html] must be used.
 ===========
 
 ifdef::crm[]
 === Propagate the Configuration ===
 
 Now we need to copy the changes so far to the other node:
 
 [source,Bash]
 ----
 # for f in /etc/corosync/corosync.conf /etc/hosts; do scp $f pcmk-2:$f ; done
 corosync.conf                            100% 1528     1.5KB/s   00:00
 hosts                                    100%  281     0.3KB/s   00:00
 #
 ----
 endif::[]
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt b/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
index 90e6d0e805..53c64af2ce 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Stonith.txt
@@ -1,304 +1,305 @@
 = Configure STONITH =
 
 == What Is STONITH ==
 
 STONITH is an acronym for Shoot-The-Other-Node-In-The-Head and it
 protects your data from being corrupted by rogue nodes or concurrent
 access.
 
 Just because a node is unresponsive doesn't mean it isn't
 accessing your data. The only way to be 100% sure that your data is
 safe is to use STONITH, so we can be certain that the node is truly
 offline before allowing the data to be accessed from another node.
 
 
 STONITH also has a role to play in the event that a clustered service
 cannot be stopped. In this case, the cluster uses STONITH to force the
 whole node offline, thereby making it safe to start the service
 elsewhere.
 
 == What STONITH Device Should You Use ==
 
 It is crucial that the STONITH device allows the cluster to
 differentiate between a node failure and a network failure.
 
 The biggest mistake people make in choosing a STONITH device is to
 use a remote power switch (such as many on-board IPMI controllers) that
 shares power with the node it controls. In such cases, the cluster
 cannot be sure whether the node is really offline, or active and suffering
 from a network fault.
 
 Likewise, any device that relies on the machine being active (such as
 SSH-based "devices" used during testing) is inappropriate.
 
 == Configuring STONITH ==
 
 ifdef::pcs[]
 . Find the correct driver: +pcs stonith list+
 
 . Find the parameters associated with the device: +pcs stonith describe <agent name>+
 
 . Create a local copy of the configuration in which to make changes: +pcs cluster cib stonith_cfg+
 
 . Create the fencing resource using +pcs -f stonith_cfg stonith create <stonith_id>
   <stonith device type> [stonith device options]+
 
 . Set stonith-enabled to true: +pcs -f stonith_cfg property set stonith-enabled=true+
-
-. Commit the new configuration. +pcs cluster push cib stonith_cfg+
-
 endif::[]
 
 ifdef::crm[]
 . Find the correct driver: +stonith_admin --list-installed+
 
 . Since every device is different, the parameters needed to configure
   it will vary. To find out the parameters associated with the device,
   run: +stonith_admin --metadata --agent type+
 
   The output should be XML formatted text containing additional
   parameter descriptions. We will endeavor to make the output more
   friendly in a later version.
 
 . Enter the crm shell and create an editable copy of the existing
   configuration: +cib new stonith+.  Then create a fencing resource containing a
   primitive resource with a class of stonith, a type matching the agent chosen
   in step 1, and a parameter for each of the values returned in step 2: +configure
   primitive ...+
 endif::[]
 
 . If the device does not know how to fence nodes based on their uname,
   you may also need to set the special +pcmk_host_map+ parameter.  See
   +man stonithd+ for details.
 
 . If the device does not support the list command, you may also need
   to set the special +pcmk_host_list+ and/or +pcmk_host_check+
   parameters.  See +man stonithd+ for details.
 
 . If the device does not expect the victim to be specified with the
   port parameter, you may also need to set the special
   +pcmk_host_argument+ parameter. See +man stonithd+ for details.
 
 ifdef::crm[]
 . Upload it into the CIB from the shell: +cib commit stonith+
 endif::[]
 
+ifdef::pcs[]
+. Commit the new configuration. +pcs cluster push cib stonith_cfg+
+endif::[]
+
 . Once the stonith resource is running, you can test it by executing
   +stonith_admin --reboot nodename+, although you might want to stop the
   cluster on that machine first.
 
 == Example ==
 
 Assuming we have a chassis containing four nodes and an IPMI device
 active on 10.0.0.1, we would choose the fence_ipmilan driver in step
 2 and obtain the following list of parameters:
 
 .Obtaining a list of STONITH Parameters
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs stonith describe fence_ipmilan
 Stonith options for: fence_ipmilan
   auth: IPMI Lan Auth type (md5, password, or none)
   ipaddr: IPMI Lan IP to talk to
   passwd: Password (if required) to control power on IPMI device
   passwd_script: Script to retrieve password (if required)
   lanplus: Use Lanplus
   login: Username/Login (if required) to control power on IPMI device
   action: Operation to perform. Valid operations: on, off, reboot, status, list, diag, monitor or metadata
   timeout: Timeout (sec) for IPMI operation
   cipher: Ciphersuite to use (same as ipmitool -C parameter)
   method: Method to fence (onoff or cycle)
   power_wait: Wait X seconds after on/off operation
   delay: Wait X seconds before fencing is started
   privlvl: Privilege level on IPMI device
   verbose: Verbose mode
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # stonith_admin --metadata -a fence_ipmilan
 ----
 [source,XML]
 ----
 <?xml version="1.0" ?>
 <resource-agent name="fence_ipmilan" shortdesc="Fence agent for IPMI over LAN">
 <longdesc>
 fence_ipmilan is an I/O Fencing agent which can be used with machines controlled by IPMI. This agent calls support software using ipmitool (http://ipmitool.sf.net/).
 
 To use fence_ipmilan with HP iLO 3 you have to enable lanplus option (lanplus / -P) and increase wait after operation to 4 seconds (power_wait=4 / -T 4)</longdesc>
 <parameters>
         <parameter name="auth" unique="1">
                 <getopt mixed="-A" />
                 <content type="string" />
                 <shortdesc lang="en">IPMI Lan Auth type (md5, password, or none)</shortdesc>
         </parameter>
         <parameter name="ipaddr" unique="1">
                 <getopt mixed="-a" />
                 <content type="string" />
                 <shortdesc lang="en">IPMI Lan IP to talk to</shortdesc>
         </parameter>
         <parameter name="passwd" unique="1">
                 <getopt mixed="-p" />
                 <content type="string" />
                 <shortdesc lang="en">Password (if required) to control power on IPMI device</shortdesc>
         </parameter>
         <parameter name="passwd_script" unique="1">
                 <getopt mixed="-S" />
                 <content type="string" />
                 <shortdesc lang="en">Script to retrieve password (if required)</shortdesc>
         </parameter>
         <parameter name="lanplus" unique="1">
                 <getopt mixed="-P" />
                 <content type="boolean" />
                 <shortdesc lang="en">Use Lanplus</shortdesc>
         </parameter>
         <parameter name="login" unique="1">
                 <getopt mixed="-l" />
                 <content type="string" />
                 <shortdesc lang="en">Username/Login (if required) to control power on IPMI device</shortdesc>
         </parameter>
         <parameter name="action" unique="1">
                 <getopt mixed="-o" />
                 <content type="string" default="reboot"/>
                 <shortdesc lang="en">Operation to perform. Valid operations: on, off, reboot, status, list, diag, monitor or metadata</shortdesc>
         </parameter>
         <parameter name="timeout" unique="1">
                 <getopt mixed="-t" />
                 <content type="string" />
                 <shortdesc lang="en">Timeout (sec) for IPMI operation</shortdesc>
         </parameter>
         <parameter name="cipher" unique="1">
                 <getopt mixed="-C" />
                 <content type="string" />
                 <shortdesc lang="en">Ciphersuite to use (same as ipmitool -C parameter)</shortdesc>
         </parameter>
         <parameter name="method" unique="1">
                 <getopt mixed="-M" />
                 <content type="string" default="onoff"/>
                 <shortdesc lang="en">Method to fence (onoff or cycle)</shortdesc>
         </parameter>
         <parameter name="power_wait" unique="1">
                 <getopt mixed="-T" />
                 <content type="string" default="2"/>
                 <shortdesc lang="en">Wait X seconds after on/off operation</shortdesc>
         </parameter>
         <parameter name="delay" unique="1">
                 <getopt mixed="-f" />
                 <content type="string" />
                 <shortdesc lang="en">Wait X seconds before fencing is started</shortdesc>
         </parameter>
         <parameter name="verbose" unique="1">
                 <getopt mixed="-v" />
                 <content type="boolean" />
                 <shortdesc lang="en">Verbose mode</shortdesc>
         </parameter>
 </parameters>
 <actions>
         <action name="on" />
         <action name="off" />
         <action name="reboot" />
         <action name="status" />
         <action name="diag" />
         <action name="list" />
         <action name="monitor" />
         <action name="metadata" />
 </actions>
 </resource-agent>
 ----
 endif::[]
 
 from which we would create a STONITH resource fragment that might look
 like this
 
 .Sample STONITH Resource
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs cluster cib stonith_cfg
 # pcs -f stonith_cfg stonith create ipmi-fencing fence_ipmilan \
  pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser \
  passwd=acd123 op monitor interval=60s
 # pcs -f stonith_cfg stonith
  ipmi-fencing	(stonith:fence_ipmilan) Stopped
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 # crm
 crm(live)# cib new stonith
 INFO: stonith shadow CIB created
 crm(stonith)# configure primitive ipmi-fencing stonith::fence_ipmilan \
  params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123 \
  op monitor interval="60s"
 ----
 endif::[]
 
 And finally, since we disabled it earlier, we need to re-enable STONITH.
 At this point we should have the following configuration.
 
 ifdef::pcs[]
 [source,Bash]
 ----
 # pcs -f stonith_cfg property set stonith-enabled=true
 # pcs -f stonith_cfg property
 dc-version: 1.1.8-1.el7-60a19ed12fdb4d5c6a6b6767f52e5391e447fec0
 cluster-infrastructure: corosync
 no-quorum-policy: ignore
 stonith-enabled: true
 ----
 
 Now push the configuration into the cluster.
 
 [source,Bash]
 ----
 # pcs cluster push cib stonith_cfg
 ----
 endif::[]
 
 ifdef::crm[]
 [source,Bash]
 ----
 crm(stonith)# configure property stonith-enabled="true"
 crm(stonith)# configure show
 node pcmk-1
 node pcmk-2
 primitive WebData ocf:linbit:drbd \
     params drbd_resource="wwwdata" \
     op monitor interval="60s"
 primitive WebFS ocf:heartbeat:Filesystem \
     params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2"
 primitive WebSite ocf:heartbeat:apache \
     params configfile="/etc/httpd/conf/httpd.conf" \
     op monitor interval="1min"
 primitive ClusterIP ocf:heartbeat:IPaddr2 \
     params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \
     op monitor interval="30s"
 primitive ipmi-fencing stonith::fence_ipmilan \
     params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123 \
     op monitor interval="60s"
 ms WebDataClone WebData \
     meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
 clone WebFSClone WebFS
 clone WebIP ClusterIP \
     meta globally-unique="true" clone-max="2" clone-node-max="2"
 clone WebSiteClone WebSite
 colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone
 colocation fs_on_drbd inf: WebFSClone WebDataClone:Master
 colocation website-with-ip inf: WebSiteClone WebIP
 order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start
 order WebSite-after-WebFS inf: WebFSClone WebSiteClone
 order apache-after-ip inf: WebIP WebSiteClone
 property $id="cib-bootstrap-options" \
     dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \
     cluster-infrastructure="openais" \
     expected-quorum-votes="2" \
     stonith-enabled="true" \
     no-quorum-policy="ignore"
 rsc_defaults $id="rsc-options" \
     resource-stickiness="100"
 crm(stonith)# cib commit stonith
 INFO: commited 'stonith' shadow CIB to the cluster
 crm(stonith)# quit
 bye
 ----
 endif::[]