diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
index 9c15be4af8..83e8d029d3 100644
--- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
@@ -1,388 +1,388 @@
 :compat-mode: legacy
 [appendix]
 == Configuration Recap ==
 
 === Final Cluster Configuration ===
 
 ----
 [root@pcmk-1 ~]# pcs resource
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-1 pcmk-2 ]
  Clone Set: dlm-clone [dlm]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: ClusterIP-clone [ClusterIP] (unique)
      ClusterIP:0	(ocf::heartbeat:IPaddr2):	Started pcmk-2
      ClusterIP:1	(ocf::heartbeat:IPaddr2):	Started pcmk-1
  Clone Set: WebFS-clone [WebFS]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: WebSite-clone [WebSite]
      Started: [ pcmk-1 pcmk-2 ]
 ----
 
 ----
 [root@pcmk-1 ~]# pcs resource op defaults
 timeout: 240s
 ----
 
 ----
 [root@pcmk-1 ~]# pcs stonith
  ipmi-fencing	(stonith:fence_ipmilan): Started pcmk-1
 ----
 
 ----
 [root@pcmk-1 ~]# pcs constraint
 Location Constraints:
 Ordering Constraints:
   start ClusterIP-clone then start WebSite-clone (kind:Mandatory)
   promote WebDataClone then start WebFS-clone (kind:Mandatory)
   start WebFS-clone then start WebSite-clone (kind:Mandatory)
   start dlm-clone then start WebFS-clone (kind:Mandatory)
 Colocation Constraints:
   WebSite-clone with ClusterIP-clone (score:INFINITY)
   WebFS-clone with WebDataClone (score:INFINITY) (with-rsc-role:Master)
   WebSite-clone with WebFS-clone (score:INFINITY)
   WebFS-clone with dlm-clone (score:INFINITY)
 Ticket Constraints:
 ----
 
 ----
 [root@pcmk-1 ~]# pcs status
 Cluster name: mycluster
 Stack: corosync
 Current DC: pcmk-1 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
 Last updated: Tue Sep 11 10:41:53 2018
 Last change: Tue Sep 11 10:40:16 2018 by root via cibadmin on pcmk-1
 
 2 nodes configured
 11 resources configured
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ipmi-fencing   (stonith:fence_ipmilan):        Started pcmk-1
  Master/Slave Set: WebDataClone [WebData]
      Masters: [ pcmk-1 pcmk-2 ]
  Clone Set: dlm-clone [dlm]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: ClusterIP-clone [ClusterIP] (unique)
      ClusterIP:0	(ocf::heartbeat:IPaddr2):	Started pcmk-2
      ClusterIP:1	(ocf::heartbeat:IPaddr2):	Started pcmk-1
  Clone Set: WebFS-clone [WebFS]
      Started: [ pcmk-1 pcmk-2 ]
  Clone Set: WebSite-clone [WebSite]
      Started: [ pcmk-1 pcmk-2 ]
 
 Daemon Status:
   corosync: active/disabled
   pacemaker: active/disabled
   pcsd: active/enabled
 ----
 
 ----
 [root@pcmk-1 ~]# pcs cluster cib --config
 ----
 [source,XML]
 ----
 <configuration>
   <crm_config>
     <cluster_property_set id="cib-bootstrap-options">
       <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
       <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.18-11.el7_5.3-2b07d5c5a9"/>
       <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
       <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
       <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
       <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1536679009"/>
     </cluster_property_set>
   </crm_config>
   <nodes>
     <node id="1" uname="pcmk-1"/>
     <node id="2" uname="pcmk-2"/>
   </nodes>
   <resources>
     <primitive class="stonith" id="impi-fencing" type="fence_ipmilan">
       <instance_attributes id="impi-fencing-instance_attributes">
         <nvpair id="impi-fencing-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="pcmk-1 pcmk-2"/>
         <nvpair id="impi-fencing-instance_attributes-ipaddr" name="ipaddr" value="10.0.0.1"/>
         <nvpair id="impi-fencing-instance_attributes-login" name="login" value="testuser"/>
         <nvpair id="impi-fencing-instance_attributes-passwd" name="passwd" value="acd123"/>
       </instance_attributes>
       <operations>
         <op id="impi-fencing-interval-60s" interval="60s" name="monitor"/>
       </operations>
     </primitive>
     <master id="WebDataClone">
       <primitive class="ocf" id="WebData" provider="linbit" type="drbd">
         <instance_attributes id="WebData-instance_attributes">
           <nvpair id="WebData-instance_attributes-drbd_resource" name="drbd_resource" value="wwwdata"/>
         </instance_attributes>
         <operations>
           <op id="WebData-demote-interval-0s" interval="0s" name="demote" timeout="90"/>
           <op id="WebData-monitor-interval-60s" interval="60s" name="monitor"/>
           <op id="WebData-notify-interval-0s" interval="0s" name="notify" timeout="90"/>
           <op id="WebData-promote-interval-0s" interval="0s" name="promote" timeout="90"/>
           <op id="WebData-reload-interval-0s" interval="0s" name="reload" timeout="30"/>
           <op id="WebData-start-interval-0s" interval="0s" name="start" timeout="240"/>
           <op id="WebData-stop-interval-0s" interval="0s" name="stop" timeout="100"/>
         </operations>
       </primitive>
       <meta_attributes id="WebDataClone-meta_attributes">
         <nvpair id="WebDataClone-meta_attributes-master-node-max" name="master-node-max" value="1"/>
         <nvpair id="WebDataClone-meta_attributes-clone-max" name="clone-max" value="2"/>
         <nvpair id="WebDataClone-meta_attributes-notify" name="notify" value="true"/>
         <nvpair id="WebDataClone-meta_attributes-master-max" name="master-max" value="2"/>
         <nvpair id="WebDataClone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
       </meta_attributes>
     </master>
     <clone id="dlm-clone">
       <primitive class="ocf" id="dlm" provider="pacemaker" type="controld">
         <operations>
           <op id="dlm-monitor-interval-60s" interval="60s" name="monitor"/>
           <op id="dlm-start-interval-0s" interval="0s" name="start" timeout="90"/>
           <op id="dlm-stop-interval-0s" interval="0s" name="stop" timeout="100"/>
         </operations>
       </primitive>
       <meta_attributes id="dlm-clone-meta_attributes">
         <nvpair id="dlm-clone-meta_attributes-clone-max" name="clone-max" value="2"/>
         <nvpair id="dlm-clone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
       </meta_attributes>
     </clone>
     <clone id="ClusterIP-clone">
       <primitive class="ocf" id="ClusterIP" provider="heartbeat" type="IPaddr2">
         <instance_attributes id="ClusterIP-instance_attributes">
-          <nvpair id="ClusterIP-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+          <nvpair id="ClusterIP-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
           <nvpair id="ClusterIP-instance_attributes-ip" name="ip" value="192.168.122.120"/>
           <nvpair id="ClusterIP-instance_attributes-clusterip_hash" name="clusterip_hash" value="sourceip"/>
         </instance_attributes>
         <operations>
           <op id="ClusterIP-monitor-interval-30s" interval="30s" name="monitor"/>
           <op id="ClusterIP-start-interval-0s" interval="0s" name="start" timeout="20s"/>
           <op id="ClusterIP-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
         </operations>
         <meta_attributes id="ClusterIP-meta_attributes">
           <nvpair id="ClusterIP-meta_attributes-resource-stickiness" name="resource-stickiness" value="0"/>
         </meta_attributes>
       </primitive>
       <meta_attributes id="ClusterIP-clone-meta_attributes">
         <nvpair id="ClusterIP-clone-meta_attributes-clone-max" name="clone-max" value="2"/>
         <nvpair id="ClusterIP-clone-meta_attributes-clone-node-max" name="clone-node-max" value="2"/>
         <nvpair id="ClusterIP-clone-meta_attributes-globally-unique" name="globally-unique" value="true"/>
       </meta_attributes>
     </clone>
     <clone id="WebFS-clone">
       <primitive class="ocf" id="WebFS" provider="heartbeat" type="Filesystem">
         <instance_attributes id="WebFS-instance_attributes">
           <nvpair id="WebFS-instance_attributes-device" name="device" value="/dev/drbd1"/>
           <nvpair id="WebFS-instance_attributes-directory" name="directory" value="/var/www/html"/>
           <nvpair id="WebFS-instance_attributes-fstype" name="fstype" value="gfs2"/>
         </instance_attributes>
         <operations>
           <op id="WebFS-monitor-interval-20" interval="20" name="monitor" timeout="40"/>
           <op id="WebFS-notify-interval-0s" interval="0s" name="notify" timeout="60"/>
           <op id="WebFS-start-interval-0s" interval="0s" name="start" timeout="60"/>
           <op id="WebFS-stop-interval-0s" interval="0s" name="stop" timeout="60"/>
         </operations>
       </primitive>
     </clone>
     <clone id="WebSite-clone">
       <primitive class="ocf" id="WebSite" provider="heartbeat" type="apache">
         <instance_attributes id="WebSite-instance_attributes">
           <nvpair id="WebSite-instance_attributes-configfile" name="configfile" value="/etc/httpd/conf/httpd.conf"/>
           <nvpair id="WebSite-instance_attributes-statusurl" name="statusurl" value="http://localhost/server-status"/>
         </instance_attributes>
         <operations>
           <op id="WebSite-monitor-interval-1min" interval="1min" name="monitor"/>
           <op id="WebSite-start-interval-0s" interval="0s" name="start" timeout="40s"/>
           <op id="WebSite-stop-interval-0s" interval="0s" name="stop" timeout="60s"/>
         </operations>
       </primitive>
       <meta_attributes id="WebSite-clone-meta_attributes"/>
     </clone>
   </resources>
   <constraints>
     <rsc_colocation id="colocation-WebSite-ClusterIP-INFINITY" rsc="WebSite-clone" score="INFINITY" with-rsc="ClusterIP-clone"/>
     <rsc_order first="ClusterIP-clone" first-action="start" id="order-ClusterIP-WebSite-mandatory" then="WebSite-clone" then-action="start"/>
     <rsc_colocation id="colocation-WebFS-WebDataClone-INFINITY" rsc="WebFS-clone" score="INFINITY" with-rsc="WebDataClone" with-rsc-role="Master"/>
     <rsc_order first="WebDataClone" first-action="promote" id="order-WebDataClone-WebFS-mandatory" then="WebFS-clone" then-action="start"/>
     <rsc_colocation id="colocation-WebSite-WebFS-INFINITY" rsc="WebSite-clone" score="INFINITY" with-rsc="WebFS-clone"/>
     <rsc_order first="WebFS-clone" first-action="start" id="order-WebFS-WebSite-mandatory" then="WebSite-clone" then-action="start"/>
     <rsc_colocation id="colocation-WebFS-dlm-clone-INFINITY" rsc="WebFS-clone" score="INFINITY" with-rsc="dlm-clone"/>
     <rsc_order first="dlm-clone" first-action="start" id="order-dlm-clone-WebFS-mandatory" then="WebFS-clone" then-action="start"/>
   </constraints>
   <rsc_defaults>
     <meta_attributes id="rsc_defaults-options">
       <nvpair id="rsc_defaults-options-resource-stickiness" name="resource-stickiness" value="100"/>
     </meta_attributes>
   </rsc_defaults>
   <op_defaults>
     <meta_attributes id="op_defaults-options">
       <nvpair id="op_defaults-options-timeout" name="timeout" value="240s"/>
     </meta_attributes>
   </op_defaults>
 </configuration>
 ----
 
 === Node List ===
 
 ----
 [root@pcmk-1 ~]# pcs status nodes
 Pacemaker Nodes:
  Online: pcmk-1 pcmk-2
  Standby:
  Maintenance:
  Offline:
 Pacemaker Remote Nodes:
  Online:
  Standby:
  Maintenance:
  Offline:
 ----
 
 === Cluster Options ===
 
 ----
 [root@pcmk-1 ~]# pcs property
 Cluster Properties:
  cluster-infrastructure: corosync
  cluster-name: mycluster
  dc-version: 1.1.18-11.el7_5.3-2b07d5c5a9
  have-watchdog: false
  last-lrm-refresh: 1536679009
  stonith-enabled: true
 ----
 
 The output shows state information automatically obtained about the cluster, including:
 
 * *cluster-infrastructure* - the cluster communications layer in use
 * *cluster-name* - the cluster name chosen by the administrator when the cluster was created
 * *dc-version* - the version (including upstream source-code hash) of Pacemaker
   used on the Designated Controller, which is the node elected to determine what
   actions are needed when events occur
 
 The output also shows options set by the administrator that control the way the cluster operates, including:
 
 * *stonith-enabled=true* - whether the cluster is allowed to use STONITH resources
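 
 To check a single option rather than the whole list, `pcs property show`
 also accepts a property name. For example (the exact output may vary
 slightly by pcs version):
 
 ----
 [root@pcmk-1 ~]# pcs property show stonith-enabled
 Cluster Properties:
  stonith-enabled: true
 ----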
 
 === Resources ===
 
 ==== Default Options ====
 
 ----
 [root@pcmk-1 ~]# pcs resource defaults
 resource-stickiness: 100
 ----
 
 This shows resource option defaults that apply to every resource that does not
 explicitly set the option itself. Above:
 
 * *resource-stickiness* - how strongly a healthy resource prefers to stay on the node where it is currently running, rather than be moved elsewhere
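 
 A resource can override this default with its own value. As a hypothetical
 example, the following would pin the WebSite resource even more strongly to
 wherever it is currently running:
 
 ----
 [root@pcmk-1 ~]# pcs resource meta WebSite resource-stickiness=200
 ----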
 
 ==== Fencing ====
 
 ----
 [root@pcmk-1 ~]# pcs stonith show
  ipmi-fencing	(stonith:fence_ipmilan):	Started pcmk-1
 [root@pcmk-1 ~]# pcs stonith show ipmi-fencing
  Resource: ipmi-fencing (class=stonith type=fence_ipmilan)
   Attributes: ipaddr="10.0.0.1" login="testuser" passwd="acd123" pcmk_host_list="pcmk-1 pcmk-2" 
   Operations: monitor interval=60s (fence-monitor-interval-60s)
 ----
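 
 A fencing device like this one can be created with a command along these
 lines (the IP address and credentials are the example values shown above):
 
 ----
 [root@pcmk-1 ~]# pcs stonith create ipmi-fencing fence_ipmilan \
       pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser \
       passwd=acd123 op monitor interval=60s
 ----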
 
 ==== Service Address ====
 
 Users of the services provided by the cluster require an unchanging
 address with which to access them. Additionally, we cloned the address so
 it will be active on both nodes. An iptables rule (created as part of the
 resource agent) is used to ensure that each request only gets processed by one
 of the two clone instances. The additional meta options tell the cluster
 that we want two instances of the clone (one "request bucket" for each
 node) and that if one node fails, then the remaining node should hold
 both.
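 
 Clone options like these can be supplied when cloning the IP; a command of
 roughly this form produces the meta attributes shown below:
 
 ----
 [root@pcmk-1 ~]# pcs resource clone ClusterIP \
       clone-max=2 clone-node-max=2 globally-unique=true
 ----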
 
 ----
 [root@pcmk-1 ~]# pcs resource show ClusterIP-clone
  Clone: ClusterIP-clone
   Meta Attrs: clone-max=2 clone-node-max=2 globally-unique=true 
   Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: cidr_netmask=32 ip=192.168.122.120 clusterip_hash=sourceip
+   Attributes: cidr_netmask=24 ip=192.168.122.120 clusterip_hash=sourceip
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
                start interval=0s timeout=20s (ClusterIP-start-interval-0s)
                stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
 ----
 
 ==== DRBD - Shared Storage ====
 
 Here, we define the DRBD service and specify which DRBD resource (from
 /etc/drbd.d/*.res) it should manage. We make it a master clone resource and, in
 order to have an active/active setup, allow both instances to be promoted to master
 at the same time. We also set the notify option so that the
 cluster will tell the DRBD agent when its peer changes state.
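 
 A master/slave resource with these options can be created with a command of
 roughly this form (note *master-max=2*, which is what allows both instances
 to be promoted at once):
 
 ----
 [root@pcmk-1 ~]# pcs resource master WebDataClone WebData \
       master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 \
       notify=true
 ----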
 
 ----
 [root@pcmk-1 ~]# pcs resource show WebDataClone
  Master: WebDataClone
   Meta Attrs: master-node-max=1 clone-max=2 notify=true master-max=2 clone-node-max=1 
   Resource: WebData (class=ocf provider=linbit type=drbd)
    Attributes: drbd_resource=wwwdata
    Operations: demote interval=0s timeout=90 (WebData-demote-interval-0s)
                monitor interval=60s (WebData-monitor-interval-60s)
                notify interval=0s timeout=90 (WebData-notify-interval-0s)
                promote interval=0s timeout=90 (WebData-promote-interval-0s)
                reload interval=0s timeout=30 (WebData-reload-interval-0s)
                start interval=0s timeout=240 (WebData-start-interval-0s)
                stop interval=0s timeout=100 (WebData-stop-interval-0s)
 [root@pcmk-1 ~]# pcs constraint ref WebDataClone
 Resource: WebDataClone
   colocation-WebFS-WebDataClone-INFINITY
   order-WebDataClone-WebFS-mandatory
 ----
 
 ==== Cluster Filesystem ====
 
 The cluster filesystem ensures that files are read and written correctly.
 We need to specify the block device (provided by DRBD), where we want it
 mounted and that we are using GFS2. Again, it is a clone because it is
 intended to be active on both nodes. The additional constraints ensure
 that it can only be started on nodes with active DLM and DRBD instances.
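 
 Constraints like the ones listed for this resource can be added with
 commands of roughly this form:
 
 ----
 [root@pcmk-1 ~]# pcs constraint colocation add WebFS-clone with WebDataClone \
       INFINITY with-rsc-role=Master
 [root@pcmk-1 ~]# pcs constraint order promote WebDataClone then start WebFS-clone
 [root@pcmk-1 ~]# pcs constraint colocation add WebFS-clone with dlm-clone INFINITY
 [root@pcmk-1 ~]# pcs constraint order start dlm-clone then start WebFS-clone
 ----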
 
 ----
 [root@pcmk-1 ~]# pcs resource show WebFS-clone
  Clone: WebFS-clone
   Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
    Attributes: device=/dev/drbd1 directory=/var/www/html fstype=gfs2
    Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20)
                notify interval=0s timeout=60 (WebFS-notify-interval-0s)
                start interval=0s timeout=60 (WebFS-start-interval-0s)
                stop interval=0s timeout=60 (WebFS-stop-interval-0s)
 [root@pcmk-1 ~]# pcs constraint ref WebFS-clone
 Resource: WebFS-clone
   colocation-WebFS-WebDataClone-INFINITY
   colocation-WebSite-WebFS-INFINITY
   colocation-WebFS-dlm-clone-INFINITY
   order-WebDataClone-WebFS-mandatory
   order-WebFS-WebSite-mandatory
   order-dlm-clone-WebFS-mandatory
 ----
 
 ==== Apache ====
 
 Lastly, we have the actual service, Apache. We need only tell the cluster
 where to find its main configuration file and restrict it to running on
 nodes that have the required filesystem mounted and the IP address active.
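 
 The colocation and ordering constraints referenced above correspond to
 commands of roughly this form:
 
 ----
 [root@pcmk-1 ~]# pcs constraint colocation add WebSite-clone with ClusterIP-clone INFINITY
 [root@pcmk-1 ~]# pcs constraint order start ClusterIP-clone then start WebSite-clone
 [root@pcmk-1 ~]# pcs constraint colocation add WebSite-clone with WebFS-clone INFINITY
 [root@pcmk-1 ~]# pcs constraint order start WebFS-clone then start WebSite-clone
 ----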
 
 ----
 [root@pcmk-1 ~]# pcs resource show WebSite-clone
  Clone: WebSite-clone
   Resource: WebSite (class=ocf provider=heartbeat type=apache)
    Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
    Operations: monitor interval=1min (WebSite-monitor-interval-1min)
                start interval=0s timeout=40s (WebSite-start-interval-0s)
                stop interval=0s timeout=60s (WebSite-stop-interval-0s)
 [root@pcmk-1 ~]# pcs constraint ref WebSite-clone
 Resource: WebSite-clone
   colocation-WebSite-ClusterIP-INFINITY
   colocation-WebSite-WebFS-INFINITY
   order-ClusterIP-WebSite-mandatory
   order-WebFS-WebSite-mandatory
 ----
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
index b0d950c5e1..0cd3463113 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt
@@ -1,377 +1,377 @@
 :compat-mode: legacy
 = Create an Active/Passive Cluster =
 
 == Explore the Existing Configuration ==
 
 When Pacemaker starts up, it automatically records the number and details
 of the nodes in the cluster, as well as which stack is being used and the
 version of Pacemaker being used.
 
 The first few lines of output should look like this:
 
 ----
 [root@pcmk-1 ~]# pcs status
 Cluster name: mycluster
 WARNING: no stonith devices and stonith-enabled is not false
 Stack: corosync
 Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
 Last updated: Mon Sep 10 16:41:46 2018
 Last change: Mon Sep 10 16:30:53 2018 by hacluster via crmd on pcmk-2
 
 2 nodes configured
 0 resources configured
 
 Online: [ pcmk-1 pcmk-2 ]
 ----
 
 For those who are not afraid of XML, you can see the raw cluster
 configuration and status by using the `pcs cluster cib` command.
 
 .The last XML you'll see in this document
 ======
 ----
 [root@pcmk-1 ~]# pcs cluster cib
 ----
 [source,XML]
 ----
 <cib crm_feature_set="3.0.14" validate-with="pacemaker-2.10" epoch="5" num_updates="4" admin_epoch="0" cib-last-written="Mon Sep 10 16:30:53 2018" update-origin="pcmk-2" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.18-11.el7_5.3-2b07d5c5a9"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
         <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" uname="pcmk-1"/>
       <node id="2" uname="pcmk-2"/>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="1" uname="pcmk-1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
       <lrm id="1">
         <lrm_resources/>
       </lrm>
     </node_state>
     <node_state id="2" uname="pcmk-2" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
       <lrm id="2">
         <lrm_resources/>
       </lrm>
     </node_state>
   </status>
 </cib>
 ----
 ======
 
 Before we make any changes, it's a good idea to check the validity of
 the configuration.
 
 ----
 [root@pcmk-1 ~]# crm_verify -L -V
    error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
    error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
    error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Errors found during check: config not valid
 ----
 
 As you can see, the tool has found some errors.
 
 In order to guarantee the safety of your data,
 footnote:[If the data is corrupt, there is little point in continuing to make it available.]
 fencing (also called STONITH) is enabled by default. However, it also knows
 when no STONITH configuration has been supplied and reports this as a problem
 (since the cluster will not be able to make progress if a situation requiring
 node fencing arises).
 
 We will disable this feature for now and configure it later.
 
 To disable STONITH, set the *stonith-enabled* cluster option to
 false:
 
 ----
 [root@pcmk-1 ~]# pcs property set stonith-enabled=false
 [root@pcmk-1 ~]# crm_verify -L
 ----
 
 With the new cluster option set, the configuration is now valid.
 
 [WARNING]
 =========
 The use of `stonith-enabled=false` is completely inappropriate for a
 production cluster. It tells the cluster to simply pretend that failed nodes
 are safely powered off. Some vendors will refuse to support clusters that have
 STONITH disabled. We disable STONITH here only to defer the discussion of its
 configuration, which can differ widely from one installation to the
 next. See <<_what_is_stonith>> for information on why STONITH is important
 and details on how to configure it.
 =========
 
 == Add a Resource ==
 
 Our first resource will be a unique IP address that the cluster can bring up on
 either node. Regardless of where any cluster service(s) are running, end
 users need a consistent address to contact them on. Here, I will choose
 192.168.122.120 as the floating address, give it the imaginative name ClusterIP
 and tell the cluster to check whether it is running every 30 seconds.
 
 [WARNING]
 ===========
 The chosen address must not already be in use on the network.
 Do not reuse an IP address one of the nodes already has configured.
 ===========
 
 ----
 [root@pcmk-1 ~]# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \
-    ip=192.168.122.120 cidr_netmask=32 op monitor interval=30s
+    ip=192.168.122.120 cidr_netmask=24 op monitor interval=30s
 ----
 
 Another important piece of information here is *ocf:heartbeat:IPaddr2*.
 This tells Pacemaker three things about the resource you want to add:
 
 * The first field (*ocf* in this case) is the standard to which the resource
 script conforms and where to find it.
 
 * The second field (*heartbeat* in this case) is standard-specific; for OCF
 resources, it tells the cluster which OCF namespace the resource script is in.
 
 * The third field (*IPaddr2* in this case) is the name of the resource script.
 
 To obtain a list of the available resource standards (the *ocf* part of
 *ocf:heartbeat:IPaddr2*), run:
 
 ----
 [root@pcmk-1 ~]# pcs resource standards
 lsb
 ocf
 service
 systemd
 ----
 
 To obtain a list of the available OCF resource providers (the *heartbeat*
 part of *ocf:heartbeat:IPaddr2*), run:
 
 ----
 [root@pcmk-1 ~]# pcs resource providers
 heartbeat
 openstack
 pacemaker
 ----
 
 Finally, if you want to see all the resource agents available for
 a specific OCF provider (the *IPaddr2* part of *ocf:heartbeat:IPaddr2*), run:
 
 ----
 [root@pcmk-1 ~]# pcs resource agents ocf:heartbeat
 apache
 aws-vpc-move-ip
 awseip
 awsvip
 azure-lb
 clvm
 .
 . (skipping lots of resources to save space)
 .
 symlink
 tomcat
 VirtualDomain
 Xinetd
 ----
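 
 If you want the details of one particular agent, such as the parameters
 *IPaddr2* accepts, `pcs resource describe` prints its metadata (the lengthy
 output is omitted here):
 
 ----
 [root@pcmk-1 ~]# pcs resource describe ocf:heartbeat:IPaddr2
 ----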
 
 Now, verify that the IP resource has been added, and display the cluster's
 status to see that it is now active:
 
 ----
 [root@pcmk-1 ~]# pcs status
 Cluster name: mycluster
 Stack: corosync
 Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
 Last updated: Mon Sep 10 16:55:26 2018
 Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1
 
 2 nodes configured
 1 resource configured
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
 
 Daemon Status:
   corosync: active/disabled
   pacemaker: active/disabled
   pcsd: active/enabled
 ----
 
 == Perform a Failover ==
 
 Since our ultimate goal is high availability, we should test failover of
 our new resource before moving on.
 
 First, find the node on which the IP address is running.
 
 ----
 [root@pcmk-1 ~]# pcs status
 Cluster name: mycluster
 Stack: corosync
 Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
 Last updated: Mon Sep 10 16:55:26 2018
 Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1
 
 2 nodes configured
 1 resource configured
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-1
 ----
 
 You can see that the status of the *ClusterIP* resource
 is *Started* on a particular node (in this example, *pcmk-1*).
 Shut down Pacemaker and Corosync on that machine to trigger a failover.
 
 ----
 [root@pcmk-1 ~]# pcs cluster stop pcmk-1
 Stopping Cluster (pacemaker)...
 Stopping Cluster (corosync)...
 ----
 
 [NOTE]
 ======
 A cluster command such as +pcs cluster stop pass:[<replaceable>nodename</replaceable>]+ can be run
 from any node in the cluster, not just the affected node.
 ======
 
 Verify that pacemaker and corosync are no longer running:
 ----
 [root@pcmk-1 ~]# pcs status
 Error: cluster is not currently running on this node
 ----
 
 Go to the other node, and check the cluster status.
 
 ----
 [root@pcmk-2 ~]# pcs status
 Cluster name: mycluster
 Stack: corosync
 Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
 Last updated: Mon Sep 10 16:57:22 2018
 Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1
 
 2 nodes configured
 1 resource configured
 
 Online: [ pcmk-2 ]
 OFFLINE: [ pcmk-1 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
 
 Daemon Status:
   corosync: active/disabled
   pacemaker: active/disabled
   pcsd: active/enabled
 ----
 
 Notice that *pcmk-1* is *OFFLINE* for cluster purposes (its *pcsd* is still
 active, allowing it to receive `pcs` commands, but it is not participating in
 the cluster).
 
 Also notice that *ClusterIP* is now running on *pcmk-2* -- failover happened
 automatically, and no errors are reported.
 
 [IMPORTANT]
 .Quorum
 ====
 If a cluster splits into two (or more) groups of nodes that can no longer
 communicate with each other (called _partitions_), _quorum_ is used to prevent
 resources from starting on more nodes than desired, which would risk
 data corruption.
 
 A cluster has quorum when more than half of all known nodes are online in
 the same partition, or for the mathematically inclined, whenever the following
 equation is true:
 ....
 total_nodes < 2 * active_nodes
 ....
 
 For example, if a 5-node cluster split into 3- and 2-node partitions,
 the 3-node partition would have quorum and could continue serving resources.
 If a 6-node cluster split into two 3-node partitions, neither partition
 would have quorum; Pacemaker's default behavior in such cases is to
 stop all resources, in order to prevent data corruption.
 
 Two-node clusters are a special case. By the above definition,
 a two-node cluster would only have quorum when both nodes are
 running. This would make the creation of a two-node cluster pointless,
 but corosync has the ability to treat two-node clusters as if only one node
 is required for quorum.
 
 The `pcs cluster setup` command will automatically configure *two_node: 1*
 in +corosync.conf+, so a two-node cluster will "just work".
 
 If you are using a different cluster shell, you will have to configure
 +corosync.conf+ appropriately yourself.
 ====
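 
 For reference, the quorum section that `pcs cluster setup` writes to
 +corosync.conf+ on a two-node cluster typically looks something like this
 (other directives omitted):
 
 ----
 quorum {
     provider: corosync_votequorum
     two_node: 1
 }
 ----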
 
 Now, simulate node recovery by restarting the cluster stack on *pcmk-1*, and
 check the cluster's status. (It may take a little while before the cluster
 gets going on the node, but it will eventually look like the output below.)
 
 ----
 [root@pcmk-1 ~]# pcs cluster start pcmk-1
 pcmk-1: Starting Cluster...
 [root@pcmk-1 ~]# pcs status
 Cluster name: mycluster
 Stack: corosync
 Current DC: pcmk-2 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
 Last updated: Mon Sep 10 17:00:04 2018
 Last change: Mon Sep 10 16:53:42 2018 by root via cibadmin on pcmk-1
 
 2 nodes configured
 1 resource configured
 
 Online: [ pcmk-1 pcmk-2 ]
 
 Full list of resources:
 
  ClusterIP	(ocf::heartbeat:IPaddr2):	Started pcmk-2
 
 Daemon Status:
   corosync: active/disabled
   pacemaker: active/disabled
   pcsd: active/enabled
 ----
 
 == Prevent Resources from Moving after Recovery ==
 
 In most circumstances, it is highly desirable to prevent healthy
 resources from being moved around the cluster. Moving resources almost
 always requires a period of downtime. For complex services such as
 databases, this period can be quite long.
 
 To address this, Pacemaker has the concept of resource _stickiness_,
 which controls how strongly a service prefers to stay running where it
 is. You may like to think of it as the "cost" of any downtime. By
 default, Pacemaker assumes there is zero cost associated with moving
 resources and will do so to achieve "optimal"
 footnote:[Pacemaker's definition of optimal may not always agree with that of a
 human. The order in which Pacemaker processes lists of resources and nodes
 creates implicit preferences in situations where the administrator has not
 explicitly specified them.]
 resource placement. We can specify a different stickiness for every
 resource, but it is often sufficient to change the default.
 
 ----
 [root@pcmk-1 ~]# pcs resource defaults resource-stickiness=100
 Warning: Defaults do not apply to resources which override them with their own defined values
 [root@pcmk-1 ~]# pcs resource defaults
 resource-stickiness: 100
 ----
diff --git a/doc/pcs-crmsh-quick-ref.md b/doc/pcs-crmsh-quick-ref.md
index a293823d98..5bd54cc4d0 100644
--- a/doc/pcs-crmsh-quick-ref.md
+++ b/doc/pcs-crmsh-quick-ref.md
@@ -1,355 +1,355 @@
 <!-- (new version of doctoc removed this, so added above:) -->
 <!-- *generated with [DocToc](https://github.com/thlorenz/doctoc-web/)* -->
 <!-- START doctoc generated TOC please keep comment here to allow auto update -->
 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
 **Table of Contents**
 
 - [General Operations](#general-operations)
   - [Display the configuration](#display-the-configuration)
   - [Display the current status](#display-the-current-status)
   - [Node standby](#node-standby)
   - [Set cluster property](#set-cluster-property)
 - [Resource manipulation](#resource-manipulation)
   - [List Resource Agent (RA) classes](#list-resource-agent-ra-classes)
   - [List available RAs](#list-available-ras)
   - [List RA info](#list-ra-info)
   - [Create a resource](#create-a-resource)
   - [Display a resource](#display-a-resource)
   - [Display fencing resources](#display-fencing-resources)
   - [Display Stonith RA info](#display-stonith-ra-info)
   - [Start a resource](#start-a-resource)
   - [Stop a resource](#stop-a-resource)
   - [Remove a resource](#remove-a-resource)
   - [Modify a resource](#modify-a-resource)
   - [Delete parameters for a given resource](#delete-parameters-for-a-given-resource)
   - [List the current resource defaults](#list-the-current-resource-defaults)
   - [Set resource defaults](#set-resource-defaults)
   - [List the current operation defaults](#list-the-current-operation-defaults)
   - [Set operation defaults](#set-operation-defaults)
   - [Set Colocation](#set-colocation)
   - [Set ordering](#set-ordering)
   - [Set preferred location](#set-preferred-location)
   - [Move resources](#move-resources)
   - [Resource tracing](#resource-tracing)
   - [Clear fail counts](#clear-fail-counts)
   - [Edit fail counts](#edit-fail-counts)
   - [Handling configuration elements by type](#handling-configuration-elements-by-type)
   - [Create a clone](#create-a-clone)
   - [Create a master/slave clone](#create-a-masterslave-clone)
 - [Other operations](#other-operations)
   - [Batch changes](#batch-changes)
   - [Template creation](#template-creation)
   - [Log analysis](#log-analysis)
   - [Configuration scripts](#configuration-scripts)
 
 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
 
 # General Operations
 
 ## Display the configuration
 
     crmsh # crm configure show xml
     pcs   # pcs cluster cib
 
 To show a simplified (non-xml) syntax
 
     crmsh # crm configure show
     pcs   # pcs config
     
 ## Display the current status
 
     crmsh # crm status
     pcs   # pcs status
 
 also
 
     # crm_mon -1
 
 ## Node standby
 
 Put node in standby
 
     crmsh # crm node standby pcmk-1
     pcs   # pcs cluster standby pcmk-1
 
 Remove node from standby
 
     crmsh # crm node online pcmk-1
     pcs   # pcs cluster unstandby pcmk-1
 
 crm can limit the standby status to the next reboot or make it permanent.
 pcs can apply the change to all nodes at once.
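 
 For example (assuming these options are available in your versions):
 
     crmsh # crm node standby pcmk-1 reboot
     pcs   # pcs cluster standby --all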
 
 ## Set cluster property
 
     crmsh # crm configure property stonith-enabled=false
     pcs   # pcs property set stonith-enabled=false
 
 # Resource manipulation
 
 ## List Resource Agent (RA) classes
 
     crmsh # crm ra classes
     pcs   # pcs resource standards
 
 ## List available RAs
 
     crmsh # crm ra list ocf
     crmsh # crm ra list lsb
     crmsh # crm ra list service
     crmsh # crm ra list stonith
     pcs   # pcs resource agents ocf
     pcs   # pcs resource agents lsb
     pcs   # pcs resource agents service
     pcs   # pcs resource agents stonith
     pcs   # pcs resource agents
 
 You can also filter by provider
 
     crmsh # crm ra list ocf pacemaker
     pcs   # pcs resource agents ocf:pacemaker
 
 ## List RA info
 
     crmsh # crm ra meta IPaddr2
     pcs   # pcs resource describe IPaddr2
 
 Use any RA name (like IPaddr2) from the list displayed by the previous command.
 You can also use the full class:provider:RA format if multiple RAs with the same name are available:
 
     crmsh # crm ra meta ocf:heartbeat:IPaddr2
     pcs   # pcs resource describe ocf:heartbeat:IPaddr2
 
 ## Create a resource
 
     crmsh # crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \
-            params ip=192.168.122.120 cidr_netmask=32 \
+            params ip=192.168.122.120 cidr_netmask=24 \
             op monitor interval=30s 
-    pcs   # pcs resource create ClusterIP IPaddr2 ip=192.168.0.120 cidr_netmask=32
+    pcs   # pcs resource create ClusterIP IPaddr2 ip=192.168.122.120 cidr_netmask=24
 
 The standard and provider (`ocf:heartbeat`) are determined automatically since `IPaddr2` is unique.
 The monitor operation is automatically created based on the agent's metadata.
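 
 The fully qualified agent name and an explicit monitor operation are also
 accepted by pcs, matching the crmsh example more closely:
 
     pcs   # pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \
             ip=192.168.122.120 cidr_netmask=24 op monitor interval=30s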
 
 ## Display a resource
 
     crmsh # crm configure show
     pcs   # pcs resource show
 
 crmsh also displays fencing resources.
 The result can be filtered by supplying a resource name (e.g. `ClusterIP`):
 
     crmsh # crm configure show ClusterIP
     pcs   # pcs resource show ClusterIP
 
 ## Display fencing resources
 
     crmsh # crm resource show
     pcs   # pcs stonith show
 
 pcs treats STONITH devices separately.
 
 ## Display Stonith RA info
 
     crmsh # crm ra meta stonith:fence_ipmilan
     pcs   # pcs stonith describe fence_ipmilan
 
 ## Start a resource
 
     crmsh # crm resource start ClusterIP
     pcs   # pcs resource enable ClusterIP
 
 ## Stop a resource
 
     crmsh # crm resource stop ClusterIP
     pcs   # pcs resource disable ClusterIP
 
 ## Remove a resource
 
     crmsh # crm configure delete ClusterIP
     pcs   # pcs resource delete ClusterIP
 
 ## Modify a resource
 
     crmsh # crm resource param ClusterIP set clusterip_hash=sourceip
     pcs   # pcs resource update ClusterIP clusterip_hash=sourceip
 
 crmsh also has an `edit` command which edits the simplified CIB syntax
 (same commands as the command line) via a configurable text editor.
 
     crmsh # crm configure edit ClusterIP
 
 Using the interactive shell mode of crmsh, multiple changes can be
 edited and verified before committing to the live configuration.
 
     crmsh # crm configure
     crmsh # edit
     crmsh # verify
     crmsh # commit
 
 ## Delete parameters for a given resource
 
     crmsh # crm resource param ClusterIP delete nic
     pcs   # pcs resource update ClusterIP ip=192.168.0.98 nic=  
 
 ## List the current resource defaults
 
     crmsh # crm configure show type:rsc_defaults
     pcs   # pcs resource defaults
 
 ## Set resource defaults
 
     crmsh # crm configure rsc_defaults resource-stickiness=100
     pcs   # pcs resource defaults resource-stickiness=100
     
 ## List the current operation defaults
 
     crmsh # crm configure show type:op_defaults
     pcs   # pcs resource op defaults
 
 ## Set operation defaults
 
     crmsh # crm configure op_defaults timeout=240s
     pcs   # pcs resource op defaults timeout=240s
 
 ## Set Colocation
 
     crmsh # crm configure colocation website-with-ip INFINITY: WebSite ClusterIP
     pcs   # pcs constraint colocation add ClusterIP with WebSite INFINITY
 
 With roles
 
     crmsh # crm configure colocation another-ip-with-website inf: AnotherIP WebSite:Master
     pcs   # pcs constraint colocation add Started AnotherIP with Master WebSite INFINITY
 
 ## Set ordering
 
     crmsh # crm configure order apache-after-ip mandatory: ClusterIP WebSite
     pcs   # pcs constraint order ClusterIP then WebSite
 
 With roles:
 
     crmsh # crm configure order ip-after-website Mandatory: WebSite:Master AnotherIP
     pcs   # pcs constraint order promote WebSite then start AnotherIP
 
 ## Set preferred location
 
     crmsh # crm configure location prefer-pcmk-1 WebSite 50: pcmk-1
     pcs   # pcs constraint location WebSite prefers pcmk-1=50
     
 With roles:
 
     crmsh # crm configure location prefer-pcmk-1 WebSite rule role=Master 50: \#uname eq pcmk-1
     pcs   # pcs constraint location WebSite rule role=master 50 \#uname eq pcmk-1
 
 ## Move resources
 
     crmsh # crm resource move WebSite pcmk-1
     pcs   # pcs resource move WebSite pcmk-1
     
     crmsh # crm resource unmove WebSite
     pcs   # pcs resource clear WebSite
 
 A resource can also be moved away from a given node:
 
     crmsh # crm resource ban Website pcmk-2
     pcs   # pcs resource ban Website pcmk-2
 
 Remember that `move` and `ban` work by adding location constraints (with scores of INFINITY or -INFINITY for the given node), which remain in effect until cleared with `unmove`/`clear`.
 
 ## Resource tracing
 
     crmsh # crm resource trace Website
 
 ## Clear fail counts
 
     crmsh # crm resource cleanup Website
     pcs   # pcs resource cleanup Website
 
 ## Edit fail counts
 
     crmsh # crm resource failcount Website show pcmk-1
     crmsh # crm resource failcount Website set pcmk-1 100
 
 ## Handling configuration elements by type
 
 pcs deals with constraints differently. They can be manipulated with the commands above as well as the following (among others):
 
     pcs   # pcs constraint list --full
     pcs   # pcs constraint remove cli-ban-Website-on-pcmk-1
 
 Removing a constraint in crmsh uses the same command as removing a
 resource.
 
     crmsh # crm configure remove cli-ban-Website-on-pcmk-1
 
 The `show` and `edit` commands in crmsh can be used to manage
 resources and constraints by type:
 
     crmsh # crm configure show type:primitive
     crmsh # crm configure edit type:colocation
 
 ## Create a clone
 
     crmsh # crm configure clone WebIP ClusterIP meta globally-unique=true clone-max=2 clone-node-max=2
     pcs   # pcs resource clone ClusterIP globally-unique=true clone-max=2 clone-node-max=2
 
 ## Create a master/slave clone
 
     crmsh # crm configure ms WebDataClone WebData \
             meta master-max=1 master-node-max=1 \
             clone-max=2 clone-node-max=1 notify=true
     pcs   # pcs resource master WebDataClone WebData \
             master-max=1 master-node-max=1 \
             clone-max=2 clone-node-max=1 notify=true
 
 # Other operations
 
 ## Batch changes
 
     crmsh # crm
     crmsh # cib new drbd_cfg
     crmsh # configure primitive WebData ocf:linbit:drbd params drbd_resource=wwwdata \
             op monitor interval=60s
     crmsh # configure ms WebDataClone WebData meta master-max=1 master-node-max=1 \
             clone-max=2 clone-node-max=1 notify=true
     crmsh # cib commit drbd_cfg
     crmsh # quit
 
     pcs   # pcs cluster cib drbd_cfg
     pcs   # pcs -f drbd_cfg resource create WebData ocf:linbit:drbd drbd_resource=wwwdata \
             op monitor interval=60s
     pcs   # pcs -f drbd_cfg resource master WebDataClone WebData master-max=1 master-node-max=1 \
             clone-max=2 clone-node-max=1 notify=true
     pcs   # pcs cluster cib-push drbd_cfg
 
 ## Template creation
 
 Create a resource template based on a list of primitives of the same
 type
 
     crmsh # crm configure assist template ClusterIP AdminIP
 
 ## Log analysis
 
 Display information about recent cluster events
 
     crmsh # crm history
     crmsh # peinputs
     crmsh # transition pe-input-10
     crmsh # transition log pe-input-10
 
 ## Configuration scripts
 
 Create and apply multiple-step cluster configurations including
 configuration of cluster resources
 
     crmsh # crm script show apache
     crmsh # crm script run apache \
         id=WebSite \
         install=true \
         virtual-ip:ip=192.168.0.15 \
         database:id=WebData \
         database:install=true