diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
index 83e8d029d3..381bd68a5b 100644
--- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt
@@ -1,388 +1,363 @@
:compat-mode: legacy
[appendix]
== Configuration Recap ==
=== Final Cluster Configuration ===
----
[root@pcmk-1 ~]# pcs resource
Master/Slave Set: WebDataClone [WebData]
Masters: [ pcmk-1 pcmk-2 ]
Clone Set: dlm-clone [dlm]
Started: [ pcmk-1 pcmk-2 ]
- Clone Set: ClusterIP-clone [ClusterIP] (unique)
- ClusterIP:0 (ocf::heartbeat:IPaddr2): Started pcmk-2
- ClusterIP:1 (ocf::heartbeat:IPaddr2): Started pcmk-1
+ ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1
Clone Set: WebFS-clone [WebFS]
Started: [ pcmk-1 pcmk-2 ]
- Clone Set: WebSite-clone [WebSite]
- Started: [ pcmk-1 pcmk-2 ]
+ WebSite (ocf::heartbeat:apache): Started pcmk-1
----
----
[root@pcmk-1 ~]# pcs resource op defaults
timeout: 240s
----
----
[root@pcmk-1 ~]# pcs stonith
ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1
----
----
[root@pcmk-1 ~]# pcs constraint
Location Constraints:
Ordering Constraints:
- start ClusterIP-clone then start WebSite-clone (kind:Mandatory)
+ start ClusterIP then start WebSite (kind:Mandatory)
promote WebDataClone then start WebFS-clone (kind:Mandatory)
- start WebFS-clone then start WebSite-clone (kind:Mandatory)
+ start WebFS-clone then start WebSite (kind:Mandatory)
start dlm-clone then start WebFS-clone (kind:Mandatory)
Colocation Constraints:
- WebSite-clone with ClusterIP-clone (score:INFINITY)
+ WebSite with ClusterIP (score:INFINITY)
WebFS-clone with WebDataClone (score:INFINITY) (with-rsc-role:Master)
- WebSite-clone with WebFS-clone (score:INFINITY)
+ WebSite with WebFS-clone (score:INFINITY)
WebFS-clone with dlm-clone (score:INFINITY)
Ticket Constraints:
----
----
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: pcmk-1 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
Last updated: Tue Sep 11 10:41:53 2018
Last change: Tue Sep 11 10:40:16 2018 by root via cibadmin on pcmk-1
2 nodes configured
11 resources configured
Online: [ pcmk-1 pcmk-2 ]
Full list of resources:
ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1
Master/Slave Set: WebDataClone [WebData]
Masters: [ pcmk-1 pcmk-2 ]
Clone Set: dlm-clone [dlm]
Started: [ pcmk-1 pcmk-2 ]
- Clone Set: ClusterIP-clone [ClusterIP] (unique)
- ClusterIP:0 (ocf::heartbeat:IPaddr2): Started pcmk-2
- ClusterIP:1 (ocf::heartbeat:IPaddr2): Started pcmk-1
+ ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1
Clone Set: WebFS-clone [WebFS]
Started: [ pcmk-1 pcmk-2 ]
- Clone Set: WebSite-clone [WebSite]
- Started: [ pcmk-1 pcmk-2 ]
+ WebSite (ocf::heartbeat:apache): Started pcmk-1
Daemon Status:
corosync: active/disabled
pacemaker: active/disabled
pcsd: active/enabled
----
----
[root@pcmk-1 ~]# pcs cluster cib --config
----
[source,XML]
----
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
-
+
+
----
=== Node List ===
----
[root@pcmk-1 ~]# pcs status nodes
Pacemaker Nodes:
Online: pcmk-1 pcmk-2
Standby:
Maintenance:
Offline:
Pacemaker Remote Nodes:
Online:
Standby:
Maintenance:
Offline:
----
=== Cluster Options ===
----
[root@pcmk-1 ~]# pcs property
Cluster Properties:
cluster-infrastructure: corosync
cluster-name: mycluster
dc-version: 1.1.18-11.el7_5.3-2b07d5c5a9
have-watchdog: false
last-lrm-refresh: 1536679009
stonith-enabled: true
----
The output shows state information automatically obtained about the cluster, including:
* *cluster-infrastructure* - the cluster communications layer in use
* *cluster-name* - the cluster name chosen by the administrator when the cluster was created
* *dc-version* - the version (including upstream source-code hash) of Pacemaker
used on the Designated Controller, which is the node elected to determine what
actions are needed when events occur
The output also shows options set by the administrator that control the way the cluster operates, including:
* *stonith-enabled=true* - whether the cluster is allowed to use STONITH resources
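Administrator-controlled options like this are changed with `pcs property set`.
As a minimal sketch (reusing the value already in effect above):
----
# pcs property set stonith-enabled=true
----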
=== Resources ===
==== Default Options ====
----
[root@pcmk-1 ~]# pcs resource defaults
resource-stickiness: 100
----
This shows resource option defaults that apply to every resource that does not
explicitly set the option itself. Above:
* *resource-stickiness* - how strongly a resource prefers to stay on the node where it is currently running, discouraging the cluster from moving healthy resources unnecessarily
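A default like this is set with `pcs resource defaults`. As a minimal sketch
(reusing the value shown above):
----
# pcs resource defaults resource-stickiness=100
----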
==== Fencing ====
----
[root@pcmk-1 ~]# pcs stonith show
ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1
[root@pcmk-1 ~]# pcs stonith show ipmi-fencing
Resource: ipmi-fencing (class=stonith type=fence_ipmilan)
Attributes: ipaddr="10.0.0.1" login="testuser" passwd="acd123" pcmk_host_list="pcmk-1 pcmk-2"
Operations: monitor interval=60s (fence-monitor-interval-60s)
----
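For reference, a fencing resource like the one above is typically created along
these lines (a sketch; the IPMI address and credentials must match your
hardware, and here simply mirror the attributes shown):
----
# pcs stonith create ipmi-fencing fence_ipmilan \
      pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser \
      passwd=acd123 op monitor interval=60s
----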
==== Service Address ====
Users of the services provided by the cluster require an unchanging
-address with which to access it. Additionally, we cloned the address so
-it will be active on both nodes. An iptables rule (created as part of the
-resource agent) is used to ensure that each request only gets processed by one
-of the two clone instances. The additional meta options tell the cluster
-that we want two instances of the clone (one "request bucket" for each
-node) and that if one node fails, then the remaining node should hold
-both.
+address with which to access them.
----
-[root@pcmk-1 ~]# pcs resource show ClusterIP-clone
- Clone: ClusterIP-clone
- Meta Attrs: clone-max=2 clone-node-max=2 globally-unique=true
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: cidr_netmask=24 ip=192.168.122.120 clusterip_hash=sourceip
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- start interval=0s timeout=20s (ClusterIP-start-interval-0s)
- stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
+[root@pcmk-1 ~]# pcs resource show ClusterIP
+Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=24 ip=192.168.122.120 clusterip_hash=sourceip
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ start interval=0s timeout=20s (ClusterIP-start-interval-0s)
+ stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
----
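A resource with this configuration could be created along these lines (a
sketch; the values mirror the output above, and any extra attributes would be
added afterward with `pcs resource update`):
----
# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \
      ip=192.168.122.120 cidr_netmask=24 op monitor interval=30s
----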
==== DRBD - Shared Storage ====
Here, we define the DRBD service and specify which DRBD resource (from
/etc/drbd.d/*.res) it should manage. We make it a master clone resource and, in
order to have an active/active setup, allow both instances to be promoted to master
at the same time. We also set the notify option so that the
cluster will tell the DRBD agent when its peer changes state.
----
[root@pcmk-1 ~]# pcs resource show WebDataClone
Master: WebDataClone
Meta Attrs: master-node-max=1 clone-max=2 notify=true master-max=2 clone-node-max=1
Resource: WebData (class=ocf provider=linbit type=drbd)
Attributes: drbd_resource=wwwdata
Operations: demote interval=0s timeout=90 (WebData-demote-interval-0s)
monitor interval=60s (WebData-monitor-interval-60s)
notify interval=0s timeout=90 (WebData-notify-interval-0s)
promote interval=0s timeout=90 (WebData-promote-interval-0s)
reload interval=0s timeout=30 (WebData-reload-interval-0s)
start interval=0s timeout=240 (WebData-start-interval-0s)
stop interval=0s timeout=100 (WebData-stop-interval-0s)
[root@pcmk-1 ~]# pcs constraint ref WebDataClone
Resource: WebDataClone
colocation-WebFS-WebDataClone-INFINITY
order-WebDataClone-WebFS-mandatory
----
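Such a resource is typically built in two steps, first creating the DRBD
primitive and then wrapping it in a master resource (a sketch; the meta
attribute values mirror the output above):
----
# pcs resource create WebData ocf:linbit:drbd \
      drbd_resource=wwwdata op monitor interval=60s
# pcs resource master WebDataClone WebData \
      master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
----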
==== Cluster Filesystem ====
The cluster filesystem ensures that files are read and written correctly
across all nodes. We need to specify the block device (provided by DRBD),
where we want it mounted, and that we are using GFS2. Again, it is a clone because it is
intended to be active on both nodes. The additional constraints ensure
that it can only be started on nodes with active DLM and DRBD instances.
----
[root@pcmk-1 ~]# pcs resource show WebFS-clone
Clone: WebFS-clone
Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
Attributes: device=/dev/drbd1 directory=/var/www/html fstype=gfs2
Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20)
notify interval=0s timeout=60 (WebFS-notify-interval-0s)
start interval=0s timeout=60 (WebFS-start-interval-0s)
stop interval=0s timeout=60 (WebFS-stop-interval-0s)
[root@pcmk-1 ~]# pcs constraint ref WebFS-clone
Resource: WebFS-clone
colocation-WebFS-WebDataClone-INFINITY
colocation-WebSite-WebFS-INFINITY
colocation-WebFS-dlm-clone-INFINITY
order-WebDataClone-WebFS-mandatory
order-WebFS-WebSite-mandatory
order-dlm-clone-WebFS-mandatory
----
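A filesystem resource with this configuration could be created and cloned
along these lines (a sketch; the values mirror the output above):
----
# pcs resource create WebFS Filesystem \
      device="/dev/drbd1" directory="/var/www/html" fstype="gfs2"
# pcs resource clone WebFS
----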
==== Apache ====
Lastly, we have the actual service, Apache. We need only tell the cluster
where to find its main configuration file and restrict it to running on
-nodes that have the required filesystem mounted and the IP address active.
+a node that has the required filesystem mounted and the IP address active.
----
-[root@pcmk-1 ~]# pcs resource show WebSite-clone
- Clone: WebSite-clone
- Resource: WebSite (class=ocf provider=heartbeat type=apache)
- Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
- Operations: monitor interval=1min (WebSite-monitor-interval-1min)
- start interval=0s timeout=40s (WebSite-start-interval-0s)
- stop interval=0s timeout=60s (WebSite-stop-interval-0s)
-[root@pcmk-1 ~]# pcs constraint ref WebSite-clone
-Resource: WebSite-clone
+[root@pcmk-1 ~]# pcs resource show WebSite
+Resource: WebSite (class=ocf provider=heartbeat type=apache)
+ Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
+ Operations: monitor interval=1min (WebSite-monitor-interval-1min)
+ start interval=0s timeout=40s (WebSite-start-interval-0s)
+ stop interval=0s timeout=60s (WebSite-stop-interval-0s)
+[root@pcmk-1 ~]# pcs constraint ref WebSite
+Resource: WebSite
colocation-WebSite-ClusterIP-INFINITY
colocation-WebSite-WebFS-INFINITY
order-ClusterIP-WebSite-mandatory
order-WebFS-WebSite-mandatory
----
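For reference, the Apache resource above could be defined with something like
this (a sketch; the values mirror the output shown):
----
# pcs resource create WebSite ocf:heartbeat:apache \
      configfile=/etc/httpd/conf/httpd.conf \
      statusurl="http://localhost/server-status" \
      op monitor interval=1min
----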
diff --git a/doc/Clusters_from_Scratch/en-US/Book_Info.xml b/doc/Clusters_from_Scratch/en-US/Book_Info.xml
index 9fcbdf0a10..2eec85c420 100644
--- a/doc/Clusters_from_Scratch/en-US/Book_Info.xml
+++ b/doc/Clusters_from_Scratch/en-US/Book_Info.xml
@@ -1,71 +1,71 @@
%BOOK_ENTITIES;
]>
Clusters from Scratch
Step-by-Step Instructions for Building Your First High-Availability Cluster
Pacemaker
2.0
11
- 0
+ 1
This document provides a step-by-step guide to building a simple high-availability cluster using Pacemaker.
The example cluster will use:
&DISTRO; &DISTRO_VERSION; as the host operating system
Corosync to provide messaging and membership services,
Pacemaker 1.1.18
While this guide is part of the document set for
Pacemaker 2.0, it demonstrates the version available in
the standard &DISTRO; repositories.
DRBD as a cost-effective alternative to shared storage,
GFS2 as the cluster filesystem (in active/active mode)
Given the graphical nature of the install process, a number of
screenshots are included. However, the guide is primarily composed of
commands, the reasons for executing them, and their expected outputs.
diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
index 0923666353..57e376e484 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt
@@ -1,383 +1,272 @@
:compat-mode: legacy
-= Convert Cluster to Active/Active =
+= Convert Storage to Active/Active =
The primary requirement for an Active/Active cluster is that the data
required for your services is available, simultaneously, on both
machines. Pacemaker makes no requirement on how this is achieved; you
could use a SAN if you had one available, but since DRBD supports
multiple Primaries, we can continue to use it here.
== Install Cluster Filesystem Software ==
The only hitch is that we need to use a cluster-aware filesystem. The
one we used earlier with DRBD, xfs, is not one of those. Both OCFS2
and GFS2 are supported; here, we will use GFS2.
On both nodes, install the GFS2 command-line utilities and the
Distributed Lock Manager (DLM) required by cluster filesystems:
----
# yum install -y gfs2-utils dlm
----
== Configure the Cluster for the DLM ==
-The DLM needs to run on both nodes, so we'll start by creating a resource for
-it (using the *ocf:pacemaker:controld* resource script), and clone it:
+The DLM control daemon needs to run on both nodes, so we'll start by creating a
+resource for it (using the *ocf:pacemaker:controld* resource agent), and clone
+it:
----
[root@pcmk-1 ~]# pcs cluster cib dlm_cfg
[root@pcmk-1 ~]# pcs -f dlm_cfg resource create dlm \
ocf:pacemaker:controld op monitor interval=60s
[root@pcmk-1 ~]# pcs -f dlm_cfg resource clone dlm clone-max=2 clone-node-max=1
[root@pcmk-1 ~]# pcs -f dlm_cfg resource show
ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1
WebSite (ocf::heartbeat:apache): Started pcmk-1
Master/Slave Set: WebDataClone [WebData]
Masters: [ pcmk-1 ]
Slaves: [ pcmk-2 ]
WebFS (ocf::heartbeat:Filesystem): Started pcmk-1
Clone Set: dlm-clone [dlm]
Stopped: [ pcmk-1 pcmk-2 ]
----
Activate our new configuration, and see how the cluster responds:
----
[root@pcmk-1 ~]# pcs cluster cib-push dlm_cfg --config
CIB updated
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: pcmk-1 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
Last updated: Tue Sep 11 10:18:30 2018
Last change: Tue Sep 11 10:16:49 2018 by hacluster via crmd on pcmk-2
2 nodes configured
8 resources configured
Online: [ pcmk-1 pcmk-2 ]
Full list of resources:
ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1
ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1
WebSite (ocf::heartbeat:apache): Started pcmk-1
Master/Slave Set: WebDataClone [WebData]
Masters: [ pcmk-1 ]
Slaves: [ pcmk-2 ]
WebFS (ocf::heartbeat:Filesystem): Started pcmk-1
Clone Set: dlm-clone [dlm]
Started: [ pcmk-1 pcmk-2 ]
Daemon Status:
corosync: active/disabled
pacemaker: active/disabled
pcsd: active/enabled
----
[[GFS2_prep]]
== Create and Populate GFS2 Filesystem ==
Before we do anything to the existing partition, we need to make sure it
is unmounted. We do this by telling the cluster to stop the WebFS resource.
This will ensure that other resources (in our case, Apache) using WebFS
are not only stopped, but stopped in the correct order.
----
[root@pcmk-1 ~]# pcs resource disable WebFS
[root@pcmk-1 ~]# pcs resource
ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1
WebSite (ocf::heartbeat:apache): Stopped
Master/Slave Set: WebDataClone [WebData]
Masters: [ pcmk-1 ]
Slaves: [ pcmk-2 ]
WebFS (ocf::heartbeat:Filesystem): Stopped (disabled)
Clone Set: dlm-clone [dlm]
Started: [ pcmk-1 pcmk-2 ]
----
You can see that both Apache and WebFS have been stopped,
and that *pcmk-1* is the current master for the DRBD device.
Now we can create a new GFS2 filesystem on the DRBD device.
[WARNING]
=========
This will erase all previous content stored on the DRBD device. Ensure
you have a copy of any important data.
=========
[IMPORTANT]
===========
Run the next command on whichever node has the DRBD Primary role.
Otherwise, you will receive the message:
-----
/dev/drbd1: Read-only file system
-----
===========
-----
[root@pcmk-1 ~]# mkfs.gfs2 -p lock_dlm -j 2 -t mycluster:web /dev/drbd1
It appears to contain an existing filesystem (xfs)
This will destroy any data on /dev/drbd1
Are you sure you want to proceed? [y/n] y
Discarding device contents (may take a while on large devices): Done
Adding journals: Done
Building resource groups: Done
Creating quota file: Done
Writing superblock and syncing: Done
Device: /dev/drbd1
Block size: 4096
Device size: 0.50 GB (131059 blocks)
Filesystem size: 0.50 GB (131056 blocks)
Journals: 2
Resource groups: 3
Locking protocol: "lock_dlm"
Lock table: "mycluster:web"
UUID: 0bcbffab-cada-4105-94d1-be8a26669ee0
-----
The `mkfs.gfs2` command required a number of additional parameters:
* `-p lock_dlm` specifies that we want to use the
kernel's DLM.
* `-j 2` indicates that the filesystem should reserve enough
space for two journals (one for each node that will access the filesystem).
* `-t mycluster:web` specifies the lock table name. The format for
this field is +pass:[clustername:fsname]+. For
+pass:[clustername]+, we need to use the same
value we specified originally with `pcs cluster setup --name` (which is also
the value of *cluster_name* in +/etc/corosync/corosync.conf+).
If you are unsure what your cluster name is, you can look in
+/etc/corosync/corosync.conf+ or execute the command
`pcs cluster corosync pcmk-1 | grep cluster_name`.
Now we can (re-)populate the new filesystem with data
(web pages). We'll create yet another variation on our home page.
-----
[root@pcmk-1 ~]# mount /dev/drbd1 /mnt
[root@pcmk-1 ~]# cat <<-END >/mnt/index.html
 <html>
 <body>My Test Site - GFS2</body>
 </html>
END
[root@pcmk-1 ~]# chcon -R --reference=/var/www/html /mnt
[root@pcmk-1 ~]# umount /dev/drbd1
[root@pcmk-1 ~]# drbdadm verify wwwdata
-----
== Reconfigure the Cluster for GFS2 ==
With the WebFS resource stopped, let's update the configuration.
----
[root@pcmk-1 ~]# pcs resource show WebFS
Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
Attributes: device=/dev/drbd1 directory=/var/www/html fstype=xfs
Meta Attrs: target-role=Stopped
Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20)
notify interval=0s timeout=60 (WebFS-notify-interval-0s)
start interval=0s timeout=60 (WebFS-start-interval-0s)
stop interval=0s timeout=60 (WebFS-stop-interval-0s)
----
The fstype option needs to be updated to *gfs2* instead of *xfs*.
----
[root@pcmk-1 ~]# pcs resource update WebFS fstype=gfs2
[root@pcmk-1 ~]# pcs resource show WebFS
Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
Attributes: device=/dev/drbd1 directory=/var/www/html fstype=gfs2
Meta Attrs: target-role=Stopped
Operations: monitor interval=20 timeout=40 (WebFS-monitor-interval-20)
notify interval=0s timeout=60 (WebFS-notify-interval-0s)
start interval=0s timeout=60 (WebFS-start-interval-0s)
stop interval=0s timeout=60 (WebFS-stop-interval-0s)
----
GFS2 requires that DLM be running, so we also need to set up new colocation
and ordering constraints for it:
----
[root@pcmk-1 ~]# pcs constraint colocation add WebFS with dlm-clone INFINITY
[root@pcmk-1 ~]# pcs constraint order dlm-clone then WebFS
Adding dlm-clone WebFS (kind: Mandatory) (Options: first-action=start then-action=start)
----
-== Clone the IP address ==
+== Clone the Filesystem Resource ==
-There's no point making the services active on both locations if we can't
-reach them both, so let's clone the IP address.
+Now that we have a cluster filesystem ready to go, we can configure the cluster
+so both nodes mount the filesystem.
-The *IPaddr2* resource agent has built-in intelligence for when it is configured
-as a clone. It will utilize a multicast MAC address to have the local switch
-send the relevant packets to all nodes in the cluster, together with *iptables
-clusterip* rules on the nodes so that any given packet will be grabbed by
-exactly one node. This will give us a simple but effective form of
-load-balancing requests between our two nodes.
-
-Let's start a new config, and clone our IP:
-----
-[root@pcmk-1 ~]# pcs cluster cib loadbalance_cfg
-[root@pcmk-1 ~]# pcs -f loadbalance_cfg resource clone ClusterIP \
- clone-max=2 clone-node-max=2 globally-unique=true
-----
-
-* `clone-max=2` tells the resource agent to split packets this many ways. This
-should equal the number of nodes that can host the IP.
-* `clone-node-max=2` says that one node can run up to 2 instances
-of the clone. This should also equal the number of nodes that can
-host the IP, so that if any node goes down, another node can take over
-the failed node's "request bucket". Otherwise, requests intended for
-the failed node would be discarded.
-* `globally-unique=true` tells the cluster that one clone isn't identical
-to another (each handles a different "bucket"). This also tells the resource
-agent to insert *iptables* rules so each host only processes packets in its
-bucket(s).
-
-Notice that when the ClusterIP becomes a clone, the constraints
-referencing ClusterIP now reference the clone. This is
-done automatically by pcs.
-----
-[root@pcmk-1 ~]# pcs -f loadbalance_cfg constraint
-Location Constraints:
-Ordering Constraints:
- start ClusterIP-clone then start WebSite (kind:Mandatory)
- promote WebDataClone then start WebFS (kind:Mandatory)
- start WebFS then start WebSite (kind:Mandatory)
- start dlm-clone then start WebFS (kind:Mandatory)
-Colocation Constraints:
- WebSite with ClusterIP-clone (score:INFINITY)
- WebFS with WebDataClone (score:INFINITY) (with-rsc-role:Master)
- WebSite with WebFS (score:INFINITY)
- WebFS with dlm-clone (score:INFINITY)
-Ticket Constraints:
-----
-
-Now we must tell the resource how to decide which requests are
-processed by which hosts. To do this, we specify the *clusterip_hash* parameter.
-The value of *sourceip* means that the source IP address of incoming packets
-will be hashed; each node will process a certain range of hashes.
-
-----
-[root@pcmk-1 ~]# pcs -f loadbalance_cfg resource update ClusterIP clusterip_hash=sourceip
-----
-
-Load our configuration to the cluster, and see how it responds.
------
-[root@pcmk-1 ~]# pcs cluster cib-push loadbalance_cfg --config
-CIB updated
-[root@pcmk-1 ~]# pcs status
-Cluster name: mycluster
-Stack: corosync
-Current DC: pcmk-1 (version 1.1.18-11.el7_5.3-2b07d5c5a9) - partition with quorum
-Last updated: Tue Sep 11 10:36:38 2018
-Last change: Tue Sep 11 10:36:33 2018 by root via cibadmin on pcmk-1
-
-2 nodes configured
-9 resources configured (1 DISABLED)
-
-Online: [ pcmk-1 pcmk-2 ]
-
-Full list of resources:
-
- ipmi-fencing (stonith:fence_ipmilan): Started pcmk-1
- WebSite (ocf::heartbeat:apache): Stopped
- Master/Slave Set: WebDataClone [WebData]
- Masters: [ pcmk-1 ]
- Slaves: [ pcmk-2 ]
- WebFS (ocf::heartbeat:Filesystem): Stopped (disabled)
- Clone Set: dlm-clone [dlm]
- Started: [ pcmk-1 pcmk-2 ]
- Clone Set: ClusterIP-clone [ClusterIP] (unique)
- ClusterIP:0 (ocf::heartbeat:IPaddr2): Started pcmk-2
- ClusterIP:1 (ocf::heartbeat:IPaddr2): Started pcmk-1
-
-Daemon Status:
- corosync: active/disabled
- pacemaker: active/disabled
- pcsd: active/enabled
------
-
-If desired, you can demonstrate that all request buckets are working
-by using a tool such as `arping` from several source hosts
-to see which host responds to each.
-
-== Clone the Filesystem and Apache Resources ==
-
-Now that we have a cluster filesystem ready to go,
-and our nodes can load-balance requests to a shared IP address,
-we can configure the cluster so both nodes mount the filesystem
-and respond to web requests.
-
-Clone the filesystem and Apache resources in a new configuration.
+Clone the filesystem resource in a new configuration.
Notice how pcs automatically updates the relevant constraints again.
----
[root@pcmk-1 ~]# pcs cluster cib active_cfg
[root@pcmk-1 ~]# pcs -f active_cfg resource clone WebFS
-[root@pcmk-1 ~]# pcs -f active_cfg resource clone WebSite
[root@pcmk-1 ~]# pcs -f active_cfg constraint
Location Constraints:
Ordering Constraints:
- start ClusterIP-clone then start WebSite-clone (kind:Mandatory)
+ start ClusterIP then start WebSite (kind:Mandatory)
promote WebDataClone then start WebFS-clone (kind:Mandatory)
- start WebFS-clone then start WebSite-clone (kind:Mandatory)
+ start WebFS-clone then start WebSite (kind:Mandatory)
start dlm-clone then start WebFS-clone (kind:Mandatory)
Colocation Constraints:
- WebSite-clone with ClusterIP-clone (score:INFINITY)
+ WebSite with ClusterIP (score:INFINITY)
WebFS-clone with WebDataClone (score:INFINITY) (with-rsc-role:Master)
- WebSite-clone with WebFS-clone (score:INFINITY)
+ WebSite with WebFS-clone (score:INFINITY)
WebFS-clone with dlm-clone (score:INFINITY)
Ticket Constraints:
----
Tell the cluster that it is now allowed to promote both instances to be DRBD
Primary (also known as master).
-----
[root@pcmk-1 ~]# pcs -f active_cfg resource update WebDataClone master-max=2
-----
Finally, load our configuration to the cluster, and re-enable the WebFS resource
(which we disabled earlier).
-----
[root@pcmk-1 ~]# pcs cluster cib-push active_cfg --config
CIB updated
[root@pcmk-1 ~]# pcs resource enable WebFS
-----
After all the processes are started, the status should look similar to this.
-----
[root@pcmk-1 ~]# pcs resource
Master/Slave Set: WebDataClone [WebData]
Masters: [ pcmk-1 pcmk-2 ]
Clone Set: dlm-clone [dlm]
Started: [ pcmk-1 pcmk-2 ]
- Clone Set: ClusterIP-clone [ClusterIP] (unique)
- ClusterIP:0 (ocf::heartbeat:IPaddr2): Started pcmk-2
- ClusterIP:1 (ocf::heartbeat:IPaddr2): Started pcmk-1
+ ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1
Clone Set: WebFS-clone [WebFS]
Started: [ pcmk-1 pcmk-2 ]
- Clone Set: WebSite-clone [WebSite]
- Started: [ pcmk-1 pcmk-2 ]
+ WebSite (ocf::heartbeat:apache): Started pcmk-1
-----
== Test Failover ==
Testing failover is left as an exercise for the reader.
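One way to do so (a sketch, using the pcs 0.9 syntax used throughout this
guide; with pcs 0.10, use `pcs node standby` and `pcs node unstandby` instead)
is to put a node into standby mode, confirm that its resources move to the
other node, and then bring it back:
-----
[root@pcmk-1 ~]# pcs cluster standby pcmk-1
[root@pcmk-1 ~]# pcs status
[root@pcmk-1 ~]# pcs cluster unstandby pcmk-1
-----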
-For example, you can put one node into standby mode,
-use `pcs status` to confirm that its ClusterIP clone was
-moved to the other node, and use `arping` to verify that
-packets are not being lost from any source host.
-
-[NOTE]
-====
-You may find that when a failed node rejoins the cluster,
-both ClusterIP clones stay on one node, due to the
-resource stickiness. While this works fine, it effectively eliminates
-load-balancing and returns the cluster to an active-passive setup again.
-You can avoid this by disabling stickiness for the IP address resource:
-----
-[root@pcmk-1 ~]# pcs resource meta ClusterIP resource-stickiness=0
-----
-====
+
+With this configuration, the data is now active/active. The website
+administrator can change HTML files on either node, and the live website will
+show the changes even if it is running on the other node.
+
+If the web server is configured to listen on all IP addresses, it is possible
+to remove the constraints between the WebSite and ClusterIP resources, and
+clone the WebSite resource. The web server would always be ready to serve web
+pages, and only the IP address would need to be moved in a failover.
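A sketch of what that might look like, assuming the constraint IDs shown in the
configuration recap:
-----
# pcs constraint remove colocation-WebSite-ClusterIP-INFINITY
# pcs constraint remove order-ClusterIP-WebSite-mandatory
# pcs resource clone WebSite
-----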
diff --git a/doc/Clusters_from_Scratch/en-US/Revision_History.xml b/doc/Clusters_from_Scratch/en-US/Revision_History.xml
index 77a3b2cdde..8a61d586e7 100644
--- a/doc/Clusters_from_Scratch/en-US/Revision_History.xml
+++ b/doc/Clusters_from_Scratch/en-US/Revision_History.xml
@@ -1,205 +1,219 @@
%BOOK_ENTITIES;
]>
Revision History
1-0
Mon May 17 2010
AndrewBeekhof
andrew@beekhof.net
Import from Pages.app
2-0
Wed Sep 22 2010
RaoulScarazzini
rasca@miamammausalinux.org
Italian translation
3-0
Wed Feb 9 2011
Andrew
Beekhof
andrew@beekhof.net
Updated for Fedora 13
4-0
Wed Oct 5 2011
Andrew
Beekhof
andrew@beekhof.net
Update the GFS2 section to use CMAN
5-0
Fri Feb 10 2012
Andrew
Beekhof
andrew@beekhof.net
Generate docbook content from asciidoc sources
6-0
Tues July 3 2012
AndrewBeekhof
andrew@beekhof.net
Updated for Fedora 17
7-0
Fri Sept 14 2012
DavidVossel
davidvossel@gmail.com
Updated for pcs
8-0
Mon Jan 05 2015
KenGaillot
kgaillot@redhat.com
Updated for Fedora 21
8-1
Thu Jan 08 2015
KenGaillot
kgaillot@redhat.com
Minor corrections, plus use include file for intro
9-0
Fri Aug 14 2015
KenGaillot
kgaillot@redhat.com
Update for CentOS 7.1 and leaving firewalld/SELinux enabled
10-0
Fri Jan 12 2018
KenGaillot
kgaillot@redhat.com
Update banner for Pacemaker 2.0 and content for CentOS 7.4 with Pacemaker 1.1.16
10-1
Wed Sep 5 2018
KenGaillot
kgaillot@redhat.com
Update for CentOS 7.5 with Pacemaker 1.1.18
10-2
Fri Dec 7 2018
KenGaillot
kgaillot@redhat.com
JanPokorný
jpokorny@redhat.com
ChrisLumens
clumens@redhat.com
Minor clarifications and formatting changes
11-0
Thu Jul 18 2019
TomasJelinek
tojeline@redhat.com
Note differences in pcs 0.10
+
+ 11-1
+ Thu Nov 21 2019
+
+ KenGaillot
+ kgaillot@redhat.com
+
+
+
+ Remove references to obsolete cloned IP usage, and
+ reorganize chapters a bit
+
+
+