diff --git a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt index c920ce3a40..0479971c46 100644 --- a/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt +++ b/doc/Clusters_from_Scratch/en-US/Ap-Configuration.txt @@ -1,192 +1,192 @@ [appendix] == Configuration Recap == === Final Cluster Configuration === ..... # crm configure show node pcmk-1 node pcmk-2 primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \ op monitor interval="30s" primitive ipmi-fencing stonith::fence_ipmilan \ params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123 \ op monitor interval="60s" ms WebDataClone WebData \ meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" clone WebFSClone WebFS clone WebIP ClusterIP \ meta globally-unique="true" clone-max="2" clone-node-max="2" clone WebSiteClone WebSite colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone colocation fs_on_drbd inf: WebFSClone WebDataClone:Master colocation website-with-ip inf: WebSiteClone WebIP order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start order WebSite-after-WebFS inf: WebFSClone WebSiteClone order apache-after-ip inf: WebIP WebSiteClone property $id="cib-bootstrap-options" \ dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \ cluster-infrastructure="openais" \ expected-quorum-votes="2" \ stonith-enabled="true" \ no-quorum-policy="ignore" rsc_defaults $id="rsc-options" \ resource-stickiness="100" ..... === Node List === The list of cluster nodes is automatically populated by the cluster. ..... node pcmk-1 node pcmk-2 ..... === Cluster Options === This is where the cluster automatically stores some information about the cluster * dc-version - the version (including upstream source-code hash) of Pacemaker used on the DC * cluster-infrastructure - the cluster infrastructure being used (heartbeat or openais) * expected-quorum-votes - the maximum number of nodes expected to be part of the cluster and where the admin can set options that control the way the cluster operates * stonith-enabled=true - Make use of STONITH * no-quorum-policy=ignore - Ignore loss of quorum and continue to host resources. ..... property $id="cib-bootstrap-options" \ dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \ cluster-infrastructure="openais" \ expected-quorum-votes="2" \ stonith-enabled="true" \ no-quorum-policy="ignore" ..... === Resources === ==== Default Options ==== Here we configure cluster options that apply to every resource. * resource-stickiness - Specify the aversion to moving resources to other machines ..... rsc_defaults $id="rsc-options" \ resource-stickiness="100" ..... ==== Fencing ==== [NOTE] ======= TODO: Add text here ======= ..... primitive ipmi-fencing stonith::fence_ipmilan \ params pcmk_host_list="pcmk-1 pcmk-2" ipaddr=10.0.0.1 login=testuser passwd=abc123 \ op monitor interval="60s" clone Fencing rsa-fencing ..... ==== Service Address ==== Users of the services provided by the cluster require an unchanging address with which to access it. 
Additionally, we cloned the address so it will be active on both nodes. An iptables rule (created as part of the resource agent) is used to ensure that each request only gets processed by one of the two clone instances. The additional meta options tell the cluster that we want two instances of the clone (one "request bucket" for each node) and that if one node fails, then the remaining node should hold both. ..... primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \ op monitor interval="30s" -clone WebIP ClusterIP +clone WebIP ClusterIP meta globally-unique="true" clone-max="2" clone-node-max="2" ..... [NOTE] ======= TODO: The RA should check for globally-unique=true when cloned ======= ==== DRBD - Shared Storage ==== Here we define the DRBD service and specify which DRBD resource (from drbd.conf) it should manage. We make it a master/slave resource and, in order to have an active/active setup, allow both instances to be promoted by specifying master-max=2. We also set the notify option so that the cluster will tell the DRBD agent when its peer changes state. ..... primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" ms WebDataClone WebData \ meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" ..... ==== Cluster Filesystem ==== The cluster filesystem ensures that files are read and written correctly. We need to specify the block device (provided by DRBD), where we want it mounted and that we are using GFS2. Again it is a clone because it is intended to be active on both nodes. The additional constraints ensure that it can only be started on nodes with active gfs-control and drbd instances. ..... primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2" clone WebFSClone WebFS colocation WebFS-with-gfs-control inf: WebFSClone gfs-clone colocation fs_on_drbd inf: WebFSClone WebDataClone:Master order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start order start-WebFS-after-gfs-control inf: gfs-clone WebFSClone ..... ==== Apache ==== Lastly we have the actual service, Apache. We need only tell the cluster where to find its main configuration file and restrict it to running on nodes that have the required filesystem mounted and the IP address active. ..... primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" clone WebSiteClone WebSite colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone colocation website-with-ip inf: WebSiteClone WebIP order apache-after-ip inf: WebIP WebSiteClone order WebSite-after-WebFS inf: WebFSClone WebSiteClone ..... diff --git a/doc/Clusters_from_Scratch/en-US/Book_Info.xml b/doc/Clusters_from_Scratch/en-US/Book_Info.xml index 55fa59bcc9..9d8e0a5572 100644 --- a/doc/Clusters_from_Scratch/en-US/Book_Info.xml +++ b/doc/Clusters_from_Scratch/en-US/Book_Info.xml @@ -1,66 +1,66 @@ %BOOK_ENTITIES; ]> Clusters from Scratch Creating Active/Passive and Active/Active Clusters on Fedora Pacemaker 1.1 5 0 The purpose of this document is to provide a start-to-finish guide to building an example active/passive cluster with Pacemaker and show how it can be converted to an active/active one.
- The example cluster will use: + The example cluster will use: &DISTRO; &DISTRO_VERSION; as the host operating system Corosync to provide messaging and membership services, Pacemaker to perform resource management, DRBD as a cost-effective alternative to shared storage, GFS2 as the cluster filesystem (in active/active mode) The crm shell for displaying the configuration and making changes Given the graphical nature of the Fedora install process, a number of screenshots are included. However the guide is primarily composed of commands, the reasons for executing them and their expected outputs. diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt index 2fa311a314..f8e255df46 100644 --- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt +++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Active.txt @@ -1,537 +1,537 @@ = Conversion to Active/Active = == Requirements == The primary requirement for an Active/Active cluster is that the data required for your services is available, simultaneously, on both machines. Pacemaker makes no requirement on how this is achieved, you could use a SAN if you had one available, however since DRBD supports multiple Primaries, we can also use that. The only hitch is that we need to use a cluster-aware filesystem. The one we used earlier with DRBD, ext4, is not one of those. Both OCFS2 and GFS2 are supported, however here we will use GFS2 which comes with Fedora 17. === Installing the required Software === [source,Bash] ----- -# yum install -y gfs2-utils dlm kernel-modules-extra +# yum install -y gfs2-utils dlm kernel-modules-extra Loaded plugins: langpacks, presto, refresh-packagekit Resolving Dependencies --> Running transaction check ---> Package dlm.x86_64 0:3.99.4-1.fc17 will be installed ---> Package gfs2-utils.x86_64 0:3.1.4-3.fc17 will be installed ---> Package kernel-modules-extra.x86_64 0:3.4.4-3.fc17 will be installed --> Finished Dependency Resolution Dependencies Resolved ================================================================================ Package Arch Version Repository Size ================================================================================ Installing: dlm x86_64 3.99.4-1.fc17 updates 83 k gfs2-utils x86_64 3.1.4-3.fc17 fedora 214 k kernel-modules-extra x86_64 3.4.4-3.fc17 updates 1.7 M Transaction Summary ================================================================================ Install 3 Packages Total download size: 1.9 M Installed size: 7.7 M Downloading Packages: (1/3): dlm-3.99.4-1.fc17.x86_64.rpm | 83 kB 00:00 (2/3): gfs2-utils-3.1.4-3.fc17.x86_64.rpm | 214 kB 00:00 -(3/3): kernel-modules-extra-3.4.4-3.fc17.x86_64.rpm | 1.7 MB 00:01 +(3/3): kernel-modules-extra-3.4.4-3.fc17.x86_64.rpm | 1.7 MB 00:01 ------------------------------------------------------------------------------- Total 615 kB/s | 1.9 MB 00:03 Running Transaction Check Running Transaction Test Transaction Test Succeeded Running Transaction - Installing : kernel-modules-extra-3.4.4-3.fc17.x86_64 1/3 - Installing : gfs2-utils-3.1.4-3.fc17.x86_64 2/3 - Installing : dlm-3.99.4-1.fc17.x86_64 3/3 - Verifying : dlm-3.99.4-1.fc17.x86_64 1/3 - Verifying : gfs2-utils-3.1.4-3.fc17.x86_64 2/3 - Verifying : kernel-modules-extra-3.4.4-3.fc17.x86_64 3/3 + Installing : kernel-modules-extra-3.4.4-3.fc17.x86_64 1/3 + Installing : gfs2-utils-3.1.4-3.fc17.x86_64 2/3 + Installing : dlm-3.99.4-1.fc17.x86_64 3/3 + Verifying : dlm-3.99.4-1.fc17.x86_64 1/3 + Verifying : gfs2-utils-3.1.4-3.fc17.x86_64 2/3 
+ Verifying : kernel-modules-extra-3.4.4-3.fc17.x86_64 3/3 Installed: dlm.x86_64 0:3.99.4-1.fc17 gfs2-utils.x86_64 0:3.1.4-3.fc17 - kernel-modules-extra.x86_64 0:3.4.4-3.fc17 + kernel-modules-extra.x86_64 0:3.4.4-3.fc17 Complete! ----- == Create a GFS2 Filesystem == [[GFS2_prep]] === Preparation === Before we do anything to the existing partition, we need to make sure it is unmounted. We do this by telling the cluster to stop the WebFS resource. This will ensure that other resources (in our case, Apache) using WebFS are not only stopped, but stopped in the correct order. [source,Bash] ----- # crm resource stop WebFS # crm_mon -1 ============ Last updated: Tue Apr 3 14:07:36 2012 Last change: Tue Apr 3 14:07:15 2012 via cibadmin on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 5 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 Master/Slave Set: WebDataClone [WebData] Masters: [ pcmk-2 ] Slaves: [ pcmk-1 ] ----- [NOTE] ======= Note that both Apache and WebFS have been stopped. ======= === Create and Populate a GFS2 Partition === Now that the cluster stack and integration pieces are running smoothly, we can create a GFS2 partition. [WARNING] ========= This will erase all previous content stored on the DRBD device. Ensure you have a copy of any important data. ========= We need to specify a number of additional parameters when creating a GFS2 partition. First we must use the -p option to specify that we want to use the kernel's DLM. Next we use -j to indicate that it should reserve enough space for two journals (one per node accessing the filesystem). Lastly, we use -t to specify the lock table name. The format for this field is +clustername:fsname+. For the +clustername+, we need to use the same value as specified in 'corosync.conf' for +cluster_name+. Just pick something unique and descriptive and add it somewhere inside the +totem+ block. For example: ..... totem { version: 2 # crypto_cipher and crypto_hash: Used for mutual node authentication. # If you choose to enable this, then do remember to create a shared # secret with "corosync-keygen". crypto_cipher: none crypto_hash: none cluster_name: webtest ... ..... [IMPORTANT] =========== Do this on each node in the cluster and be sure to restart them before continuing. =========== [IMPORTANT] =========== We must run the next command on whichever node last had '/dev/drbd' mounted. Otherwise you will receive the message: ----- /dev/drbd1: Read-only file system ----- =========== [source,Bash] ----- # ssh pcmk-2 -- mkfs.gfs2 -p lock_dlm -j 2 -t webtest:web /dev/drbd1 This will destroy any data on /dev/drbd1. It appears to contain: Linux rev 1.0 ext4 filesystem data, UUID=dc45fff3-c47a-4db2-96f7-a8049a323fe4 (extents) (large files) (huge files) Are you sure you want to proceed?
[y/n]y Device: /dev/drbd1 Blocksize: 4096 Device Size 0.97 GB (253935 blocks) Filesystem Size: 0.97 GB (253932 blocks) Journals: 2 Resource Groups: 4 Locking Protocol: "lock_dlm" Lock Table: "webtest" UUID: ed293a02-9eee-3fa3-ed1c-435ef1fd0116 ----- [source,Bash] ----- # crm crm(live)# cib new dlm INFO: dlm shadow CIB created crm(dlm)# configure primitive dlm ocf:pacemaker:controld \ op monitor interval=60s -crm(dlm)# configure clone dlm_clone dlm meta clone-max=2 clone-node-max=1 +crm(dlm)# configure clone dlm_clone dlm meta clone-max=2 clone-node-max=1 crm(dlm)# configure show node $id="1702537408" pcmk-1 \ attributes standby="off" node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="ext4" \ meta target-role="Stopped" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" primitive dlm ocf:pacemaker:controld \ op monitor interval="60s" ms WebDataClone WebData \ meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" clone dlm_clone dlm \ meta clone-max="2" clone-node-max="1" location prefer-pcmk-1 WebSite 50: pcmk-1 colocation WebSite-with-WebFS inf: WebSite WebFS colocation fs_on_drbd inf: WebFS WebDataClone:Master colocation website-with-ip inf: WebSite ClusterIP order WebFS-after-WebData inf: WebDataClone:promote WebFS:start order WebSite-after-WebFS inf: WebFS WebSite order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" crm(dlm)# cib commit dlm INFO: commited 'dlm' shadow CIB to the cluster crm(dlm)# quit bye # crm_mon -1 ============ Last updated: Wed Apr 4 01:15:11 2012 Last change: Wed Apr 4 00:50:11 2012 via crmd on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 7 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 Master/Slave Set: WebDataClone [WebData] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 ] Clone Set: dlm_clone [dlm] Started: [ pcmk-1 pcmk-2 ] ----- Then (re)populate the new filesystem with data (web pages). For now we'll create another variation on our home page. [source,Bash] ----- # mount /dev/drbd1 /mnt/ # cat <<-END >/mnt/index.html My Test Site - GFS2 END # umount /dev/drbd1 # drbdadm verify wwwdata# ----- == Reconfigure the Cluster for GFS2 == [source,Bash] ----- # crm crm(live) # cib new GFS2 INFO: GFS2 shadow CIB created crm(GFS2) # configure delete WebFS crm(GFS2) # configure primitive WebFS ocf:heartbeat:Filesystem params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2" ----- Now that we've recreated the resource, we also need to recreate all the constraints that used it. This is because the shell will automatically remove any constraints that referenced WebFS. 
[source,Bash] ----- crm(GFS2) # configure colocation WebSite-with-WebFS inf: WebSite WebFS crm(GFS2) # configure colocation fs_on_drbd inf: WebFS WebDataClone:Master crm(GFS2) # configure order WebFS-after-WebData inf: WebDataClone:promote WebFS:start crm(GFS2) # configure order WebSite-after-WebFS inf: WebFS WebSite crm(GFS2) # configure show node pcmk-1 node pcmk-2 primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.101" cidr_netmask="32" \ op monitor interval="30s" ms WebDataClone WebData \ meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" colocation WebSite-with-WebFS inf: WebSite WebFS colocation fs_on_drbd inf: WebFS WebDataClone:Master colocation website-with-ip inf: WebSite ClusterIP order WebFS-after-WebData inf: WebDataClone:promote WebFS:start order WebSite-after-WebFS inf: WebFS WebSite order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \ cluster-infrastructure="openais" \ expected-quorum-votes="2" \ stonith-enabled="false" \ no-quorum-policy="ignore" rsc_defaults $id="rsc-options" \ resource-stickiness="100" ----- Review the configuration before uploading it to the cluster, quitting the shell and watching the cluster's response: [source,Bash] ----- crm(GFS2) # cib commit GFS2 INFO: commited 'GFS2' shadow CIB to the cluster crm(GFS2) # quit bye # crm_mon ============ Last updated: Thu Sep 3 20:49:54 2009 Stack: openais Current DC: pcmk-2 - partition with quorum Version: 1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f 2 Nodes configured, 2 expected votes 6 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] WebSite (ocf::heartbeat:apache): Started pcmk-2 Master/Slave Set: WebDataClone Masters: [ pcmk-1 ] Slaves: [ pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr): Started pcmk-2 WebFS (ocf::heartbeat:Filesystem): Started pcmk-1 ----- == Reconfigure Pacemaker for Active/Active == Almost everything is in place. Recent versions of DRBD are capable of operating in Primary/Primary mode and the filesystem we're using is cluster aware. All we need to do now is reconfigure the cluster to take -advantage of this. +advantage of this. This will involve a number of changes, so we'll again use interactive mode. [source,Bash] ----- # crm # cib new active ----- There's no point making the services active on both locations if we can't reach them, so let's first clone the IP address. Cloned IPaddr2 resources use an iptables rule to ensure that each request only gets processed by one of the two clone instances. The additional meta options tell the cluster how many instances of the clone we want (one "request bucket" for each node) and that if all other nodes fail, then the remaining node should hold all of them. Otherwise the requests would be simply discarded. [source,Bash] ----- # configure clone WebIP ClusterIP \ meta globally-unique="true" clone-max="2" clone-node-max="2" ----- Now we must tell the ClusterIP how to decide which requests are processed by which hosts. To do this we must specify the clusterip_hash parameter.
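Setting +clusterip_hash="sourceip"+ means the hash of each request's source address determines which clone instance services it, so a given client is always handled by the same node. As a purely diagnostic sketch (not part of the cluster configuration, and the exact rule text varies with your iptables version), you can inspect the rule the resource agent creates by listing the INPUT chain on either node once the clone is running:

[source,Bash]
-----
# iptables -n -L INPUT | grep CLUSTERIP
-----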
Open the ClusterIP resource [source,Bash] ----- # configure edit ClusterIP ----- And add the following to the params line ..... clusterip_hash="sourceip" ..... So that the complete definition looks like: ..... -primitive ClusterIP ocf:heartbeat:IPaddr2 \ +primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \ op monitor interval="30s" ..... Here is the full transcript: [source,Bash] ----- # crm crm(live) # cib new active INFO: active shadow CIB created crm(active) # configure clone WebIP ClusterIP \ meta globally-unique="true" clone-max="2" clone-node-max="2" crm(active) # configure show node pcmk-1 node pcmk-2 primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \ op monitor interval="30s" ms WebDataClone WebData \ meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" clone WebIP ClusterIP \ meta globally-unique="true" clone-max="2" clone-node-max="2" colocation WebSite-with-WebFS inf: WebSite WebFS colocation fs_on_drbd inf: WebFS WebDataClone:Master colocation website-with-ip inf: WebSite WebIP order WebFS-after-WebData inf: WebDataClone:promote WebFS:start order WebSite-after-WebFS inf: WebFS WebSite order apache-after-ip inf: WebIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \ cluster-infrastructure="openais" \ expected-quorum-votes="2" \ stonith-enabled="false" \ no-quorum-policy="ignore" rsc_defaults $id="rsc-options" \ resource-stickiness="100" ----- Notice how any constraints that referenced ClusterIP have been updated to use WebIP instead. This is an additional benefit of using the crm shell. Next we need to convert the filesystem and Apache resources into clones. Again, the shell will automatically update any relevant constraints. [source,Bash] ----- crm(active) # configure clone WebFSClone WebFS crm(active) # configure clone WebSiteClone WebSite ----- The last step is to tell the cluster that it is now allowed to promote both instances to be Primary (aka. Master).
[source,Bash] ----- crm(active) # configure edit WebDataClone ----- Change master-max to 2 [source,Bash] ----- crm(active) # configure show node pcmk-1 node pcmk-2 primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="gfs2" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.101" cidr_netmask="32" clusterip_hash="sourceip" \ op monitor interval="30s" ms WebDataClone WebData \ meta master-max="2" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" clone WebFSClone WebFS clone WebIP ClusterIP \ meta globally-unique="true" clone-max="2" clone-node-max="2" clone WebSiteClone WebSite colocation WebSite-with-WebFS inf: WebSiteClone WebFSClone colocation fs_on_drbd inf: WebFSClone WebDataClone:Master colocation website-with-ip inf: WebSiteClone WebIP order WebFS-after-WebData inf: WebDataClone:promote WebFSClone:start order WebSite-after-WebFS inf: WebFSClone WebSiteClone order apache-after-ip inf: WebIP WebSiteClone property $id="cib-bootstrap-options" \ dc-version="1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f" \ cluster-infrastructure="openais" \ expected-quorum-votes="2" \ stonith-enabled="false" \ no-quorum-policy="ignore" rsc_defaults $id="rsc-options" \ resource-stickiness="100" ----- Review the configuration before uploading it to the cluster, quitting the shell and watching the cluster's response: [source,Bash] ----- crm(active) # cib commit active INFO: commited 'active' shadow CIB to the cluster crm(active) # quit bye # crm_mon ============ Last updated: Thu Sep 3 21:37:27 2009 Stack: openais Current DC: pcmk-2 - partition with quorum Version: 1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f 2 Nodes configured, 2 expected votes 6 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] Master/Slave Set: WebDataClone Masters: [ pcmk-1 pcmk-2 ] Clone Set: WebIP Started: [ pcmk-1 pcmk-2 ] Clone Set: WebFSClone Started: [ pcmk-1 pcmk-2 ] Clone Set: WebSiteClone Started: [ pcmk-1 pcmk-2 ] ----- === Testing Recovery === [NOTE] ======= TODO: Put one node into standby to demonstrate failover ======= diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt index 5bd57f6787..f274bade49 100644 --- a/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt +++ b/doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.txt @@ -1,372 +1,372 @@ = Creating an Active/Passive Cluster = == Exploring the Existing Configuration == When Pacemaker starts up, it automatically records the number and details of the nodes in the cluster as well as which stack is being used and the -version of Pacemaker being used. +version of Pacemaker being used. This is what the base configuration should look like. [source,Bash] ---- # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" ---- For those that are not afraid of XML, you can see the raw configuration by appending "xml" to the previous command. .The last XML you'll see in this document [source,Bash] ---- # crm configure show xml ---- Before we make any changes, it's a good idea to check the validity of the configuration.
[source,Bash] ---- # crm_verify -L error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity Errors found during check: config not valid -V may provide more details ---- As you can see, the tool has found some errors. In order to guarantee the safety of your data footnote:[If the data is corrupt, there is little point in continuing to make it available], Pacemaker ships with STONITH footnote:[A common node fencing mechanism. Used to ensure data integrity by powering off "bad" nodes] enabled. However, it also knows when no STONITH configuration has been supplied and reports this as a problem (since the cluster would not be able to make progress if a situation requiring node fencing arose). For now, we will disable this feature and configure it later in the Configuring STONITH section. It is important to note that the use of STONITH is highly encouraged; turning it off tells the cluster to simply pretend that failed nodes are safely powered off. Some vendors -will even refuse to support clusters that have it disabled. +will even refuse to support clusters that have it disabled. To disable STONITH, we set the stonith-enabled cluster option to false. [source,Bash] ---- # crm configure property stonith-enabled=false # crm_verify -L ---- With the new cluster option set, the configuration is now valid. [WARNING] ========= The use of stonith-enabled=false is completely inappropriate for a production cluster. We use it here to defer the discussion of its configuration which can differ widely from one installation to the next. See <<_what_is_stonith>> for information on why STONITH is important and details on how to configure it. ========= == Adding a Resource == The first thing we should do is configure an IP address. Regardless of where the cluster service(s) are running, we need a consistent address to contact them on. Here I will choose 192.168.122.120 as the floating address, give it the imaginative name ClusterIP and tell the cluster to check that it's running every 30 seconds. [IMPORTANT] =========== The chosen address must not be one already associated with a physical node. =========== [source,Bash] ---- # crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip=192.168.122.120 cidr_netmask=32 \ op monitor interval=30s ---- The other important piece of information here is ocf:heartbeat:IPaddr2. This tells Pacemaker three things about the resource you want to add. The first field, ocf, is the standard to which the resource script conforms and where to find it. The second field is specific to OCF resources and tells the cluster which namespace to find the resource script in, in this case heartbeat. The last field indicates the name of the resource script.
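To see which parameters a given resource script accepts, and which of them are required, you can ask the shell for the script's metadata. A hedged sketch: the subcommand is +ra meta+ (also available as +ra info+) in the crm shell versions we have used; check the shell's built-in help if yours differs:

[source,Bash]
----
# crm ra meta ocf:heartbeat:IPaddr2
----

This is how you can confirm, for example, that +ip+ is a required parameter for IPaddr2 while +cidr_netmask+ is optional.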
To obtain a list of the available resource classes, run [source,Bash] ---- -# crm ra classes +# crm ra classes heartbeat -lsb +lsb ocf / heartbeat pacemaker stonith ---- To then find all the OCF resource agents provided by Pacemaker and Heartbeat, run [source,Bash] ---- # crm ra list ocf pacemaker ClusterMon Dummy HealthCPU HealthSMART Stateful SysInfo -SystemHealth controld o2cb ping pingd +SystemHealth controld o2cb ping pingd # crm ra list ocf heartbeat AoEtarget AudibleAlarm CTDB ClusterMon Delay Dummy EvmsSCC Evmsd Filesystem ICP IPaddr IPaddr2 IPsrcaddr IPv6addr LVM LinuxSCSI MailTo ManageRAID ManageVE Pure-FTPd Raid1 Route SAPDatabase SAPInstance SendArp ServeRAID SphinxSearchDaemon Squid Stateful SysInfo VIPArip VirtualDomain WAS WAS6 WinPopup Xen Xinetd anything apache conntrackd db2 drbd eDir88 ethmonitor exportfs fio iSCSILogicalUnit iSCSITarget ids iscsi jboss ldirectord lxc mysql mysql-proxy nfsserver nginx oracle oralsnr pgsql pingd portblock postfix proftpd rsyncd scsi2reservation sfex symlink -syslog-ng tomcat vmware +syslog-ng tomcat vmware ---- Now verify that the IP resource has been added and display the cluster's status to see that it is now active. [source,Bash] ---- # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" # crm_mon -1 ============ Last updated: Tue Apr 3 09:56:50 2012 Last change: Tue Apr 3 09:54:37 2012 via cibadmin on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 1 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 ---- == Perform a Failover == Being a high-availability cluster, we should test failover of our new -resource before moving on. +resource before moving on. First, find the node on which the IP address is running. [source,Bash] ---- # crm resource status ClusterIP resource ClusterIP is running on: pcmk-1 ---- Shut down Pacemaker and Corosync on that machine. [source,Bash] ---- # ssh pcmk-1 -- service pacemaker stop # ssh pcmk-1 -- service corosync stop ---- Once Corosync is no longer running, go to the other node and check the cluster status with crm_mon. [source,Bash] ---- # crm_mon -1 ============ Last updated: Tue Apr 3 10:01:28 2012 Last change: Tue Apr 3 09:54:39 2012 via cibadmin on pcmk-1 Stack: corosync Current DC: pcmk-2 (1719314624) - partition WITHOUT quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 1 Resources configured. ============ Online: [ pcmk-2 ] OFFLINE: [ pcmk-1 ] ---- There are two things to notice about the cluster's current state. The first is that, as expected, +pcmk-1+ is now offline. However we can also see that +ClusterIP+ isn't running anywhere! === Quorum and Two-Node Clusters === This is because the cluster no longer has quorum, as can be seen by the text "partition WITHOUT quorum" (emphasised green) in the output above. In order to reduce the possibility of data corruption, Pacemaker's default behavior is to stop all resources if the cluster does not have quorum.
A cluster is said to have quorum when more than half the known or expected nodes are online, or for the mathematically inclined, whenever the following equation is true: .... total_nodes < 2 * active_nodes .... Therefore a two-node cluster only has quorum when both nodes are running, which is no longer the case for our cluster. This would normally make the creation of a two-node cluster pointless footnote:[Actually some would argue that two-node clusters are always pointless, but that is an argument for another time], however it is possible to control how Pacemaker behaves when quorum is lost. In particular, we can tell the cluster to simply ignore quorum altogether. [source,Bash] ---- # crm configure property no-quorum-policy=ignore # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" ---- After a few moments, the cluster will start the IP address on the remaining node. Note that the cluster still does not have quorum. [source,Bash] ---- # crm_mon -1 ============ Last updated: Tue Apr 3 10:02:46 2012 Last change: Tue Apr 3 10:02:08 2012 via cibadmin on pcmk-2 Stack: corosync Current DC: pcmk-2 (1719314624) - partition WITHOUT quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 1 Resources configured. ============ Online: [ pcmk-2 ] OFFLINE: [ pcmk-1 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 ---- Now simulate node recovery by restarting the cluster stack on +pcmk-1+ and check the cluster's status. [source,Bash] ---- # service corosync start -Starting Corosync Cluster Engine (corosync): [ OK ] +Starting Corosync Cluster Engine (corosync): [ OK ] # service pacemaker start Starting Pacemaker Cluster Manager: [ OK ] # crm_mon ============ Last updated: Fri Aug 28 15:32:13 2009 Stack: openais Current DC: pcmk-2 - partition with quorum Version: 1.1.5-bdd89e69ba545404d02445be1f3d72e6a203ba2f 2 Nodes configured, 2 expected votes 1 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr): Started pcmk-2 ---- [NOTE] ====== In the dark days, the cluster may have moved the IP back to its original location (+pcmk-1+). Usually this is no longer the case. ====== === Prevent Resources from Moving after Recovery === In most circumstances, it is highly desirable to prevent healthy resources from being moved around the cluster. Moving resources almost always requires a period of downtime. For complex services like Oracle databases, this period can be quite long. To address this, Pacemaker has the concept of resource stickiness which controls how much a service prefers to stay running where it is. You may like to think of it as the "cost" of any downtime. By default, Pacemaker assumes there is zero cost associated with moving resources and will do so to achieve "optimal" footnote:[It should be noted that Pacemaker's definition of optimal may not always agree with that of a human's. The order in which Pacemaker processes lists of resources and nodes creates implicit preferences in situations where the administrator has not explicitly specified them] resource placement.
We can specify a different stickiness for every resource, but it is often sufficient to change the default. [source,Bash] ---- # crm configure rsc_defaults resource-stickiness=100 # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" rsc_defaults $id="rsc-options" \ resource-stickiness="100" ---- diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt index 497616bf9c..9eb0fa283e 100644 --- a/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt +++ b/doc/Clusters_from_Scratch/en-US/Ch-Apache.txt @@ -1,572 +1,572 @@ -= Apache - Adding More Services = += Apache - Adding More Services = == Forward == Now that we have a basic but functional active/passive two-node cluster, we're ready to add some real services. We're going to start with Apache because its a feature of many clusters and relatively simple to configure. == Installation == Before continuing, we need to make sure Apache is installed on both hosts. [source,Bash] ..... # yum install -y httpd Loaded plugins: langpacks, presto, refresh-packagekit -fedora/metalink | 2.6 kB 00:00 -updates/metalink | 3.2 kB 00:00 -updates-testing/metalink | 41 kB 00:00 +fedora/metalink | 2.6 kB 00:00 +updates/metalink | 3.2 kB 00:00 +updates-testing/metalink | 41 kB 00:00 Resolving Dependencies --> Running transaction check ---> Package httpd.x86_64 0:2.2.22-3.fc17 will be installed --> Processing Dependency: httpd-tools = 2.2.22-3.fc17 for package: httpd-2.2.22-3.fc17.x86_64 --> Processing Dependency: apr-util-ldap for package: httpd-2.2.22-3.fc17.x86_64 --> Processing Dependency: libaprutil-1.so.0()(64bit) for package: httpd-2.2.22-3.fc17.x86_64 --> Processing Dependency: libapr-1.so.0()(64bit) for package: httpd-2.2.22-3.fc17.x86_64 --> Running transaction check ---> Package apr.x86_64 0:1.4.6-1.fc17 will be installed ---> Package apr-util.x86_64 0:1.4.1-2.fc17 will be installed ---> Package apr-util-ldap.x86_64 0:1.4.1-2.fc17 will be installed ---> Package httpd-tools.x86_64 0:2.2.22-3.fc17 will be installed --> Finished Dependency Resolution Dependencies Resolved ===================================================================================== Package Arch Version Repository Size ===================================================================================== Installing: httpd x86_64 2.2.22-3.fc17 updates-testing 823 k Installing for dependencies: apr x86_64 1.4.6-1.fc17 fedora 99 k apr-util x86_64 1.4.1-2.fc17 fedora 78 k apr-util-ldap x86_64 1.4.1-2.fc17 fedora 17 k httpd-tools x86_64 2.2.22-3.fc17 updates-testing 74 k Transaction Summary ===================================================================================== Install 1 Package (+4 Dependent packages) Total download size: 1.1 M Installed size: 3.5 M Downloading Packages: -(1/5): apr-1.4.6-1.fc17.x86_64.rpm | 99 kB 00:00 -(2/5): apr-util-1.4.1-2.fc17.x86_64.rpm | 78 kB 00:00 -(3/5): apr-util-ldap-1.4.1-2.fc17.x86_64.rpm | 17 kB 00:00 -(4/5): httpd-2.2.22-3.fc17.x86_64.rpm | 823 kB 00:01 -(5/5): httpd-tools-2.2.22-3.fc17.x86_64.rpm | 74 kB 00:00 +(1/5): apr-1.4.6-1.fc17.x86_64.rpm | 99 kB 00:00 +(2/5): apr-util-1.4.1-2.fc17.x86_64.rpm | 78 kB 00:00 +(3/5): apr-util-ldap-1.4.1-2.fc17.x86_64.rpm | 17 kB 
00:00 +(4/5): httpd-2.2.22-3.fc17.x86_64.rpm | 823 kB 00:01 +(5/5): httpd-tools-2.2.22-3.fc17.x86_64.rpm | 74 kB 00:00 ------------------------------------------------------------------------------------- -Total 238 kB/s | 1.1 MB 00:04 +Total 238 kB/s | 1.1 MB 00:04 Running Transaction Check Running Transaction Test Transaction Test Succeeded Running Transaction - Installing : apr-1.4.6-1.fc17.x86_64 1/5 - Installing : apr-util-1.4.1-2.fc17.x86_64 2/5 - Installing : apr-util-ldap-1.4.1-2.fc17.x86_64 3/5 - Installing : httpd-tools-2.2.22-3.fc17.x86_64 4/5 - Installing : httpd-2.2.22-3.fc17.x86_64 5/5 - Verifying : apr-util-ldap-1.4.1-2.fc17.x86_64 1/5 - Verifying : httpd-tools-2.2.22-3.fc17.x86_64 2/5 - Verifying : apr-util-1.4.1-2.fc17.x86_64 3/5 - Verifying : apr-1.4.6-1.fc17.x86_64 4/5 - Verifying : httpd-2.2.22-3.fc17.x86_64 5/5 + Installing : apr-1.4.6-1.fc17.x86_64 1/5 + Installing : apr-util-1.4.1-2.fc17.x86_64 2/5 + Installing : apr-util-ldap-1.4.1-2.fc17.x86_64 3/5 + Installing : httpd-tools-2.2.22-3.fc17.x86_64 4/5 + Installing : httpd-2.2.22-3.fc17.x86_64 5/5 + Verifying : apr-util-ldap-1.4.1-2.fc17.x86_64 1/5 + Verifying : httpd-tools-2.2.22-3.fc17.x86_64 2/5 + Verifying : apr-util-1.4.1-2.fc17.x86_64 3/5 + Verifying : apr-1.4.6-1.fc17.x86_64 4/5 + Verifying : httpd-2.2.22-3.fc17.x86_64 5/5 Installed: - httpd.x86_64 0:2.2.22-3.fc17 + httpd.x86_64 0:2.2.22-3.fc17 Dependency Installed: - apr.x86_64 0:1.4.6-1.fc17 apr-util.x86_64 0:1.4.1-2.fc17 - apr-util-ldap.x86_64 0:1.4.1-2.fc17 httpd-tools.x86_64 0:2.2.22-3.fc17 + apr.x86_64 0:1.4.6-1.fc17 apr-util.x86_64 0:1.4.1-2.fc17 + apr-util-ldap.x86_64 0:1.4.1-2.fc17 httpd-tools.x86_64 0:2.2.22-3.fc17 Complete! ..... Also, we need the wget tool in order for the cluster to be able to check the status of the Apache server. [source,Bash] ..... # yum install -y wget Loaded plugins: langpacks, presto, refresh-packagekit Resolving Dependencies --> Running transaction check ---> Package wget.x86_64 0:1.13.4-2.fc17 will be installed --> Finished Dependency Resolution Dependencies Resolved ===================================================================================== Package Arch Version Repository Size ===================================================================================== Installing: wget x86_64 1.13.4-2.fc17 fedora 495 k Transaction Summary ===================================================================================== Install 1 Package Total download size: 495 k Installed size: 1.8 M Downloading Packages: -wget-1.13.4-2.fc17.x86_64.rpm | 495 kB 00:01 +wget-1.13.4-2.fc17.x86_64.rpm | 495 kB 00:01 Running Transaction Check Running Transaction Test Transaction Test Succeeded Running Transaction - Installing : wget-1.13.4-2.fc17.x86_64 1/1 - Verifying : wget-1.13.4-2.fc17.x86_64 1/1 + Installing : wget-1.13.4-2.fc17.x86_64 1/1 + Verifying : wget-1.13.4-2.fc17.x86_64 1/1 Installed: - wget.x86_64 0:1.13.4-2.fc17 + wget.x86_64 0:1.13.4-2.fc17 Complete! ..... == Preparation == First we need to create a page for Apache to serve up. On Fedora the default Apache docroot is /var/www/html, so we'll create an index file there. [source,Bash] ----- # cat <<-END >/var/www/html/index.html My Test Site - pcmk-1 END ----- For the moment, we will simplify things by serving up only a static site and manually sync the data between the two nodes. So run the command again on pcmk-2. 
[source,Bash] ----- [root@pcmk-2 ~]# cat <<-END >/var/www/html/index.html My Test Site - pcmk-2 END ----- == Enable the Apache status URL == In order to monitor the health of your Apache instance, and recover it if it fails, the resource agent used by Pacemaker assumes the server-status URL is available. Look for the following in '/etc/httpd/conf/httpd.conf' and make sure it is not disabled or commented out: ..... <Location /server-status> SetHandler server-status Order deny,allow Deny from all Allow from 127.0.0.1 </Location> ..... == Update the Configuration == At this point, Apache is ready to go; all that needs to be done is to add it to the cluster. Let's call the resource WebSite. We need to use an OCF script called apache in the heartbeat namespace footnote:[Compare the key used here ocf:heartbeat:apache with the one we used earlier for the IP address: ocf:heartbeat:IPaddr2]. The only required parameter is the path to the main Apache configuration file, and we'll tell the cluster to check once a minute that Apache is still running. [source,Bash] ----- # crm configure primitive WebSite ocf:heartbeat:apache \ params configfile=/etc/httpd/conf/httpd.conf \ op monitor interval=1min WARNING: WebSite: default timeout 20s for start is smaller than the advised 40s WARNING: WebSite: default timeout 20s for stop is smaller than the advised 60s ----- The easiest way to resolve this is to change the default: [source,Bash] ----- # crm configure op_defaults timeout=240s # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" ----- After a short delay, we should see the cluster start Apache: [source,Bash] ----- # crm_mon -1 ============ Last updated: Tue Apr 3 11:54:29 2012 Last change: Tue Apr 3 11:54:26 2012 via crmd on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 2 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 WebSite (ocf::heartbeat:apache): Started pcmk-1 ----- Wait a moment, the WebSite resource isn't running on the same host as our IP address! [NOTE] ====== If, in the `crm_mon` output, you see: .... Failed actions: WebSite_start_0 (node=pcmk-2, call=301, rc=1, status=complete): unknown error .... Then you've likely not enabled the status URL correctly. You can check if this is the problem by running: .... wget http://127.0.0.1/server-status .... If you see +Connection refused+ in the output, then this is indeed the problem. Check to ensure that +Allow from 127.0.0.1+ is present for the +<Location>+ block. ====== == Ensuring Resources Run on the Same Host == To reduce the load on any one machine, Pacemaker will generally try to spread the configured resources across the cluster nodes. However we can tell the cluster that two resources are related and need to run on the same host (or not at all). Here we instruct the cluster that WebSite can only run on the host that ClusterIP is active on.
For the constraint, we need a name (choose something descriptive like website-with-ip), indicate that it's mandatory (so that if ClusterIP is not active anywhere, WebSite will not be permitted to run anywhere either) by specifying a score of INFINITY and finally list the two resources. [NOTE] ======= If ClusterIP is not active anywhere, WebSite will not be permitted to run anywhere. ======= [IMPORTANT] =========== Colocation constraints are "directional", in that they imply certain things about the order in which the two resources will have a location chosen. In this case we're saying +WebSite+ needs to be placed on the same machine as +ClusterIP+; this implies that we must know the location of +ClusterIP+ before choosing a location for +WebSite+. =========== [source,Bash] ----- # crm configure colocation website-with-ip INFINITY: WebSite ClusterIP # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" colocation website-with-ip inf: WebSite ClusterIP property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" # crm_mon -1 ============ Last updated: Tue Apr 3 11:57:13 2012 Last change: Tue Apr 3 11:56:10 2012 via cibadmin on pcmk-1 Stack: corosync Current DC: pcmk-2 (1719314624) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 2 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 WebSite (ocf::heartbeat:apache): Started pcmk-2 ----- == Controlling Resource Start/Stop Ordering == When Apache starts, it binds to the available IP addresses. It doesn't know about any addresses we add afterwards, so not only do they need to run on the same node, but we need to make sure ClusterIP is already active before we start WebSite. We do this by adding an ordering constraint. We need to give it a name (choose something descriptive like apache-after-ip), indicate that it's mandatory (so that any recovery for ClusterIP will also trigger recovery of WebSite) and list the two resources in the order we need them to start.
[source,Bash] ----- # crm configure order apache-after-ip mandatory: ClusterIP WebSite # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" colocation website-with-ip inf: WebSite ClusterIP order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" ----- == Specifying a Preferred Location == Pacemaker does not rely on any sort of hardware symmetry between nodes, so it may well be that one machine is more powerful than the other. In such cases it makes sense to host the resources there if it is available. To do this we create a location constraint. Again we give it a descriptive name (prefer-pcmk-1), specify the resource we want to run there (WebSite), how badly we'd like it to run there (we'll use 50 for now, but in a two-node situation almost any value above 0 will do) and the host's name. [source,Bash] ----- # crm configure location prefer-pcmk-1 WebSite 50: pcmk-1 WARNING: prefer-pcmk-1: referenced node pcmk-1 does not exist ----- This warning should be ignored. [source,Bash] ----- # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" location prefer-pcmk-1 WebSite 50: pcmk-1 colocation website-with-ip inf: WebSite ClusterIP order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" # crm_mon -1 ============ Last updated: Tue Apr 3 12:02:14 2012 Last change: Tue Apr 3 11:59:42 2012 via cibadmin on pcmk-1 Stack: corosync Current DC: pcmk-2 (1719314624) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 2 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 WebSite (ocf::heartbeat:apache): Started pcmk-2 ----- -Wait a minute, the resources are still on pcmk-2! +Wait a minute, the resources are still on pcmk-2! Even though we now prefer pcmk-1 over pcmk-2, that preference is (intentionally) less than the resource stickiness (how much we -preferred not to have unnecessary downtime). +preferred not to have unnecessary downtime). 
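The arithmetic is worth spelling out. As a simplified sketch (the cluster's real placement calculation sums every score in the CIB, and colocation propagates one resource's preferences onto the other), the stickiness of the two colocated resources accumulates on the node where they currently run:

....
score(pcmk-2) = stickiness(ClusterIP) + stickiness(WebSite) = 100 + 100 = 200
score(pcmk-1) = prefer-pcmk-1                               = 50
....

Since 200 outweighs 50, everything stays on pcmk-2; these are the same 200 and 50 you will see in the allocation scores below.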
To see the current placement scores, you can use a tool called crm_simulate: [source,Bash] ---- # crm_simulate -sL Current cluster status: Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 WebSite (ocf::heartbeat:apache): Started pcmk-2 Allocation scores: native_color: ClusterIP allocation score on pcmk-1: 50 native_color: ClusterIP allocation score on pcmk-2: 200 native_color: WebSite allocation score on pcmk-1: -INFINITY native_color: WebSite allocation score on pcmk-2: 100 Transition Summary: ---- == Manually Moving Resources Around the Cluster == There are always times when an administrator needs to override the cluster and force resources to move to a specific location. Underneath, we use location constraints like the one we created above; happily, you don't need to care. Just provide the name of the resource and the intended location, and we'll do the rest. [source,Bash] ----- # crm resource move WebSite pcmk-1 # crm_mon -1 ============ Last updated: Tue Apr 3 12:03:41 2012 Last change: Tue Apr 3 12:03:37 2012 via crm_resource on pcmk-1 Stack: corosync Current DC: pcmk-2 (1719314624) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 2 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 WebSite (ocf::heartbeat:apache): Started pcmk-1 ----- Notice how the colocation rule we created has ensured that ClusterIP was also moved to pcmk-1. For the curious, we can see the effect of this command by examining the configuration: [source,Bash] ----- # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" location cli-prefer-WebSite WebSite \ rule $id="cli-prefer-rule-WebSite" inf: #uname eq pcmk-1 location prefer-pcmk-1 WebSite 50: pcmk-1 colocation website-with-ip inf: WebSite ClusterIP order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" ----- The automated constraint used to move the resources to +pcmk-1+ is the line beginning with +location cli-prefer-WebSite+. === Giving Control Back to the Cluster === Once we've finished whatever activity required us to move the resources to pcmk-1 (in our case, nothing), we can then allow the cluster to resume normal operation with the unmove command. Since we previously configured a default stickiness, the resources will remain on pcmk-1.
[source,Bash] ----- # crm resource unmove WebSite # crm configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" location prefer-pcmk-1 WebSite 50: pcmk-1 colocation website-with-ip inf: WebSite ClusterIP order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" ----- Note that the automated constraint is now gone. If we check the cluster status, we can also see that as expected the resources are still active on pcmk-1. [source,Bash] ----- # crm_mon ============ Last updated: Tue Apr 3 12:05:08 2012 Last change: Tue Apr 3 12:03:37 2012 via crm_resource on pcmk-1 Stack: corosync Current DC: pcmk-2 (1719314624) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 2 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 WebSite (ocf::heartbeat:apache): Started pcmk-1 ----- diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt index 1502c7f584..f5b7a9ff9a 100644 --- a/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt +++ b/doc/Clusters_from_Scratch/en-US/Ch-Installation.txt @@ -1,932 +1,932 @@ = Installation = == OS Installation == Detailed instructions for installing Fedora are available at http://docs.fedoraproject.org/en-US/Fedora/17/html/Installation_Guide/ in a number of -languages. The abbreviated version is as follows... +languages. The abbreviated version is as follows... Point your browser to http://fedoraproject.org/en/get-fedora-all, locate the +Install Media+ section and download the install DVD that matches your hardware. Burn the disk image to a DVD footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Burning_ISO_images_to_disc/index.html] and boot from it, or use the image to boot a virtual machine. After clicking through the welcome screen, select your language, keyboard layout footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-keyboard-x86.html] and storage type footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/Storage_Devices-x86.html] Assign your machine a host name. footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-Netconfig-x86.html] I happen to control the clusterlabs.org domain name, so I will use that here. [IMPORTANT] =========== -Do not accept the default network settings. +Do not accept the default network settings. Cluster machines should never obtain an IP address via DHCP. Before clicking next, select +Configure Network+ to specify a fixed IPv4 address for +System eth0+. -Here I will use the internal addresses for the clusterlab.org network. +Here I will use the internal addresses for the clusterlab.org network. image::images/Network.png["Custom network settings",align="center"] Be sure to also enter the +Routes+ section and add an entry for your default gateway. 
===========

You will then be prompted to indicate the machine's physical location
footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-timezone-x86.html]
and to supply a root password.
footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/sn-account_configuration-x86.html]

Now select where you want Fedora installed.
footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-diskpartsetup-x86.html]
As I don’t care about any existing data, I will accept the default and
allow Fedora to use the complete drive.

[IMPORTANT]
===========
By default Fedora uses LVM for partitioning, which allows us to
dynamically change the amount of space allocated to a given partition.

However, by default it also allocates all free space to the +/+
(aka. +root+) partition, which cannot be dynamically _reduced_ in size
(dynamic increases are fine, by the way).

So if you plan on following the DRBD or GFS2 portions of this guide,
you should reserve at least 1Gb of space on each machine from which to
create a shared volume. To do so, select the +Review and modify
partitioning layout+ checkbox before clicking +Next+. You will then be
given an opportunity to reduce the size of the +root+ partition.
===========

Next choose which software should be installed.
footnote:[http://docs.fedoraproject.org/en-US/Fedora/16/html/Installation_Guide/s1-pkgselection-x86.html]
Change the selection to Minimal so that we see everything that gets
installed. Don't enable updates yet, we'll do that (and install any
extra software we need) later.

After you click next, Fedora will begin installing. Go grab something
to drink; this may take a while.

Once the node reboots, you'll see a (possibly mangled) login prompt on
the console. Login using +root+ and the password you created earlier.

image::images/Console.png["Initial Console",align="center"]

[NOTE]
======
That was the last screenshot; from here on in, we're going to be working
exclusively from the terminal.
======

== Post Installation Tasks ==

=== Networking ===

Bring up the network and ensure it starts at boot:

[source,Bash]
....
# service network start
# chkconfig network on
....

Check that the machine has the static IP address you configured earlier:

[source,Bash]
....
# ip addr
-1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
-    inet6 ::1/128 scope host 
+    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:d7:d6:08 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.101/24 brd 192.168.122.255 scope global eth0
-    inet6 fe80::5054:ff:fed7:d608/64 scope link 
+    inet6 fe80::5054:ff:fed7:d608/64 scope link
       valid_lft forever preferred_lft forever
....

Now check the default route setting:

[source,Bash]
....
[root@pcmk-1 ~]# ip route
-default via 192.168.122.1 dev eth0 
-192.168.122.0/24 dev eth0 proto kernel scope link src 192.168.122.101 
+default via 192.168.122.1 dev eth0
+192.168.122.0/24 dev eth0 proto kernel scope link src 192.168.122.101
....

If there is no line beginning with +default via+, then you may need to
add a line such as GATEWAY=192.168.122.1 to '/etc/sysconfig/network' and
restart the network.

Now check for connectivity to the outside world. Start small by testing
whether we can reach the gateway we configured.

[source,Bash]
....
# ping -c 1 192.168.122.1
PING 192.168.122.1 (192.168.122.1) 56(84) bytes of data.
64 bytes from 192.168.122.1: icmp_req=1 ttl=64 time=0.249 ms

--- 192.168.122.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.249/0.249/0.249/0.000 ms
....

Now try something external; choose a location you know will be available.

[source,Bash]
....
# ping -c 1 www.google.com
PING www.l.google.com (173.194.72.106) 56(84) bytes of data.
64 bytes from tf-in-f106.1e100.net (173.194.72.106): icmp_req=1 ttl=41 time=167 ms

--- www.l.google.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 167.618/167.618/167.618/0.000 ms
....

=== Leaving the Console ===

The console isn't a very friendly place to work from, so we will now
switch to accessing the machine remotely via SSH, where we can use
copy&paste, etc.

First we check that we can see the newly installed machine at all:

[source,Bash]
....
beekhof@f16 ~ # ping -c 1 192.168.122.101
PING 192.168.122.101 (192.168.122.101) 56(84) bytes of data.
64 bytes from 192.168.122.101: icmp_req=1 ttl=64 time=1.01 ms

--- 192.168.122.101 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.012/1.012/1.012/0.000 ms
....

Next we log in via SSH:

[source,Bash]
....
beekhof@f16 ~ # ssh -l root 192.168.122.101
-root@192.168.122.11's password: 
+root@192.168.122.101's password:
Last login: Fri Mar 30 19:41:19 2012 from 192.168.122.1
[root@pcmk-1 ~]#
....

=== Security Shortcuts ===

To simplify this guide and focus on the aspects directly connected to
clustering, we will now disable the machine's firewall and the SELinux
-installation. 
+installation.

[WARNING]
===========
Both of these actions create significant security issues and should not
be performed on machines that will be exposed to the outside world.
===========

[IMPORTANT]
===========
TODO: Create an Appendix that deals with (at least) re-enabling the
firewall.
===========

[source,Bash]
----
# setenforce 0
# sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
# systemctl disable iptables.service
rm '/etc/systemd/system/basic.target.wants/iptables.service'
# systemctl stop iptables.service
----

=== Short Node Names ===

During installation, we filled in the machine's fully qualified domain
name (FQDN), which can be rather long when it appears in cluster logs
and status output. See for yourself how the machine identifies itself:
(((Nodes, short name)))

[source,Bash]
----
# uname -n
pcmk-1.clusterlabs.org
# dnsdomainname
clusterlabs.org
----
(((Nodes, Domain name (Query))))

The output from the second command is fine, but we really don't need the
domain name included in the basic host details. To address this, we need
to update '/etc/sysconfig/network'. This is what it should look like
before we start.

[source,Bash]
----
# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=pcmk-1.clusterlabs.org
GATEWAY=192.168.122.1
----

All we need to do now is strip off the domain name portion, which is
stored elsewhere anyway.

[source,Bash]
----
# sed -i.sed 's/\.[a-z].*//g' /etc/sysconfig/network
----

Now confirm the change was successful. The revised file contents should
look something like this.

[source,Bash]
----
# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=pcmk-1
GATEWAY=192.168.122.1
----

However, we're not finished. The machine won't normally see the
shortened host name until it reboots, but we can force it to update.
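[NOTE]
======
On later Fedora releases, where systemd manages the host name, the
equivalent one-off change would be +hostnamectl set-hostname pcmk-1+.
The commands below apply to the '/etc/sysconfig/network' style of
configuration used in this guide.
======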
[source,Bash]
----
# source /etc/sysconfig/network
# hostname $HOSTNAME
----
(((Nodes, Domain name (Remove from host name))))

Now check that the machine is using the correct names:

[source,Bash]
----
# uname -n
pcmk-1
# dnsdomainname
clusterlabs.org
----

=== NTP ===

It is highly recommended to enable NTP on your cluster nodes. Doing so
ensures all nodes agree on the current time and makes reading log files
significantly easier. (This is done from the Fedora installer's
+Date and Time+ screen: enable NTP there to keep the times on all your
nodes consistent.)

== Before You Continue ==

Repeat the Installation steps so far, so that you have two Fedora
nodes ready to have the cluster software installed.

For the purposes of this document, the additional node is called
pcmk-2 with address 192.168.122.102.

=== Finalize Networking ===

Confirm that you can communicate between the two new nodes:

[source,Bash]
----
# ping -c 3 192.168.122.102
PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.
64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.343 ms
64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.402 ms
64 bytes from 192.168.122.102: icmp_seq=3 ttl=64 time=0.558 ms

--- 192.168.122.102 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.343/0.434/0.558/0.092 ms
----

Now we need to make sure we can communicate with the machines by their
name. If you have a DNS server, add additional entries for the two
machines. Otherwise, you'll need to add the machines to '/etc/hosts'.
Below are the entries for my cluster nodes:

[source,Bash]
----
# grep pcmk /etc/hosts
192.168.122.101 pcmk-1.clusterlabs.org pcmk-1
192.168.122.102 pcmk-2.clusterlabs.org pcmk-2
----

We can now verify the setup by again using ping:

[source,Bash]
----
# ping -c 3 pcmk-2
PING pcmk-2.clusterlabs.org (192.168.122.101) 56(84) bytes of data.
64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=1 ttl=64 time=0.164 ms
64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=2 ttl=64 time=0.475 ms
64 bytes from pcmk-1.clusterlabs.org (192.168.122.101): icmp_seq=3 ttl=64 time=0.186 ms

--- pcmk-2.clusterlabs.org ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.164/0.275/0.475/0.141 ms
----

-=== Configure SSH === 
+=== Configure SSH ===

SSH is a convenient and secure way to copy files and perform commands
remotely. For the purposes of this guide, we will create a key without a
password (using the -N option) so that we can perform remote actions
without being prompted.

(((SSH)))

[WARNING]
=========
Unprotected SSH keys, those without a password, are not recommended for
servers exposed to the outside world. We use them here only to simplify
the demo.
=========

Create a new key and allow anyone with that key to log in:

.Creating and Activating a new SSH Key
[source,Bash]
----
# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""
Generating public/private dsa key pair.
Your identification has been saved in /root/.ssh/id_dsa.
Your public key has been saved in /root/.ssh/id_dsa.pub.
The key fingerprint is:
91:09:5c:82:5a:6a:50:08:4e:b2:0c:62:de:cc:74:44 root@pcmk-1.clusterlabs.org
The key's randomart image is:
+--[ DSA 1024]----+
|==.ooEo.. |
|X O + .o o |
| * A + |
| + . |
| . 
S | | | | | | | | | +-----------------+ # cp .ssh/id_dsa.pub .ssh/authorized_keys ---- (((Creating and Activating a new SSH Key))) Install the key on the other nodes and test that you can now run commands remotely, without being prompted .Installing the SSH Key on Another Host [source,Bash] ---- # scp -r .ssh pcmk-2: The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established. RSA key fingerprint is b1:2b:55:93:f1:d9:52:2b:0f:f2:8a:4e:ae:c6:7c:9a. Are you sure you want to continue connecting (yes/no)? yes -Warning: Permanently added 'pcmk-2,192.168.122.102' (RSA) to the list of known hosts.root@pcmk-2's password: -id_dsa.pub 100% 616 0.6KB/s 00:00 -id_dsa 100% 672 0.7KB/s 00:00 -known_hosts 100% 400 0.4KB/s 00:00 -authorized_keys 100% 616 0.6KB/s 00:00 +Warning: Permanently added 'pcmk-2,192.168.122.102' (RSA) to the list of known hosts.root@pcmk-2's password: +id_dsa.pub 100% 616 0.6KB/s 00:00 +id_dsa 100% 672 0.7KB/s 00:00 +known_hosts 100% 400 0.4KB/s 00:00 +authorized_keys 100% 616 0.6KB/s 00:00 # ssh pcmk-2 -- uname -npcmk-2 # ---- == Cluster Software Installation == === Install the Cluster Software === Since version 12, Fedora comes with recent versions of everything you need, so simply fire up the shell and run: [source,Bash] ..... # yum install -y pacemaker corosync -fedora/metalink | 38 kB 00:00 -fedora | 4.2 kB 00:00 -fedora/primary_db | 14 MB 00:21 -updates/metalink | 2.7 kB 00:00 -updates | 2.6 kB 00:00 -updates/primary_db | 1.2 kB 00:00 -updates-testing/metalink | 28 kB 00:00 -updates-testing | 4.5 kB 00:00 -updates-testing/primary_db | 4.5 MB 00:12 +fedora/metalink | 38 kB 00:00 +fedora | 4.2 kB 00:00 +fedora/primary_db | 14 MB 00:21 +updates/metalink | 2.7 kB 00:00 +updates | 2.6 kB 00:00 +updates/primary_db | 1.2 kB 00:00 +updates-testing/metalink | 28 kB 00:00 +updates-testing | 4.5 kB 00:00 +updates-testing/primary_db | 4.5 MB 00:12 Setting up Install Process Resolving Dependencies --> Running transaction check ---> Package corosync.x86_64 0:1.99.9-1.fc17 will be installed --> Processing Dependency: corosynclib = 1.99.9-1.fc17 for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libxslt for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libvotequorum.so.5(COROSYNC_VOTEQUORUM_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libquorum.so.5(COROSYNC_QUORUM_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libcpg.so.4(COROSYNC_CPG_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libcmap.so.4(COROSYNC_CMAP_1.0)(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libcfg.so.6(COROSYNC_CFG_0.82)(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libvotequorum.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libtotem_pg.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libquorum.so.5()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libqb.so.0()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libnetsnmp.so.30()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libcpg.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libcorosync_common.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing Dependency: libcmap.so.4()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 --> Processing 
Dependency: libcfg.so.6()(64bit) for package: corosync-1.99.9-1.fc17.x86_64 ---> Package pacemaker.x86_64 0:1.1.7-2.fc17 will be installed --> Processing Dependency: pacemaker-libs = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: pacemaker-cluster-libs = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: pacemaker-cli = 1.1.7-2.fc17 for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: resource-agents for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: perl(Getopt::Long) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libgnutls.so.26(GNUTLS_1_4)(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: cluster-glue for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: /usr/bin/perl for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libtransitioner.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libstonithd.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libstonith.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libplumb.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libpils.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libpengine.so.3()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libpe_status.so.3()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libpe_rules.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libltdl.so.7()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: liblrm.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libgnutls.so.26()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libcrmcommon.so.2()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libcrmcluster.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Processing Dependency: libcib.so.1()(64bit) for package: pacemaker-1.1.7-2.fc17.x86_64 --> Running transaction check ---> Package cluster-glue.x86_64 0:1.0.6-9.fc17.1 will be installed --> Processing Dependency: perl-TimeDate for package: cluster-glue-1.0.6-9.fc17.1.x86_64 --> Processing Dependency: libOpenIPMIutils.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64 --> Processing Dependency: libOpenIPMIposix.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64 --> Processing Dependency: libOpenIPMI.so.0()(64bit) for package: cluster-glue-1.0.6-9.fc17.1.x86_64 ---> Package cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1 will be installed ---> Package corosynclib.x86_64 0:1.99.9-1.fc17 will be installed --> Processing Dependency: librdmacm.so.1(RDMACM_1.0)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64 --> Processing Dependency: libibverbs.so.1(IBVERBS_1.1)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64 --> Processing Dependency: libibverbs.so.1(IBVERBS_1.0)(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64 --> Processing Dependency: librdmacm.so.1()(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64 --> Processing Dependency: libibverbs.so.1()(64bit) for package: corosynclib-1.99.9-1.fc17.x86_64 ---> Package gnutls.x86_64 0:2.12.17-1.fc17 will be installed --> Processing Dependency: libtasn1.so.3(LIBTASN1_0_3)(64bit) for package: gnutls-2.12.17-1.fc17.x86_64 --> 
Processing Dependency: libtasn1.so.3()(64bit) for package: gnutls-2.12.17-1.fc17.x86_64 --> Processing Dependency: libp11-kit.so.0()(64bit) for package: gnutls-2.12.17-1.fc17.x86_64 ---> Package libqb.x86_64 0:0.11.1-1.fc17 will be installed ---> Package libtool-ltdl.x86_64 0:2.4.2-3.fc17 will be installed ---> Package libxslt.x86_64 0:1.1.26-9.fc17 will be installed ---> Package net-snmp-libs.x86_64 1:5.7.1-4.fc17 will be installed ---> Package pacemaker-cli.x86_64 0:1.1.7-2.fc17 will be installed ---> Package pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17 will be installed ---> Package pacemaker-libs.x86_64 0:1.1.7-2.fc17 will be installed ---> Package perl.x86_64 4:5.14.2-211.fc17 will be installed --> Processing Dependency: perl-libs = 4:5.14.2-211.fc17 for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(threads::shared) >= 1.21 for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Socket) >= 1.3 for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Scalar::Util) >= 1.10 for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(File::Spec) >= 0.8 for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl-macros for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl-libs for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(threads::shared) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(threads) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Socket) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Scalar::Util) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Pod::Simple) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Module::Pluggable) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(List::Util) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(File::Spec::Unix) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(File::Spec::Functions) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(File::Spec) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Cwd) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: perl(Carp) for package: 4:perl-5.14.2-211.fc17.x86_64 --> Processing Dependency: libperl.so()(64bit) for package: 4:perl-5.14.2-211.fc17.x86_64 ---> Package resource-agents.x86_64 0:3.9.2-2.fc17.1 will be installed --> Processing Dependency: /usr/sbin/rpc.nfsd for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /usr/sbin/rpc.mountd for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /usr/sbin/ethtool for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/rpc.statd for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/quotaon for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/quotacheck for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/mount.nfs4 for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/mount.nfs for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/mount.cifs for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: /sbin/fsck.xfs for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Processing Dependency: 
libnet.so.1()(64bit) for package: resource-agents-3.9.2-2.fc17.1.x86_64 --> Running transaction check ---> Package OpenIPMI-libs.x86_64 0:2.0.18-13.fc17 will be installed ---> Package cifs-utils.x86_64 0:5.3-2.fc17 will be installed --> Processing Dependency: libtalloc.so.2(TALLOC_2.0.2)(64bit) for package: cifs-utils-5.3-2.fc17.x86_64 --> Processing Dependency: keyutils for package: cifs-utils-5.3-2.fc17.x86_64 --> Processing Dependency: libwbclient.so.0()(64bit) for package: cifs-utils-5.3-2.fc17.x86_64 --> Processing Dependency: libtalloc.so.2()(64bit) for package: cifs-utils-5.3-2.fc17.x86_64 ---> Package ethtool.x86_64 2:3.2-2.fc17 will be installed ---> Package libibverbs.x86_64 0:1.1.6-2.fc17 will be installed ---> Package libnet.x86_64 0:1.1.5-3.fc17 will be installed ---> Package librdmacm.x86_64 0:1.0.15-1.fc17 will be installed ---> Package libtasn1.x86_64 0:2.12-1.fc17 will be installed ---> Package nfs-utils.x86_64 1:1.2.5-12.fc17 will be installed --> Processing Dependency: rpcbind for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libtirpc for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libnfsidmap for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libgssglue.so.1(libgssapi_CITI_2)(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libgssglue for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libevent for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libtirpc.so.1()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libnfsidmap.so.0()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libgssglue.so.1()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 --> Processing Dependency: libevent-2.0.so.5()(64bit) for package: 1:nfs-utils-1.2.5-12.fc17.x86_64 ---> Package p11-kit.x86_64 0:0.12-1.fc17 will be installed ---> Package perl-Carp.noarch 0:1.22-2.fc17 will be installed ---> Package perl-Module-Pluggable.noarch 1:3.90-211.fc17 will be installed ---> Package perl-PathTools.x86_64 0:3.33-211.fc17 will be installed ---> Package perl-Pod-Simple.noarch 1:3.16-211.fc17 will be installed --> Processing Dependency: perl(Pod::Escapes) >= 1.04 for package: 1:perl-Pod-Simple-3.16-211.fc17.noarch ---> Package perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17 will be installed ---> Package perl-Socket.x86_64 0:2.001-1.fc17 will be installed ---> Package perl-TimeDate.noarch 1:1.20-6.fc17 will be installed ---> Package perl-libs.x86_64 4:5.14.2-211.fc17 will be installed ---> Package perl-macros.x86_64 4:5.14.2-211.fc17 will be installed ---> Package perl-threads.x86_64 0:1.86-2.fc17 will be installed ---> Package perl-threads-shared.x86_64 0:1.40-2.fc17 will be installed ---> Package quota.x86_64 1:4.00-3.fc17 will be installed --> Processing Dependency: quota-nls = 1:4.00-3.fc17 for package: 1:quota-4.00-3.fc17.x86_64 --> Processing Dependency: tcp_wrappers for package: 1:quota-4.00-3.fc17.x86_64 ---> Package xfsprogs.x86_64 0:3.1.8-1.fc17 will be installed --> Running transaction check ---> Package keyutils.x86_64 0:1.5.5-2.fc17 will be installed ---> Package libevent.x86_64 0:2.0.14-2.fc17 will be installed ---> Package libgssglue.x86_64 0:0.3-1.fc17 will be installed ---> Package libnfsidmap.x86_64 0:0.25-1.fc17 will be installed ---> Package libtalloc.x86_64 0:2.0.7-4.fc17 will be installed ---> Package libtirpc.x86_64 0:0.2.2-2.1.fc17 will be installed ---> Package libwbclient.x86_64 
1:3.6.3-81.fc17.1 will be installed ---> Package perl-Pod-Escapes.noarch 1:1.04-211.fc17 will be installed ---> Package quota-nls.noarch 1:4.00-3.fc17 will be installed ---> Package rpcbind.x86_64 0:0.2.0-16.fc17 will be installed ---> Package tcp_wrappers.x86_64 0:7.6-69.fc17 will be installed --> Finished Dependency Resolution Dependencies Resolved ============================================================================================== Package Arch Version Repository Size ===================================================================================== Installing: corosync x86_64 1.99.9-1.fc17 updates-testing 159 k pacemaker x86_64 1.1.7-2.fc17 updates-testing 362 k Installing for dependencies: OpenIPMI-libs x86_64 2.0.18-13.fc17 fedora 466 k cifs-utils x86_64 5.3-2.fc17 updates-testing 66 k cluster-glue x86_64 1.0.6-9.fc17.1 fedora 229 k cluster-glue-libs x86_64 1.0.6-9.fc17.1 fedora 121 k corosynclib x86_64 1.99.9-1.fc17 updates-testing 96 k ethtool x86_64 2:3.2-2.fc17 fedora 94 k gnutls x86_64 2.12.17-1.fc17 fedora 385 k keyutils x86_64 1.5.5-2.fc17 fedora 49 k libevent x86_64 2.0.14-2.fc17 fedora 160 k libgssglue x86_64 0.3-1.fc17 fedora 24 k libibverbs x86_64 1.1.6-2.fc17 fedora 44 k libnet x86_64 1.1.5-3.fc17 fedora 54 k libnfsidmap x86_64 0.25-1.fc17 fedora 34 k libqb x86_64 0.11.1-1.fc17 updates-testing 68 k librdmacm x86_64 1.0.15-1.fc17 fedora 27 k libtalloc x86_64 2.0.7-4.fc17 fedora 22 k libtasn1 x86_64 2.12-1.fc17 updates-testing 319 k libtirpc x86_64 0.2.2-2.1.fc17 fedora 78 k libtool-ltdl x86_64 2.4.2-3.fc17 fedora 45 k libwbclient x86_64 1:3.6.3-81.fc17.1 updates-testing 68 k libxslt x86_64 1.1.26-9.fc17 fedora 416 k net-snmp-libs x86_64 1:5.7.1-4.fc17 fedora 713 k nfs-utils x86_64 1:1.2.5-12.fc17 fedora 311 k p11-kit x86_64 0.12-1.fc17 updates-testing 36 k pacemaker-cli x86_64 1.1.7-2.fc17 updates-testing 368 k pacemaker-cluster-libs x86_64 1.1.7-2.fc17 updates-testing 77 k pacemaker-libs x86_64 1.1.7-2.fc17 updates-testing 322 k perl x86_64 4:5.14.2-211.fc17 fedora 10 M perl-Carp noarch 1.22-2.fc17 fedora 17 k perl-Module-Pluggable noarch 1:3.90-211.fc17 fedora 47 k perl-PathTools x86_64 3.33-211.fc17 fedora 105 k perl-Pod-Escapes noarch 1:1.04-211.fc17 fedora 40 k perl-Pod-Simple noarch 1:3.16-211.fc17 fedora 223 k perl-Scalar-List-Utils x86_64 1.25-1.fc17 updates-testing 33 k perl-Socket x86_64 2.001-1.fc17 updates-testing 44 k perl-TimeDate noarch 1:1.20-6.fc17 fedora 43 k perl-libs x86_64 4:5.14.2-211.fc17 fedora 628 k perl-macros x86_64 4:5.14.2-211.fc17 fedora 32 k perl-threads x86_64 1.86-2.fc17 fedora 47 k perl-threads-shared x86_64 1.40-2.fc17 fedora 36 k quota x86_64 1:4.00-3.fc17 fedora 160 k quota-nls noarch 1:4.00-3.fc17 fedora 74 k resource-agents x86_64 3.9.2-2.fc17.1 fedora 466 k rpcbind x86_64 0.2.0-16.fc17 fedora 52 k tcp_wrappers x86_64 7.6-69.fc17 fedora 72 k xfsprogs x86_64 3.1.8-1.fc17 updates-testing 715 k Transaction Summary ===================================================================================== Install 2 Packages (+46 Dependent packages) Total download size: 18 M Installed size: 59 M Downloading Packages: -(1/48): OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm | 466 kB 00:00 +(1/48): OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm | 466 kB 00:00 warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID 1aca3465: NOKEY Public key for OpenIPMI-libs-2.0.18-13.fc17.x86_64.rpm is not installed -(2/48): cifs-utils-5.3-2.fc17.x86_64.rpm | 66 kB 00:01 +(2/48): cifs-utils-5.3-2.fc17.x86_64.rpm | 66 kB 00:01 Public key for 
cifs-utils-5.3-2.fc17.x86_64.rpm is not installed -(3/48): cluster-glue-1.0.6-9.fc17.1.x86_64.rpm | 229 kB 00:00 -(4/48): cluster-glue-libs-1.0.6-9.fc17.1.x86_64.rpm | 121 kB 00:00 -(5/48): corosync-1.99.9-1.fc17.x86_64.rpm | 159 kB 00:01 -(6/48): corosynclib-1.99.9-1.fc17.x86_64.rpm | 96 kB 00:00 -(7/48): ethtool-3.2-2.fc17.x86_64.rpm | 94 kB 00:00 -(8/48): gnutls-2.12.17-1.fc17.x86_64.rpm | 385 kB 00:00 -(9/48): keyutils-1.5.5-2.fc17.x86_64.rpm | 49 kB 00:00 -(10/48): libevent-2.0.14-2.fc17.x86_64.rpm | 160 kB 00:00 -(11/48): libgssglue-0.3-1.fc17.x86_64.rpm | 24 kB 00:00 -(12/48): libibverbs-1.1.6-2.fc17.x86_64.rpm | 44 kB 00:00 -(13/48): libnet-1.1.5-3.fc17.x86_64.rpm | 54 kB 00:00 -(14/48): libnfsidmap-0.25-1.fc17.x86_64.rpm | 34 kB 00:00 -(15/48): libqb-0.11.1-1.fc17.x86_64.rpm | 68 kB 00:01 -(16/48): librdmacm-1.0.15-1.fc17.x86_64.rpm | 27 kB 00:00 -(17/48): libtalloc-2.0.7-4.fc17.x86_64.rpm | 22 kB 00:00 -(18/48): libtasn1-2.12-1.fc17.x86_64.rpm | 319 kB 00:02 -(19/48): libtirpc-0.2.2-2.1.fc17.x86_64.rpm | 78 kB 00:00 -(20/48): libtool-ltdl-2.4.2-3.fc17.x86_64.rpm | 45 kB 00:00 -(21/48): libwbclient-3.6.3-81.fc17.1.x86_64.rpm | 68 kB 00:00 -(22/48): libxslt-1.1.26-9.fc17.x86_64.rpm | 416 kB 00:00 -(23/48): net-snmp-libs-5.7.1-4.fc17.x86_64.rpm | 713 kB 00:01 -(24/48): nfs-utils-1.2.5-12.fc17.x86_64.rpm | 311 kB 00:00 -(25/48): p11-kit-0.12-1.fc17.x86_64.rpm | 36 kB 00:01 -(26/48): pacemaker-1.1.7-2.fc17.x86_64.rpm | 362 kB 00:02 -(27/48): pacemaker-cli-1.1.7-2.fc17.x86_64.rpm | 368 kB 00:02 -(28/48): pacemaker-cluster-libs-1.1.7-2.fc17.x86_64.rpm | 77 kB 00:00 -(29/48): pacemaker-libs-1.1.7-2.fc17.x86_64.rpm | 322 kB 00:01 -(30/48): perl-5.14.2-211.fc17.x86_64.rpm | 10 MB 00:15 -(31/48): perl-Carp-1.22-2.fc17.noarch.rpm | 17 kB 00:00 -(32/48): perl-Module-Pluggable-3.90-211.fc17.noarch.rpm | 47 kB 00:00 -(33/48): perl-PathTools-3.33-211.fc17.x86_64.rpm | 105 kB 00:00 -(34/48): perl-Pod-Escapes-1.04-211.fc17.noarch.rpm | 40 kB 00:00 -(35/48): perl-Pod-Simple-3.16-211.fc17.noarch.rpm | 223 kB 00:00 -(36/48): perl-Scalar-List-Utils-1.25-1.fc17.x86_64.rpm | 33 kB 00:01 -(37/48): perl-Socket-2.001-1.fc17.x86_64.rpm | 44 kB 00:00 -(38/48): perl-TimeDate-1.20-6.fc17.noarch.rpm | 43 kB 00:00 -(39/48): perl-libs-5.14.2-211.fc17.x86_64.rpm | 628 kB 00:00 -(40/48): perl-macros-5.14.2-211.fc17.x86_64.rpm | 32 kB 00:00 -(41/48): perl-threads-1.86-2.fc17.x86_64.rpm | 47 kB 00:00 -(42/48): perl-threads-shared-1.40-2.fc17.x86_64.rpm | 36 kB 00:00 -(43/48): quota-4.00-3.fc17.x86_64.rpm | 160 kB 00:00 -(44/48): quota-nls-4.00-3.fc17.noarch.rpm | 74 kB 00:00 -(45/48): resource-agents-3.9.2-2.fc17.1.x86_64.rpm | 466 kB 00:00 -(46/48): rpcbind-0.2.0-16.fc17.x86_64.rpm | 52 kB 00:00 -(47/48): tcp_wrappers-7.6-69.fc17.x86_64.rpm | 72 kB 00:00 -(48/48): xfsprogs-3.1.8-1.fc17.x86_64.rpm | 715 kB 00:03 +(3/48): cluster-glue-1.0.6-9.fc17.1.x86_64.rpm | 229 kB 00:00 +(4/48): cluster-glue-libs-1.0.6-9.fc17.1.x86_64.rpm | 121 kB 00:00 +(5/48): corosync-1.99.9-1.fc17.x86_64.rpm | 159 kB 00:01 +(6/48): corosynclib-1.99.9-1.fc17.x86_64.rpm | 96 kB 00:00 +(7/48): ethtool-3.2-2.fc17.x86_64.rpm | 94 kB 00:00 +(8/48): gnutls-2.12.17-1.fc17.x86_64.rpm | 385 kB 00:00 +(9/48): keyutils-1.5.5-2.fc17.x86_64.rpm | 49 kB 00:00 +(10/48): libevent-2.0.14-2.fc17.x86_64.rpm | 160 kB 00:00 +(11/48): libgssglue-0.3-1.fc17.x86_64.rpm | 24 kB 00:00 +(12/48): libibverbs-1.1.6-2.fc17.x86_64.rpm | 44 kB 00:00 +(13/48): libnet-1.1.5-3.fc17.x86_64.rpm | 54 kB 00:00 +(14/48): libnfsidmap-0.25-1.fc17.x86_64.rpm | 34 kB 00:00 +(15/48): 
libqb-0.11.1-1.fc17.x86_64.rpm | 68 kB 00:01 +(16/48): librdmacm-1.0.15-1.fc17.x86_64.rpm | 27 kB 00:00 +(17/48): libtalloc-2.0.7-4.fc17.x86_64.rpm | 22 kB 00:00 +(18/48): libtasn1-2.12-1.fc17.x86_64.rpm | 319 kB 00:02 +(19/48): libtirpc-0.2.2-2.1.fc17.x86_64.rpm | 78 kB 00:00 +(20/48): libtool-ltdl-2.4.2-3.fc17.x86_64.rpm | 45 kB 00:00 +(21/48): libwbclient-3.6.3-81.fc17.1.x86_64.rpm | 68 kB 00:00 +(22/48): libxslt-1.1.26-9.fc17.x86_64.rpm | 416 kB 00:00 +(23/48): net-snmp-libs-5.7.1-4.fc17.x86_64.rpm | 713 kB 00:01 +(24/48): nfs-utils-1.2.5-12.fc17.x86_64.rpm | 311 kB 00:00 +(25/48): p11-kit-0.12-1.fc17.x86_64.rpm | 36 kB 00:01 +(26/48): pacemaker-1.1.7-2.fc17.x86_64.rpm | 362 kB 00:02 +(27/48): pacemaker-cli-1.1.7-2.fc17.x86_64.rpm | 368 kB 00:02 +(28/48): pacemaker-cluster-libs-1.1.7-2.fc17.x86_64.rpm | 77 kB 00:00 +(29/48): pacemaker-libs-1.1.7-2.fc17.x86_64.rpm | 322 kB 00:01 +(30/48): perl-5.14.2-211.fc17.x86_64.rpm | 10 MB 00:15 +(31/48): perl-Carp-1.22-2.fc17.noarch.rpm | 17 kB 00:00 +(32/48): perl-Module-Pluggable-3.90-211.fc17.noarch.rpm | 47 kB 00:00 +(33/48): perl-PathTools-3.33-211.fc17.x86_64.rpm | 105 kB 00:00 +(34/48): perl-Pod-Escapes-1.04-211.fc17.noarch.rpm | 40 kB 00:00 +(35/48): perl-Pod-Simple-3.16-211.fc17.noarch.rpm | 223 kB 00:00 +(36/48): perl-Scalar-List-Utils-1.25-1.fc17.x86_64.rpm | 33 kB 00:01 +(37/48): perl-Socket-2.001-1.fc17.x86_64.rpm | 44 kB 00:00 +(38/48): perl-TimeDate-1.20-6.fc17.noarch.rpm | 43 kB 00:00 +(39/48): perl-libs-5.14.2-211.fc17.x86_64.rpm | 628 kB 00:00 +(40/48): perl-macros-5.14.2-211.fc17.x86_64.rpm | 32 kB 00:00 +(41/48): perl-threads-1.86-2.fc17.x86_64.rpm | 47 kB 00:00 +(42/48): perl-threads-shared-1.40-2.fc17.x86_64.rpm | 36 kB 00:00 +(43/48): quota-4.00-3.fc17.x86_64.rpm | 160 kB 00:00 +(44/48): quota-nls-4.00-3.fc17.noarch.rpm | 74 kB 00:00 +(45/48): resource-agents-3.9.2-2.fc17.1.x86_64.rpm | 466 kB 00:00 +(46/48): rpcbind-0.2.0-16.fc17.x86_64.rpm | 52 kB 00:00 +(47/48): tcp_wrappers-7.6-69.fc17.x86_64.rpm | 72 kB 00:00 +(48/48): xfsprogs-3.1.8-1.fc17.x86_64.rpm | 715 kB 00:03 --------------------------------------------------------------------------------------- -Total 333 kB/s | 18 MB 00:55 +Total 333 kB/s | 18 MB 00:55 Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64 Importing GPG key 0x1ACA3465: Userid : "Fedora (17) " Fingerprint: cac4 3fb7 74a4 a673 d81c 5de7 50e9 4c99 1aca 3465 Package : fedora-release-17-0.8.noarch (@anaconda-0) From : /etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-x86_64 Running Transaction Check Running Transaction Test Transaction Test Succeeded Running Transaction - Installing : libqb-0.11.1-1.fc17.x86_64 1/48 - Installing : libtool-ltdl-2.4.2-3.fc17.x86_64 2/48 - Installing : cluster-glue-libs-1.0.6-9.fc17.1.x86_64 3/48 - Installing : libxslt-1.1.26-9.fc17.x86_64 4/48 - Installing : 1:perl-Pod-Escapes-1.04-211.fc17.noarch 5/48 - Installing : perl-threads-1.86-2.fc17.x86_64 6/48 - Installing : 4:perl-macros-5.14.2-211.fc17.x86_64 7/48 - Installing : 1:perl-Pod-Simple-3.16-211.fc17.noarch 8/48 - Installing : perl-Socket-2.001-1.fc17.x86_64 9/48 - Installing : perl-Carp-1.22-2.fc17.noarch 10/48 - Installing : 4:perl-libs-5.14.2-211.fc17.x86_64 11/48 - Installing : perl-threads-shared-1.40-2.fc17.x86_64 12/48 - Installing : perl-Scalar-List-Utils-1.25-1.fc17.x86_64 13/48 - Installing : 1:perl-Module-Pluggable-3.90-211.fc17.noarch 14/48 - Installing : perl-PathTools-3.33-211.fc17.x86_64 15/48 - Installing : 4:perl-5.14.2-211.fc17.x86_64 16/48 - Installing : libibverbs-1.1.6-2.fc17.x86_64 17/48 - 
Installing : keyutils-1.5.5-2.fc17.x86_64 18/48 - Installing : libgssglue-0.3-1.fc17.x86_64 19/48 - Installing : libtirpc-0.2.2-2.1.fc17.x86_64 20/48 - Installing : 1:net-snmp-libs-5.7.1-4.fc17.x86_64 21/48 - Installing : rpcbind-0.2.0-16.fc17.x86_64 22/48 - Installing : librdmacm-1.0.15-1.fc17.x86_64 23/48 - Installing : corosynclib-1.99.9-1.fc17.x86_64 24/48 - Installing : corosync-1.99.9-1.fc17.x86_64 25/48 + Installing : libqb-0.11.1-1.fc17.x86_64 1/48 + Installing : libtool-ltdl-2.4.2-3.fc17.x86_64 2/48 + Installing : cluster-glue-libs-1.0.6-9.fc17.1.x86_64 3/48 + Installing : libxslt-1.1.26-9.fc17.x86_64 4/48 + Installing : 1:perl-Pod-Escapes-1.04-211.fc17.noarch 5/48 + Installing : perl-threads-1.86-2.fc17.x86_64 6/48 + Installing : 4:perl-macros-5.14.2-211.fc17.x86_64 7/48 + Installing : 1:perl-Pod-Simple-3.16-211.fc17.noarch 8/48 + Installing : perl-Socket-2.001-1.fc17.x86_64 9/48 + Installing : perl-Carp-1.22-2.fc17.noarch 10/48 + Installing : 4:perl-libs-5.14.2-211.fc17.x86_64 11/48 + Installing : perl-threads-shared-1.40-2.fc17.x86_64 12/48 + Installing : perl-Scalar-List-Utils-1.25-1.fc17.x86_64 13/48 + Installing : 1:perl-Module-Pluggable-3.90-211.fc17.noarch 14/48 + Installing : perl-PathTools-3.33-211.fc17.x86_64 15/48 + Installing : 4:perl-5.14.2-211.fc17.x86_64 16/48 + Installing : libibverbs-1.1.6-2.fc17.x86_64 17/48 + Installing : keyutils-1.5.5-2.fc17.x86_64 18/48 + Installing : libgssglue-0.3-1.fc17.x86_64 19/48 + Installing : libtirpc-0.2.2-2.1.fc17.x86_64 20/48 + Installing : 1:net-snmp-libs-5.7.1-4.fc17.x86_64 21/48 + Installing : rpcbind-0.2.0-16.fc17.x86_64 22/48 + Installing : librdmacm-1.0.15-1.fc17.x86_64 23/48 + Installing : corosynclib-1.99.9-1.fc17.x86_64 24/48 + Installing : corosync-1.99.9-1.fc17.x86_64 25/48 error reading information on service corosync: No such file or directory - Installing : 1:perl-TimeDate-1.20-6.fc17.noarch 26/48 - Installing : 1:quota-nls-4.00-3.fc17.noarch 27/48 - Installing : tcp_wrappers-7.6-69.fc17.x86_64 28/48 - Installing : 1:quota-4.00-3.fc17.x86_64 29/48 - Installing : libnfsidmap-0.25-1.fc17.x86_64 30/48 - Installing : 1:libwbclient-3.6.3-81.fc17.1.x86_64 31/48 - Installing : libnet-1.1.5-3.fc17.x86_64 32/48 - Installing : 2:ethtool-3.2-2.fc17.x86_64 33/48 - Installing : libevent-2.0.14-2.fc17.x86_64 34/48 - Installing : 1:nfs-utils-1.2.5-12.fc17.x86_64 35/48 - Installing : libtalloc-2.0.7-4.fc17.x86_64 36/48 - Installing : cifs-utils-5.3-2.fc17.x86_64 37/48 - Installing : libtasn1-2.12-1.fc17.x86_64 38/48 - Installing : OpenIPMI-libs-2.0.18-13.fc17.x86_64 39/48 - Installing : cluster-glue-1.0.6-9.fc17.1.x86_64 40/48 - Installing : p11-kit-0.12-1.fc17.x86_64 41/48 - Installing : gnutls-2.12.17-1.fc17.x86_64 42/48 - Installing : pacemaker-libs-1.1.7-2.fc17.x86_64 43/48 - Installing : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64 44/48 - Installing : pacemaker-cli-1.1.7-2.fc17.x86_64 45/48 - Installing : xfsprogs-3.1.8-1.fc17.x86_64 46/48 - Installing : resource-agents-3.9.2-2.fc17.1.x86_64 47/48 - Installing : pacemaker-1.1.7-2.fc17.x86_64 48/48 - Verifying : xfsprogs-3.1.8-1.fc17.x86_64 1/48 - Verifying : 1:net-snmp-libs-5.7.1-4.fc17.x86_64 2/48 - Verifying : corosync-1.99.9-1.fc17.x86_64 3/48 - Verifying : cluster-glue-1.0.6-9.fc17.1.x86_64 4/48 - Verifying : perl-PathTools-3.33-211.fc17.x86_64 5/48 - Verifying : p11-kit-0.12-1.fc17.x86_64 6/48 - Verifying : 1:perl-Pod-Simple-3.16-211.fc17.noarch 7/48 - Verifying : OpenIPMI-libs-2.0.18-13.fc17.x86_64 8/48 - Verifying : libtasn1-2.12-1.fc17.x86_64 9/48 - Verifying : 
perl-threads-1.86-2.fc17.x86_64 10/48 - Verifying : 1:perl-Pod-Escapes-1.04-211.fc17.noarch 11/48 - Verifying : pacemaker-1.1.7-2.fc17.x86_64 12/48 - Verifying : 4:perl-5.14.2-211.fc17.x86_64 13/48 - Verifying : gnutls-2.12.17-1.fc17.x86_64 14/48 - Verifying : perl-threads-shared-1.40-2.fc17.x86_64 15/48 - Verifying : 4:perl-macros-5.14.2-211.fc17.x86_64 16/48 - Verifying : 1:perl-Module-Pluggable-3.90-211.fc17.noarch 17/48 - Verifying : 1:nfs-utils-1.2.5-12.fc17.x86_64 18/48 - Verifying : cluster-glue-libs-1.0.6-9.fc17.1.x86_64 19/48 - Verifying : pacemaker-libs-1.1.7-2.fc17.x86_64 20/48 - Verifying : libtalloc-2.0.7-4.fc17.x86_64 21/48 - Verifying : libevent-2.0.14-2.fc17.x86_64 22/48 - Verifying : perl-Socket-2.001-1.fc17.x86_64 23/48 - Verifying : libgssglue-0.3-1.fc17.x86_64 24/48 - Verifying : perl-Carp-1.22-2.fc17.noarch 25/48 - Verifying : libtirpc-0.2.2-2.1.fc17.x86_64 26/48 - Verifying : 2:ethtool-3.2-2.fc17.x86_64 27/48 - Verifying : 4:perl-libs-5.14.2-211.fc17.x86_64 28/48 - Verifying : libxslt-1.1.26-9.fc17.x86_64 29/48 - Verifying : rpcbind-0.2.0-16.fc17.x86_64 30/48 - Verifying : librdmacm-1.0.15-1.fc17.x86_64 31/48 - Verifying : resource-agents-3.9.2-2.fc17.1.x86_64 32/48 - Verifying : 1:quota-4.00-3.fc17.x86_64 33/48 - Verifying : 1:perl-TimeDate-1.20-6.fc17.noarch 34/48 - Verifying : perl-Scalar-List-Utils-1.25-1.fc17.x86_64 35/48 - Verifying : libtool-ltdl-2.4.2-3.fc17.x86_64 36/48 - Verifying : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64 37/48 - Verifying : cifs-utils-5.3-2.fc17.x86_64 38/48 - Verifying : libnet-1.1.5-3.fc17.x86_64 39/48 - Verifying : corosynclib-1.99.9-1.fc17.x86_64 40/48 - Verifying : libqb-0.11.1-1.fc17.x86_64 41/48 - Verifying : 1:libwbclient-3.6.3-81.fc17.1.x86_64 42/48 - Verifying : libnfsidmap-0.25-1.fc17.x86_64 43/48 - Verifying : tcp_wrappers-7.6-69.fc17.x86_64 44/48 - Verifying : keyutils-1.5.5-2.fc17.x86_64 45/48 - Verifying : libibverbs-1.1.6-2.fc17.x86_64 46/48 - Verifying : 1:quota-nls-4.00-3.fc17.noarch 47/48 - Verifying : pacemaker-cli-1.1.7-2.fc17.x86_64 48/48 + Installing : 1:perl-TimeDate-1.20-6.fc17.noarch 26/48 + Installing : 1:quota-nls-4.00-3.fc17.noarch 27/48 + Installing : tcp_wrappers-7.6-69.fc17.x86_64 28/48 + Installing : 1:quota-4.00-3.fc17.x86_64 29/48 + Installing : libnfsidmap-0.25-1.fc17.x86_64 30/48 + Installing : 1:libwbclient-3.6.3-81.fc17.1.x86_64 31/48 + Installing : libnet-1.1.5-3.fc17.x86_64 32/48 + Installing : 2:ethtool-3.2-2.fc17.x86_64 33/48 + Installing : libevent-2.0.14-2.fc17.x86_64 34/48 + Installing : 1:nfs-utils-1.2.5-12.fc17.x86_64 35/48 + Installing : libtalloc-2.0.7-4.fc17.x86_64 36/48 + Installing : cifs-utils-5.3-2.fc17.x86_64 37/48 + Installing : libtasn1-2.12-1.fc17.x86_64 38/48 + Installing : OpenIPMI-libs-2.0.18-13.fc17.x86_64 39/48 + Installing : cluster-glue-1.0.6-9.fc17.1.x86_64 40/48 + Installing : p11-kit-0.12-1.fc17.x86_64 41/48 + Installing : gnutls-2.12.17-1.fc17.x86_64 42/48 + Installing : pacemaker-libs-1.1.7-2.fc17.x86_64 43/48 + Installing : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64 44/48 + Installing : pacemaker-cli-1.1.7-2.fc17.x86_64 45/48 + Installing : xfsprogs-3.1.8-1.fc17.x86_64 46/48 + Installing : resource-agents-3.9.2-2.fc17.1.x86_64 47/48 + Installing : pacemaker-1.1.7-2.fc17.x86_64 48/48 + Verifying : xfsprogs-3.1.8-1.fc17.x86_64 1/48 + Verifying : 1:net-snmp-libs-5.7.1-4.fc17.x86_64 2/48 + Verifying : corosync-1.99.9-1.fc17.x86_64 3/48 + Verifying : cluster-glue-1.0.6-9.fc17.1.x86_64 4/48 + Verifying : perl-PathTools-3.33-211.fc17.x86_64 5/48 + Verifying : 
p11-kit-0.12-1.fc17.x86_64 6/48 + Verifying : 1:perl-Pod-Simple-3.16-211.fc17.noarch 7/48 + Verifying : OpenIPMI-libs-2.0.18-13.fc17.x86_64 8/48 + Verifying : libtasn1-2.12-1.fc17.x86_64 9/48 + Verifying : perl-threads-1.86-2.fc17.x86_64 10/48 + Verifying : 1:perl-Pod-Escapes-1.04-211.fc17.noarch 11/48 + Verifying : pacemaker-1.1.7-2.fc17.x86_64 12/48 + Verifying : 4:perl-5.14.2-211.fc17.x86_64 13/48 + Verifying : gnutls-2.12.17-1.fc17.x86_64 14/48 + Verifying : perl-threads-shared-1.40-2.fc17.x86_64 15/48 + Verifying : 4:perl-macros-5.14.2-211.fc17.x86_64 16/48 + Verifying : 1:perl-Module-Pluggable-3.90-211.fc17.noarch 17/48 + Verifying : 1:nfs-utils-1.2.5-12.fc17.x86_64 18/48 + Verifying : cluster-glue-libs-1.0.6-9.fc17.1.x86_64 19/48 + Verifying : pacemaker-libs-1.1.7-2.fc17.x86_64 20/48 + Verifying : libtalloc-2.0.7-4.fc17.x86_64 21/48 + Verifying : libevent-2.0.14-2.fc17.x86_64 22/48 + Verifying : perl-Socket-2.001-1.fc17.x86_64 23/48 + Verifying : libgssglue-0.3-1.fc17.x86_64 24/48 + Verifying : perl-Carp-1.22-2.fc17.noarch 25/48 + Verifying : libtirpc-0.2.2-2.1.fc17.x86_64 26/48 + Verifying : 2:ethtool-3.2-2.fc17.x86_64 27/48 + Verifying : 4:perl-libs-5.14.2-211.fc17.x86_64 28/48 + Verifying : libxslt-1.1.26-9.fc17.x86_64 29/48 + Verifying : rpcbind-0.2.0-16.fc17.x86_64 30/48 + Verifying : librdmacm-1.0.15-1.fc17.x86_64 31/48 + Verifying : resource-agents-3.9.2-2.fc17.1.x86_64 32/48 + Verifying : 1:quota-4.00-3.fc17.x86_64 33/48 + Verifying : 1:perl-TimeDate-1.20-6.fc17.noarch 34/48 + Verifying : perl-Scalar-List-Utils-1.25-1.fc17.x86_64 35/48 + Verifying : libtool-ltdl-2.4.2-3.fc17.x86_64 36/48 + Verifying : pacemaker-cluster-libs-1.1.7-2.fc17.x86_64 37/48 + Verifying : cifs-utils-5.3-2.fc17.x86_64 38/48 + Verifying : libnet-1.1.5-3.fc17.x86_64 39/48 + Verifying : corosynclib-1.99.9-1.fc17.x86_64 40/48 + Verifying : libqb-0.11.1-1.fc17.x86_64 41/48 + Verifying : 1:libwbclient-3.6.3-81.fc17.1.x86_64 42/48 + Verifying : libnfsidmap-0.25-1.fc17.x86_64 43/48 + Verifying : tcp_wrappers-7.6-69.fc17.x86_64 44/48 + Verifying : keyutils-1.5.5-2.fc17.x86_64 45/48 + Verifying : libibverbs-1.1.6-2.fc17.x86_64 46/48 + Verifying : 1:quota-nls-4.00-3.fc17.noarch 47/48 + Verifying : pacemaker-cli-1.1.7-2.fc17.x86_64 48/48 Installed: - corosync.x86_64 0:1.99.9-1.fc17 pacemaker.x86_64 0:1.1.7-2.fc17 + corosync.x86_64 0:1.99.9-1.fc17 pacemaker.x86_64 0:1.1.7-2.fc17 Dependency Installed: - OpenIPMI-libs.x86_64 0:2.0.18-13.fc17 cifs-utils.x86_64 0:5.3-2.fc17 - cluster-glue.x86_64 0:1.0.6-9.fc17.1 cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1 - corosynclib.x86_64 0:1.99.9-1.fc17 ethtool.x86_64 2:3.2-2.fc17 - gnutls.x86_64 0:2.12.17-1.fc17 keyutils.x86_64 0:1.5.5-2.fc17 - libevent.x86_64 0:2.0.14-2.fc17 libgssglue.x86_64 0:0.3-1.fc17 - libibverbs.x86_64 0:1.1.6-2.fc17 libnet.x86_64 0:1.1.5-3.fc17 - libnfsidmap.x86_64 0:0.25-1.fc17 libqb.x86_64 0:0.11.1-1.fc17 - librdmacm.x86_64 0:1.0.15-1.fc17 libtalloc.x86_64 0:2.0.7-4.fc17 - libtasn1.x86_64 0:2.12-1.fc17 libtirpc.x86_64 0:0.2.2-2.1.fc17 - libtool-ltdl.x86_64 0:2.4.2-3.fc17 libwbclient.x86_64 1:3.6.3-81.fc17.1 - libxslt.x86_64 0:1.1.26-9.fc17 net-snmp-libs.x86_64 1:5.7.1-4.fc17 - nfs-utils.x86_64 1:1.2.5-12.fc17 p11-kit.x86_64 0:0.12-1.fc17 - pacemaker-cli.x86_64 0:1.1.7-2.fc17 pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17 - pacemaker-libs.x86_64 0:1.1.7-2.fc17 perl.x86_64 4:5.14.2-211.fc17 - perl-Carp.noarch 0:1.22-2.fc17 perl-Module-Pluggable.noarch 1:3.90-211.fc17 - perl-PathTools.x86_64 0:3.33-211.fc17 perl-Pod-Escapes.noarch 1:1.04-211.fc17 - 
perl-Pod-Simple.noarch 1:3.16-211.fc17 perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17 - perl-Socket.x86_64 0:2.001-1.fc17 perl-TimeDate.noarch 1:1.20-6.fc17 - perl-libs.x86_64 4:5.14.2-211.fc17 perl-macros.x86_64 4:5.14.2-211.fc17 - perl-threads.x86_64 0:1.86-2.fc17 perl-threads-shared.x86_64 0:1.40-2.fc17 - quota.x86_64 1:4.00-3.fc17 quota-nls.noarch 1:4.00-3.fc17 - resource-agents.x86_64 0:3.9.2-2.fc17.1 rpcbind.x86_64 0:0.2.0-16.fc17 - tcp_wrappers.x86_64 0:7.6-69.fc17 xfsprogs.x86_64 0:3.1.8-1.fc17 + OpenIPMI-libs.x86_64 0:2.0.18-13.fc17 cifs-utils.x86_64 0:5.3-2.fc17 + cluster-glue.x86_64 0:1.0.6-9.fc17.1 cluster-glue-libs.x86_64 0:1.0.6-9.fc17.1 + corosynclib.x86_64 0:1.99.9-1.fc17 ethtool.x86_64 2:3.2-2.fc17 + gnutls.x86_64 0:2.12.17-1.fc17 keyutils.x86_64 0:1.5.5-2.fc17 + libevent.x86_64 0:2.0.14-2.fc17 libgssglue.x86_64 0:0.3-1.fc17 + libibverbs.x86_64 0:1.1.6-2.fc17 libnet.x86_64 0:1.1.5-3.fc17 + libnfsidmap.x86_64 0:0.25-1.fc17 libqb.x86_64 0:0.11.1-1.fc17 + librdmacm.x86_64 0:1.0.15-1.fc17 libtalloc.x86_64 0:2.0.7-4.fc17 + libtasn1.x86_64 0:2.12-1.fc17 libtirpc.x86_64 0:0.2.2-2.1.fc17 + libtool-ltdl.x86_64 0:2.4.2-3.fc17 libwbclient.x86_64 1:3.6.3-81.fc17.1 + libxslt.x86_64 0:1.1.26-9.fc17 net-snmp-libs.x86_64 1:5.7.1-4.fc17 + nfs-utils.x86_64 1:1.2.5-12.fc17 p11-kit.x86_64 0:0.12-1.fc17 + pacemaker-cli.x86_64 0:1.1.7-2.fc17 pacemaker-cluster-libs.x86_64 0:1.1.7-2.fc17 + pacemaker-libs.x86_64 0:1.1.7-2.fc17 perl.x86_64 4:5.14.2-211.fc17 + perl-Carp.noarch 0:1.22-2.fc17 perl-Module-Pluggable.noarch 1:3.90-211.fc17 + perl-PathTools.x86_64 0:3.33-211.fc17 perl-Pod-Escapes.noarch 1:1.04-211.fc17 + perl-Pod-Simple.noarch 1:3.16-211.fc17 perl-Scalar-List-Utils.x86_64 0:1.25-1.fc17 + perl-Socket.x86_64 0:2.001-1.fc17 perl-TimeDate.noarch 1:1.20-6.fc17 + perl-libs.x86_64 4:5.14.2-211.fc17 perl-macros.x86_64 4:5.14.2-211.fc17 + perl-threads.x86_64 0:1.86-2.fc17 perl-threads-shared.x86_64 0:1.40-2.fc17 + quota.x86_64 1:4.00-3.fc17 quota-nls.noarch 1:4.00-3.fc17 + resource-agents.x86_64 0:3.9.2-2.fc17.1 rpcbind.x86_64 0:0.2.0-16.fc17 + tcp_wrappers.x86_64 0:7.6-69.fc17 xfsprogs.x86_64 0:3.1.8-1.fc17 Complete! -[root@pcmk-1 ~]# +[root@pcmk-1 ~]# ..... Now install the cluster software on the second node. == Setup == === Configuring Corosync === -Choose a port number and multi-cast footnote:[http://en.wikipedia.org/wiki/Multicast] address. footnote:[http://en.wikipedia.org/wiki/Multicast_address] -Be sure that the values you chose do not conflict with any existing clusters you might have. +Choose a port number and multi-cast footnote:[http://en.wikipedia.org/wiki/Multicast] address. footnote:[http://en.wikipedia.org/wiki/Multicast_address] +Be sure that the values you chose do not conflict with any existing clusters you might have. For advice on choosing a multi-cast address, see http://www.29west.com/docs/THPM/multicast-address-assignment.html For this document, I have chosen port 4000 and used 226.94.1.1 as the multi-cast address. [IMPORTANT] =========== The instructions below only apply for a machine with a single NIC. If you have a more complicated setup, you should edit the configuration manually. =========== [source,Bash] ---- # export ais_port=4000 # export ais_mcast=226.94.1.1 ---- Next we automatically determine the hosts address. By not using the full address, we make the configuration suitable to be copied to other nodes. 
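The snippet below takes the fourth field of the last +inet+ line
printed by +ip addr+ (the interface's broadcast address, 192.168.122.255
in this guide) and rewrites the 255 to 0, yielding the network address
that Corosync's +bindnetaddr+ option expects: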
[source,Bash]
----
# export ais_addr=`ip addr | grep "inet " | tail -n 1 | awk '{print $4}' | sed s/255/0/`
----

Display and verify the configuration options:

[source,Bash]
----
# env | grep ais_
ais_mcast=226.94.1.1
ais_port=4000
ais_addr=192.168.122.0
----

Once you're happy with the chosen values, update the Corosync
configuration:

[source,Bash]
----
# cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
# sed -i.bak "s/.*mcastaddr:.*/mcastaddr:\ $ais_mcast/g" /etc/corosync/corosync.conf
# sed -i.bak "s/.*mcastport:.*/mcastport:\ $ais_port/g" /etc/corosync/corosync.conf
# sed -i.bak "s/.*\tbindnetaddr:.*/bindnetaddr:\ $ais_addr/g" /etc/corosync/corosync.conf
----

Lastly, you'll need to enable quorum:

[source,Bash]
....
cat << END >> /etc/corosync/corosync.conf
quorum {
    provider: corosync_votequorum
    expected_votes: 2
}
END
....

The final configuration should look something like the sample in
Appendix B, Sample Corosync Configuration.

[IMPORTANT]
===========
Pacemaker used to obtain membership and quorum from a custom Corosync
plugin. This plugin also had the capability to start Pacemaker
automatically when Corosync was started.

Neither behavior is possible with Corosync 2.0 and beyond, as support
for plugins was removed. Instead, Pacemaker must be started as a
separate job/initscript.

Also, since Pacemaker used to use the plugin for message routing, a node
using the plugin (Corosync prior to 2.0) cannot talk to one that isn't
(Corosync 2.0+). Rolling upgrades between these versions are therefore
not possible and an alternate strategy
footnote:[http://www.clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Pacemaker_Explained/ap-upgrade.html]
must be used.
===========

=== Propagate the Configuration ===

Now we need to copy the changes so far to the other node:

[source,Bash]
----
# for f in /etc/corosync/corosync.conf /etc/hosts; do scp $f pcmk-2:$f ; done
corosync.conf                            100% 1528     1.5KB/s   00:00
hosts                                    100%  281     0.3KB/s   00:00
#
----

diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt b/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt
index c737d49346..0c5a1a980e 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Intro.txt
@@ -1,155 +1,155 @@
= Read-Me-First =

== The Scope of this Document ==

Computer clusters can be used to provide highly available services or
resources. The redundancy of multiple machines is used to guard against
failures of many types.

This document will walk through the installation and setup of simple
clusters using the Fedora distribution, version 17.

The clusters described here will use Pacemaker and Corosync to provide
resource management and messaging. Required packages and modifications
to their configuration files are described along with the use of the
Pacemaker command line tool for generating the XML used for cluster
control.

Pacemaker is a central component and provides the resource management
required in these systems. This management includes detecting and
recovering from the failure of various nodes, resources and services
under its control.

When more in-depth information is required, and for real-world usage,
please refer to the http://www.clusterlabs.org/doc/[Pacemaker Explained]
manual.

== What Is Pacemaker? ==

Pacemaker is a cluster resource manager. It achieves maximum
availability for your cluster services (aka.
resources) by detecting and recovering from node and resource-level
failures, making use of the messaging and membership capabilities
provided by your preferred cluster infrastructure (either Corosync or
Heartbeat).

Pacemaker's key features include:

* Detection and recovery of node and service-level failures
* Storage agnostic, no requirement for shared storage
* Resource agnostic, anything that can be scripted can be clustered
* Supports STONITH for ensuring data integrity
* Supports large and small clusters
* Supports both quorate and resource driven clusters
* Supports practically any redundancy configuration
* Automatically replicated configuration that can be updated from any node
* Ability to specify cluster-wide service ordering, colocation and anti-colocation
* Support for advanced service types
** Clones: for services which need to be active on multiple nodes
** Multi-state: for services with multiple modes (eg. master/slave, primary/secondary)
* Unified, scriptable, cluster shell

== Pacemaker Architecture ==

At the highest level, the cluster is made up of three pieces:

* Non-cluster aware components (illustrated in green). These pieces
  include the resources themselves, scripts that start, stop and
  monitor them, and also a local daemon that masks the differences
  between the different standards these scripts implement.

* Resource management: Pacemaker provides the brain (illustrated in
  blue) that processes and reacts to events regarding the cluster.
  These events include nodes joining or leaving the cluster; resource
  events caused by failures, maintenance, or scheduled activities; and
  other administrative actions. Pacemaker will compute the ideal state
  of the cluster and plot a path to achieve it after any of these
  events. This may include moving resources, stopping nodes and even
  forcing them offline with remote power switches.

* Low-level infrastructure: Corosync provides reliable messaging,
  membership and quorum information about the cluster (illustrated in
  red).

.Conceptual Stack Overview
image::images/pcmk-overview.png["Conceptual overview of the cluster stack",align="center"]

When combined with Corosync, Pacemaker also supports popular open source
cluster filesystems.
footnote:[Even though Pacemaker also supports Heartbeat, the filesystems
need to use the stack for messaging and membership and Corosync seems to
be what they're standardizing on. Technically it would be possible for
them to support Heartbeat as well, however there seems little interest
in this.]

Due to recent standardization within the cluster filesystem community,
these filesystems make use of a common distributed lock manager, which
in turn relies on Corosync for its messaging capabilities and Pacemaker
for its
-membership (which nodes are up/down) and fencing services. 
+membership (which nodes are up/down) and fencing services.

.The Pacemaker Stack
image::images/pcmk-stack.png["The Pacemaker stack when running on Corosync",align="center"]

=== Internal Components ===

Pacemaker itself is composed of four key components (illustrated below
in the same color scheme as the previous diagram):

* CIB (aka. Cluster Information Base)
* CRMd (aka. Cluster Resource Management daemon)
* PEngine (aka. PE or Policy Engine)
* STONITHd

.Internal Components
image::images/pcmk-internals.png["Subsystems of a Pacemaker cluster running on Corosync",align="center"]

The CIB uses XML to represent both the cluster's configuration and
current state of all resources in the cluster.
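If you are curious what this XML looks like on a live cluster, you can
dump a read-only copy of it at any time with +cibadmin+ (shown here
purely as an illustration; nothing later in this guide depends on it):

[source,Bash]
----
# cibadmin --query > /tmp/cib.xml
----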
The contents of the CIB are automatically kept in sync across the entire
cluster and are used by the PEngine to compute the ideal state of the
cluster and how it
-should be achieved. 
+should be achieved.

This list of instructions is then fed to the DC (Designated
Co-ordinator). Pacemaker centralizes all cluster decision making by
electing one of the CRMd instances to act as a master. Should the
elected CRMd process, or the node it is on, fail... a new one is quickly
established.

The DC carries out the PEngine's instructions in the required order by
passing them to either the LRMd (Local Resource Management daemon) or
CRMd peers on other nodes via the cluster messaging infrastructure
-(which in turn passes them on to their LRMd process). 
+(which in turn passes them on to their LRMd process).

The peer nodes all report the results of their operations back to the DC
and, based on the expected and actual results, will either execute any
actions that needed to wait for the previous one to complete, or abort
processing and ask the PEngine to recalculate the ideal cluster state
based on the unexpected results.

In some cases, it may be necessary to power off nodes in order to
protect shared data or complete resource recovery. For this Pacemaker
comes with STONITHd. STONITH is an acronym for
Shoot-The-Other-Node-In-The-Head and is usually implemented with a
remote power switch. In Pacemaker, STONITH devices are modeled as
resources (and configured in the CIB) to enable them to be easily
monitored for failure; however, STONITHd takes care of understanding the
STONITH topology such that its clients simply request a node be fenced
and it does the rest.

== Types of Pacemaker Clusters ==

Pacemaker makes no assumptions about your environment; this allows it to
support practically any
http://en.wikipedia.org/wiki/High-availability_cluster#Node_configurations[redundancy configuration]
including Active/Active, Active/Passive, N+1, N+M, N-to-1 and N-to-N.

In this document we will focus on the setup of a highly available Apache
web server with an Active/Passive cluster using DRBD and Ext4 to store
data. Then, we will upgrade this cluster to Active/Active using GFS2.

.Active/Passive Redundancy
image::images/pcmk-active-passive.png["Two-node Active/Passive clusters using Pacemaker and DRBD are a cost-effective solution for many High Availability situations",align="center"]

.N to N Redundancy
image::images/pcmk-active-active.png["When shared storage is available, every node can potentially be used for failover. Pacemaker can even run multiple copies of services to spread out the workload",align="center"]

diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
index a526eeac00..457c5064e6 100644
--- a/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
+++ b/doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.txt
@@ -1,556 +1,556 @@
= Replicated Storage with DRBD =

== Background ==

Even if you're serving up static websites, having to manually
synchronize the contents of that website to all the machines in the
cluster is not ideal. For dynamic websites, such as a wiki, it's not
even an option. Not everyone can afford network-attached storage, but
somehow the data needs to be kept in sync. Enter DRBD, which can be
thought of as network-based RAID-1. See http://www.drbd.org/ for more
details.

== Install the DRBD Packages ==

Since its inclusion in the upstream 2.6.33 kernel, everything needed to
use DRBD has shipped with Fedora since version 13.
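If you would like to confirm that your running kernel really does
include the module before installing anything, a quick and entirely
optional sanity check (not part of the original procedure) is:

[source,Bash]
----
# modinfo drbd | grep -e ^filename -e ^version
----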
All you need to do is install it: [source,Bash] ..... # yum install -y drbd-pacemaker drbd-udev Loaded plugins: langpacks, presto, refresh-packagekit Resolving Dependencies --> Running transaction check ---> Package drbd-pacemaker.x86_64 0:8.3.11-5.fc17 will be installed --> Processing Dependency: drbd-utils = 8.3.11-5.fc17 for package: drbd-pacemaker-8.3.11-5.fc17.x86_64 ---> Package drbd-udev.x86_64 0:8.3.11-5.fc17 will be installed --> Running transaction check ---> Package drbd-utils.x86_64 0:8.3.11-5.fc17 will be installed --> Finished Dependency Resolution Dependencies Resolved ====================================================================================== Package Arch Version Repository Size ====================================================================================== Installing: drbd-pacemaker x86_64 8.3.11-5.fc17 updates-testing 22 k drbd-udev x86_64 8.3.11-5.fc17 updates-testing 6.4 k Installing for dependencies: drbd-utils x86_64 8.3.11-5.fc17 updates-testing 183 k Transaction Summary ====================================================================================== Install 2 Packages (+1 Dependent package) Total download size: 212 k Installed size: 473 k Downloading Packages: -(1/3): drbd-pacemaker-8.3.11-5.fc17.x86_64.rpm | 22 kB 00:00 -(2/3): drbd-udev-8.3.11-5.fc17.x86_64.rpm | 6.4 kB 00:00 -(3/3): drbd-utils-8.3.11-5.fc17.x86_64.rpm | 183 kB 00:00 +(1/3): drbd-pacemaker-8.3.11-5.fc17.x86_64.rpm | 22 kB 00:00 +(2/3): drbd-udev-8.3.11-5.fc17.x86_64.rpm | 6.4 kB 00:00 +(3/3): drbd-utils-8.3.11-5.fc17.x86_64.rpm | 183 kB 00:00 -------------------------------------------------------------------------------------- -Total 293 kB/s | 212 kB 00:00 +Total 293 kB/s | 212 kB 00:00 Running Transaction Check Running Transaction Test Transaction Test Succeeded Running Transaction - Installing : drbd-utils-8.3.11-5.fc17.x86_64 1/3 - Installing : drbd-pacemaker-8.3.11-5.fc17.x86_64 2/3 - Installing : drbd-udev-8.3.11-5.fc17.x86_64 3/3 - Verifying : drbd-pacemaker-8.3.11-5.fc17.x86_64 1/3 - Verifying : drbd-udev-8.3.11-5.fc17.x86_64 2/3 - Verifying : drbd-utils-8.3.11-5.fc17.x86_64 3/3 + Installing : drbd-utils-8.3.11-5.fc17.x86_64 1/3 + Installing : drbd-pacemaker-8.3.11-5.fc17.x86_64 2/3 + Installing : drbd-udev-8.3.11-5.fc17.x86_64 3/3 + Verifying : drbd-pacemaker-8.3.11-5.fc17.x86_64 1/3 + Verifying : drbd-udev-8.3.11-5.fc17.x86_64 2/3 + Verifying : drbd-utils-8.3.11-5.fc17.x86_64 3/3 Installed: - drbd-pacemaker.x86_64 0:8.3.11-5.fc17 drbd-udev.x86_64 0:8.3.11-5.fc17 + drbd-pacemaker.x86_64 0:8.3.11-5.fc17 drbd-udev.x86_64 0:8.3.11-5.fc17 Dependency Installed: - drbd-utils.x86_64 0:8.3.11-5.fc17 + drbd-utils.x86_64 0:8.3.11-5.fc17 Complete! ..... == Configure DRBD == Before we configure DRBD, we need to set aside some disk for it to use. === Create A Partition for DRBD === If you have more than 1Gb free, feel free to use it. For this guide, however, 1Gb is plenty of space for a single HTML file and sufficient for later holding the GFS2 metadata.
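If you'd rather check the available space non-interactively first, `vgs` can report just that one field (an aside; the guide's own interactive check follows below):

[source,Bash]
----
# vgs --noheadings -o vg_free vg_pcmk1
----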
[source,Bash] ---- # vgdisplay | grep -e Name -e Free VG Name vg_pcmk1 Free PE / Size 31 / 992.00 MiB # lvs LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert - lv_root vg_pcmk1 -wi-ao-- 8.56g - lv_swap vg_pcmk1 -wi-ao-- 960.00m + lv_root vg_pcmk1 -wi-ao-- 8.56g + lv_swap vg_pcmk1 -wi-ao-- 960.00m # lvcreate -n drbd-demo -L 1G vg_pcmk1 Logical volume "drbd-demo" created # lvs LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert drbd-demo vg_pcmk1 -wi-a--- 1.00G - lv_root vg_pcmk1 -wi-ao-- 8.56g - lv_swap vg_pcmk1 -wi-ao-- 960.00m + lv_root vg_pcmk1 -wi-ao-- 8.56g + lv_swap vg_pcmk1 -wi-ao-- 960.00m ---- Repeat this on the second node; be sure to use the same size partition. [source,Bash] ---- -# ssh pcmk-2 -- lvs +# ssh pcmk-2 -- lvs LV VG Attr LSize Origin Snap% Move Log Copy% Convert - lv_root vg_pcmk1 -wi-ao-- 8.56g - lv_swap vg_pcmk1 -wi-ao-- 960.00m + lv_root vg_pcmk1 -wi-ao-- 8.56g + lv_swap vg_pcmk1 -wi-ao-- 960.00m # ssh pcmk-2 -- lvcreate -n drbd-demo -L 1G vg_pcmk1 Logical volume "drbd-demo" created # ssh pcmk-2 -- lvs LV VG Attr LSize Origin Snap% Move Log Copy% Convert drbd-demo vg_pcmk1 -wi-a--- 1.00G - lv_root vg_pcmk1 -wi-ao-- 8.56g - lv_swap vg_pcmk1 -wi-ao-- 960.00m + lv_root vg_pcmk1 -wi-ao-- 8.56g + lv_swap vg_pcmk1 -wi-ao-- 960.00m ---- === Write the DRBD Config === There is no series of commands for building a DRBD configuration, so simply copy the configuration below to /etc/drbd.conf. Detailed information on the directives used in this configuration (and other alternatives) is available from http://www.drbd.org/users-guide/ch-configure.html [WARNING] ========= Be sure to use the names and addresses of your nodes if they differ from the ones used in this guide. ========= .... -global { - usage-count yes; +global { + usage-count yes; } common { protocol C; } resource wwwdata { meta-disk internal; device /dev/drbd1; syncer { verify-alg sha1; } - net { - allow-two-primaries; + net { + allow-two-primaries; } on pcmk-1 { disk /dev/vg_pcmk1/drbd-demo; address 192.168.122.101:7789; } on pcmk-2 { disk /dev/vg_pcmk1/drbd-demo; address 192.168.122.102:7789; } } .... [NOTE] ======= TODO: Explain the reason for the allow-two-primaries option ======= === Initialize and Load DRBD === With the configuration in place, we can now perform the DRBD initialization. [source,Bash] ---- # drbdadm create-md wwwdata Writing meta data... initializing activity log NOT initialized bitmap New drbd meta data block successfully created. success ---- Now load the DRBD kernel module and confirm that everything is sane. [source,Bash] ---- # modprobe drbd # drbdadm up wwwdata # cat /proc/drbd version: 8.3.11 (api:88/proto:86-96) -srcversion: 0D2B62DEDB020A425130935 +srcversion: 0D2B62DEDB020A425130935 1: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r----- ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:1015740 ---- Repeat on the second node. [source,Bash] ---- # ssh pcmk-2 -- drbdadm --force create-md wwwdata Writing meta data... initializing activity log NOT initialized bitmap New drbd meta data block successfully created. success # ssh pcmk-2 -- modprobe drbd WARNING: Deprecated config file /etc/modprobe.conf, all config files belong into /etc/modprobe.d/.
# ssh pcmk-2 -- drbdadm up wwwdata # ssh pcmk-2 -- cat /proc/drbd version: 8.3.11 (api:88/proto:86-96) -srcversion: 0D2B62DEDB020A425130935 +srcversion: 0D2B62DEDB020A425130935 1: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r----- ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:1015740 ---- Now we need to tell DRBD which set of data to use. Since both sides contain garbage, we can run the following on pcmk-1: [source,Bash] ---- # drbdadm -- --overwrite-data-of-peer primary wwwdata # cat /proc/drbd version: 8.3.11 (api:88/proto:86-96) -srcversion: 0D2B62DEDB020A425130935 +srcversion: 0D2B62DEDB020A425130935 1: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r----- ns:8064 nr:0 dw:0 dr:8728 al:0 bm:0 lo:0 pe:1 ua:0 ap:0 ep:1 wo:f oos:1007804 [>....................] sync'ed: 0.9% (1007804/1015740)K finish: 0:12:35 speed: 1,320 (1,320) K/sec ---- After a while, the sync should finish and you'll see: [source,Bash] ---- # cat /proc/drbd version: 8.3.11 (api:88/proto:86-96) -srcversion: 0D2B62DEDB020A425130935 +srcversion: 0D2B62DEDB020A425130935 1: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----- ns:1015740 nr:0 dw:0 dr:1016404 al:0 bm:62 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0 ---- pcmk-1 is now in the Primary state, which allows it to be written to. This makes it a good point at which to create a filesystem and populate it with some data to serve up via our WebSite resource. === Populate DRBD with Data === [source,Bash] ---- # mkfs.ext4 /dev/drbd1 mke2fs 1.42 (29-Nov-2011) Filesystem label= OS type: Linux Block size=4096 (log=2) Fragment size=4096 (log=2) Stride=0 blocks, Stripe width=0 blocks 63488 inodes, 253935 blocks 12696 blocks (5.00%) reserved for the super user First data block=0 Maximum filesystem blocks=260046848 8 block groups 32768 blocks per group, 32768 fragments per group 7936 inodes per group -Superblock backups stored on blocks: +Superblock backups stored on blocks: 32768, 98304, 163840, 229376 -Allocating group tables: done -Writing inode tables: done +Allocating group tables: done +Writing inode tables: done Creating journal (4096 blocks): done Writing superblocks and filesystem accounting information: done ---- Now mount the newly created filesystem so we can create our index file. [source,Bash] ---- # mount /dev/drbd1 /mnt/ # cat <<-END >/mnt/index.html <html> <body>My Test Site - drbd</body> </html> END # umount /dev/drbd1 ---- == Configure the Cluster for DRBD == One handy feature of the crm shell is that you can use it in interactive mode to make several changes atomically. First we launch the shell. The prompt will change to indicate you're in interactive mode. [source,Bash] ---- # crm crm(live) # ---- Next we must create a working copy of the current configuration. This is where all our changes will go. The cluster will not see any of them until we say it's ok. Notice again how the prompt changes, this time to indicate that we're no longer looking at the live cluster. [source,Bash] ---- crm(live) # cib new drbd INFO: drbd shadow CIB created crm(drbd) # ---- Now we can create our DRBD clone and display the revised configuration.
[source,Bash] ---- crm(drbd) # configure primitive WebData ocf:linbit:drbd params drbd_resource=wwwdata \ op monitor interval=60s crm(drbd) # configure ms WebDataClone WebData meta master-max=1 master-node-max=1 \ clone-max=2 clone-node-max=1 notify=true crm(drbd) # configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" ms WebDataClone WebData \ meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" location prefer-pcmk-1 WebSite 50: pcmk-1 colocation website-with-ip inf: WebSite ClusterIP order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" ---- Once we're happy with the changes, we can tell the cluster to start using them and use crm_mon to check that everything is functioning. [source,Bash] ---- crm(drbd) # cib commit drbd INFO: commited 'drbd' shadow CIB to the cluster crm(drbd) # quit bye # crm_mon -1 ============ Last updated: Tue Apr 3 13:50:01 2012 Last change: Tue Apr 3 13:49:46 2012 via crm_shadow on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 4 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 WebSite (ocf::heartbeat:apache): Started pcmk-1 Master/Slave Set: WebDataClone [WebData] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 ] ---- [NOTE] ======= TODO: Include details on adding a second DRBD resource ======= Now that DRBD is functioning, we can configure a Filesystem resource to use it. In addition to the filesystem's definition, we also need to tell the cluster where it can be located (only on the DRBD Primary) and when it is allowed to start (after the Primary was promoted). Once again, we'll use the shell's interactive mode. [source,Bash] ---- # crm crm(live) # cib new fs INFO: fs shadow CIB created crm(fs) # configure primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="ext4" crm(fs) # configure colocation fs_on_drbd inf: WebFS WebDataClone:Master crm(fs) # configure order WebFS-after-WebData inf: WebDataClone:promote WebFS:start ---- We also need to tell the cluster that Apache needs to run on the same machine as the filesystem and that it must be active before Apache can start.
[source,Bash] ---- crm(fs) # configure colocation WebSite-with-WebFS inf: WebSite WebFS crm(fs) # configure order WebSite-after-WebFS inf: WebFS WebSite ---- Time to review the updated configuration: [source,Bash] ---- crm(fs) # configure show node $id="1702537408" pcmk-1 node $id="1719314624" pcmk-2 primitive ClusterIP ocf:heartbeat:IPaddr2 \ params ip="192.168.122.120" cidr_netmask="32" \ op monitor interval="30s" primitive WebData ocf:linbit:drbd \ params drbd_resource="wwwdata" \ op monitor interval="60s" primitive WebFS ocf:heartbeat:Filesystem \ params device="/dev/drbd/by-res/wwwdata" directory="/var/www/html" fstype="ext4" primitive WebSite ocf:heartbeat:apache \ params configfile="/etc/httpd/conf/httpd.conf" \ op monitor interval="1min" ms WebDataClone WebData \ meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" location prefer-pcmk-1 WebSite 50: pcmk-1 colocation WebSite-with-WebFS inf: WebSite WebFS colocation fs_on_drbd inf: WebFS WebDataClone:Master colocation website-with-ip inf: WebSite ClusterIP order WebFS-after-WebData inf: WebDataClone:promote WebFS:start order WebSite-after-WebFS inf: WebFS WebSite order apache-after-ip inf: ClusterIP WebSite property $id="cib-bootstrap-options" \ dc-version="1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff" \ cluster-infrastructure="corosync" \ stonith-enabled="false" \ no-quorum-policy="ignore" \ last-lrm-refresh="1333446866" rsc_defaults $id="rsc-options" \ resource-stickiness="100" op_defaults $id="op-options" \ timeout="240s" ---- After reviewing the new configuration, we again upload it and watch the cluster put it into effect. [source,Bash] ---- crm(fs) # cib commit fs INFO: commited 'fs' shadow CIB to the cluster crm(fs) # quit bye # crm_mon -1 ============ Last updated: Tue Apr 3 13:52:21 2012 Last change: Tue Apr 3 13:52:06 2012 via crm_shadow on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 5 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-1 WebSite (ocf::heartbeat:apache): Started pcmk-1 Master/Slave Set: WebDataClone [WebData] Masters: [ pcmk-1 ] Slaves: [ pcmk-2 ] WebFS (ocf::heartbeat:Filesystem): Started pcmk-1 ---- === Testing Migration === We could shut down the active node again, but another way to safely simulate recovery is to put the node into what is called "standby mode". Nodes in this state tell the cluster that they are not allowed to run resources. Any resources found active there will be moved elsewhere. This feature can be particularly useful when updating the resources' packages. Put the local node into standby mode and observe the cluster move all the resources to the other node. Note also that the node's status will change to indicate that it can no longer host resources. [source,Bash] ---- # crm node standby # crm_mon -1 ============ Last updated: Tue Apr 3 13:59:14 2012 Last change: Tue Apr 3 13:52:36 2012 via crm_attribute on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 5 Resources configured. 
============ Node pcmk-1 (1702537408): standby Online: [ pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 WebSite (ocf::heartbeat:apache): Started pcmk-2 Master/Slave Set: WebDataClone [WebData] Masters: [ pcmk-2 ] Stopped: [ WebData:1 ] WebFS (ocf::heartbeat:Filesystem): Started pcmk-2 ---- Once we've done everything we needed to on pcmk-1 (in this case nothing, we just wanted to see the resources move), we can allow the node to be a full cluster member again. [source,Bash] ---- # crm node online # crm_mon -1 ============ Last updated: Tue Apr 3 14:00:06 2012 Last change: Tue Apr 3 14:00:00 2012 via crm_attribute on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 5 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ClusterIP (ocf::heartbeat:IPaddr2): Started pcmk-2 WebSite (ocf::heartbeat:apache): Started pcmk-2 Master/Slave Set: WebDataClone [WebData] Masters: [ pcmk-2 ] Slaves: [ pcmk-1 ] WebFS (ocf::heartbeat:Filesystem): Started pcmk-2 ---- Notice that our resource stickiness settings prevent the services from migrating back to pcmk-1. diff --git a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt index 1ab35db47f..effc17d293 100644 --- a/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt +++ b/doc/Clusters_from_Scratch/en-US/Ch-Verification.txt @@ -1,162 +1,162 @@ -= Verify Cluster Installation = += Verify Cluster Installation = == Verify Corosync Installation == Start Corosync on the first node. [source,Bash] ---- # systemctl start corosync.service ---- The first thing to check is whether cluster communication is happy; for that, we use `corosync-cfgtool`. [source,Bash] ---- # corosync-cfgtool -s Printing ring status. Local node ID 1702537408 RING ID 0 id = 192.168.122.101 status = ring 0 active with no faults ---- We can see here that everything appears normal: our fixed IP address, not a 127.0.0.x loopback address, is listed as the +id+, and the status reports +no faults+. If you see something different, you might want to start by checking the node's network, firewall and selinux configurations. Next we check the membership and quorum APIs: [source,Bash] ---- # corosync-cmapctl | grep members -runtime.totem.pg.mrp.srp.members.1702537408.ip (str) = r(0) ip(192.168.122.101) +runtime.totem.pg.mrp.srp.members.1702537408.ip (str) = r(0) ip(192.168.122.101) runtime.totem.pg.mrp.srp.members.1702537408.join_count (u32) = 1 runtime.totem.pg.mrp.srp.members.1702537408.status (str) = joined # corosync-quorumtool -l Membership information ---------------------- Nodeid Votes Name 1702537408 1 pcmk-1 ---- The node sees itself in both locations, which is a good sign. If the node list is empty when you call `corosync-quorumtool`, then you've not correctly configured quorum in 'corosync.conf'. With everything looking healthy, we start Corosync on the second node and run the same communications check. [source,Bash] ---- # ssh pcmk-2 -- systemctl start corosync.service # ssh pcmk-2 -- corosync-cfgtool -s Printing ring status. Local node ID 1719314624 RING ID 0 id = 192.168.122.102 status = ring 0 active with no faults ---- Everything appears to look ok from +pcmk-2+; time to re-run the membership and quorum checks to see if it shows up there too. Again, if you see something different from the above, check for the usual suspects: network, firewall and selinux.
[source,Bash] ---- # corosync-cmapctl | grep members -runtime.totem.pg.mrp.srp.members.1702537408.ip (str) = r(0) ip(192.168.122.101) +runtime.totem.pg.mrp.srp.members.1702537408.ip (str) = r(0) ip(192.168.122.101) runtime.totem.pg.mrp.srp.members.1702537408.join_count (u32) = 1 runtime.totem.pg.mrp.srp.members.1702537408.status (str) = joined -runtime.totem.pg.mrp.srp.members.1719314624.ip (str) = r(0) ip(192.168.122.102) +runtime.totem.pg.mrp.srp.members.1719314624.ip (str) = r(0) ip(192.168.122.102) runtime.totem.pg.mrp.srp.members.1719314624.join_count (u32) = 1 runtime.totem.pg.mrp.srp.members.1719314624.status (str) = joined # corosync-quorumtool -l Membership information ---------------------- Nodeid Votes Name 1702537408 1 pcmk-1 1719314624 1 pcmk-2 ---- All good! == Verify Pacemaker Installation == Now that we have confirmed that Corosync is functional, we can check the rest of the stack. Start Pacemaker and check that the necessary processes have been started. [source,Bash] ---- # systemctl start pacemaker.service # ps axf PID TTY STAT TIME COMMAND 2 ? S 0:00 [kthreadd] ...lots of processes... 28019 ? Ssl 0:03 /usr/sbin/corosync 28047 ? Ss 0:00 /usr/sbin/pacemakerd -f 28048 ? Ss 0:00 \_ /usr/libexec/pacemaker/cib 28049 ? Ss 0:00 \_ /usr/libexec/pacemaker/stonithd 28050 ? Ss 0:00 \_ /usr/lib64/heartbeat/lrmd 28051 ? Ss 0:00 \_ /usr/libexec/pacemaker/attrd 28052 ? Ss 0:00 \_ /usr/libexec/pacemaker/pengine 28053 ? Ss 0:00 \_ /usr/libexec/pacemaker/crmd ---- If that looks ok, check the logs and crm_mon. [source,Bash] ---- # grep pacemakerd /var/log/messages | grep -e get_cluster_type -e read_config Apr 3 09:19:32 pcmk-1 pacemakerd[28047]: info: get_cluster_type: Detected an active 'corosync' cluster Apr 3 09:19:32 pcmk-1 pacemakerd[28047]: info: read_config: Reading configure for stack: corosync # crm_mon -1 ============ Last updated: Tue Apr 3 09:21:37 2012 Last change: Tue Apr 3 09:19:54 2012 via crmd on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 1 Nodes configured, unknown expected votes 0 Resources configured. ============ Online: [ pcmk-1 ] ---- Next, check for any ERRORs during startup - there shouldn't be any. [source,Bash] ---- # grep -i error /var/log/messages ---- Repeat on the other node and display the cluster's status. [source,Bash] ---- # ssh pcmk-2 -- systemctl start pacemaker.service # crm_mon -1 ============ Last updated: Tue Apr 3 09:26:23 2012 Last change: Tue Apr 3 09:26:21 2012 via crmd on pcmk-1 Stack: corosync Current DC: pcmk-1 (1702537408) - partition with quorum Version: 1.1.7-2.fc17-ee0730e13d124c3d58f00016c3376a1de5323cff 2 Nodes configured, unknown expected votes 0 Resources configured. ============ Online: [ pcmk-1 pcmk-2 ] ---- diff --git a/tools/crm_report.in b/tools/crm_report.in index 1b651391e4..4a0ef0641c 100755 --- a/tools/crm_report.in +++ b/tools/crm_report.in @@ -1,477 +1,477 @@ #!/bin/sh # Copyright (C) 2010 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details.
# # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Note the quotes around `$TEMP': they are essential! TEMP=`getopt \ -o hv?xl:f:t:n:T:Lp:c:dSACHu:MVs \ --long help,cts:,cts-log:,dest:,node:,nodes:,from:,to:,logfile:,as-directory,single-node,cluster:,user:,version,features \ -n 'crm_report' -- "$@"` eval set -- "$TEMP" times="" tests="" nodes="" compress=1 cluster="any" ssh_user="root" search_logs=1 report_data=`dirname $0` extra_logs="" sanitize_patterns="" log_patterns="CRIT: ERROR:" usage() { cat< "$l_base/$HALOG_F" fi for node in $nodes; do cat <<EOF >$l_base/.env LABEL="$label" REPORT_HOME="$r_base" REPORT_MASTER="$host" REPORT_TARGET="$node" LOG_START=$start LOG_END=$end REMOVE=1 SANITIZE="$sanitize_patterns" CLUSTER=$cluster LOG_PATTERNS="$log_patterns" EXTRA_LOGS="$extra_logs" SEARCH_LOGS=$search_logs verbose=$verbose EOF if [ $host = $node ]; then cat <<EOF >>$l_base/.env REPORT_HOME="$l_base" EOF cat $l_base/.env $report_data/report.common $report_data/report.collector > $l_base/collector bash $l_base/collector else cat $l_base/.env $report_data/report.common $report_data/report.collector \ | ssh -l $ssh_user -T $node -- "mkdir -p $r_base; cat > $r_base/collector; bash $r_base/collector" | (cd $l_base && tar xf -) fi done analyze $l_base > $l_base/$ANALYSIS_F if [ -f $l_base/$HALOG_F ]; then node_events $l_base/$HALOG_F > $l_base/$EVENTS_F fi for node in $nodes; do cat $l_base/$node/$ANALYSIS_F >> $l_base/$ANALYSIS_F if [ -s $l_base/$node/$EVENTS_F ]; then cat $l_base/$node/$EVENTS_F >> $l_base/$EVENTS_F elif [ -s $l_base/$HALOG_F ]; then awk "\$4==\"$node\"" $l_base/$EVENTS_F >> $l_base/$node/$EVENTS_F fi done log " " if [ $compress = 1 ]; then fname=`shrink $l_base` rm -rf $l_base log "Collected results are available in $fname" log " " log "Please create a bug entry at" log " http://developerbugs.linux-foundation.org/enter_bug.cgi?product=Pacemaker" log "Include a description of your problem and attach this tarball" log " " log "Thank you for taking time to create this report." else log "Collected results are available in $l_base" fi log " " } # # check if files have same content in the cluster # cibdiff() { d1=`dirname $1` d2=`dirname $2` if [ -f $d1/RUNNING -a -f $d2/RUNNING ] || [ -f $d1/STOPPED -a -f $d2/STOPPED ]; then if which crm_diff > /dev/null 2>&1; then crm_diff -c -n $1 -o $2 else info "crm_diff(8) not found, cannot diff CIBs" fi else echo "can't compare cibs from running and stopped systems" fi } diffcheck() { [ -f "$1" ] || { echo "$1 does not exist" return 1 } [ -f "$2" ] || { echo "$2 does not exist" return 1 } case `basename $1` in $CIB_F) cibdiff $1 $2;; $B_CONF) diff -u $1 $2;; # confdiff? *) diff -u $1 $2;; esac } # # remove duplicates if files are same, make links instead # consolidate() { for n in $NODES; do if [ -f $1/$2 ]; then rm $1/$n/$2 else mv $1/$n/$2 $1 fi ln -s ../$2 $1/$n done } analyze_one() { rc=0 node0="" for n in $NODES; do if [ "$node0" ]; then diffcheck $1/$node0/$2 $1/$n/$2 rc=$(($rc+$?)) else node0=$n fi done return $rc } analyze() { flist="$HOSTCACHE $MEMBERSHIP_F $CIB_F $CRM_MON_F $B_CONF logd.cf $SYSINFO_F" for f in $flist; do printf "Diff $f... 
" ls $1/*/$f >/dev/null 2>&1 || { echo "no $1/*/$f :/" continue } if analyze_one $1 $f; then echo "OK" [ "$f" != $CIB_F ] && consolidate $1 $f else echo "" fi done } do_cts() { test_sets=`echo $tests | tr ',' ' '` for test_set in $test_sets; do start_time=0 start_test=`echo $test_set | tr '-' ' ' | awk '{print $1}'` end_time=0 end_test=`echo $test_set | tr '-' ' ' | awk '{print $2}'` if [ x$end_test = x ]; then msg="Extracting test $start_test" label="CTS-$start_test-`date +"%b-%d-%Y"`" end_test=`expr $start_test + 1` else msg="Extracting tests $start_test to $end_test" label="CTS-$start_test-$end_test-`date +"%b-%d-%Y"`" end_test=`expr $end_test + 1` fi if [ $start_test = 0 ]; then start_pat="BEGINNING [0-9].* TESTS" else start_pat="Running test.*\[ *$start_test\]" fi if [ x$ctslog = x ]; then ctslog=`findmsg 1 "$start_pat"` if [ x$ctslog = x ]; then fatal "No CTS control file detected" else log "Using CTS control file: $ctslog" fi fi line=`grep -n "$start_pat" $ctslog | tail -1 | sed 's/:.*//'` if [ ! -z "$line" ]; then start_time=`linetime $ctslog $line` fi line=`grep -n "Running test.*\[ *$end_test\]" $ctslog | tail -1 | sed 's/:.*//'` if [ ! -z "$line" ]; then end_time=`linetime $ctslog $line` fi if [ -z "$nodes" ]; then nodes=`grep CTS: $ctslog | grep -v debug: | grep " \* " | sed s:.*\\\*::g | sort -u | tr '\\n' ' '` log "Calculated node list: $nodes" fi if [ $end_time -lt $start_time ]; then debug "Test didn't complete, grabbing everything up to now" end_time=`date +%s` fi if [ $start_time != 0 ];then log "$msg (`time2str $start_time` to `time2str $end_time`)" collect_data $label $start_time $end_time $ctslog else fatal "$msg failed: not found" fi done } getnodes() { if [ -z $1 ]; then cluster=`get_cluster_type` else cluster=$1 fi if [ -z $HA_STATE_DIR ]; then HA_STATE_DIR=/var/lib/heartbeat fi cluster_cf=`find_cluster_cf $cluster` # 1. Live if ps -ef | egrep -qs "[c]ib" then debug "Querying CIB for nodes" cibadmin -Ql -o nodes | awk ' /type="normal"/ { for( i=1; i<=NF; i++ ) if( $i~/^uname=/ ) { sub("uname=.","",$i); sub("\".*","",$i); print $i; next; } } ' | tr '\n' ' ' # 2. Saved elif [ -f @CRM_CONFIG_DIR@/cib.xml ]; then debug "Querying on-disk CIB for nodes" CIB_file=@CRM_CONFIG_DIR@/cib.xml cibadmin -Ql -o nodes | awk ' /type="normal"/ { for( i=1; i<=NF; i++ ) if( $i~/^uname=/ ) { sub("uname=.","",$i); sub("\".*","",$i); print $i; next; } } ' | tr '\n' ' ' # 3. hostcache elif [ -f $HA_STATE_DIR/hostcache ]; then debug "Reading nodes from $HA_STATE_DIR/hostcache" awk '{print $1}' $HA_STATE_DIR/hostcache # 4. ha.cf elif [ "x$cluster" = "xheartbeat" ]; then debug "Reading nodes from $cluster_cf" getcfvar $cluster node $cluster_cf # 5. logs else # Look in the logs... logfile=`findmsg 1 "crm_update_peer"` debug "Reading nodes from $logfile" if [ ! -z "$logfile" ]; then grep crm_update_peer: $logfile | sed s/.*crm_update_peer// | sed s/://g | awk '{print $2}' | grep -v "(null)" | sort -u | tr '\n' ' ' fi fi } if [ "x$tests" != "x" ]; then do_cts elif [ "x$start_time" != "x" ]; then masterlog="" if [ -z "$sanitize_patterns" ]; then log "WARNING: The tarball produced by this program may contain" log " sensitive information such as passwords." log "" log "We will attempt to remove such information if you use the" log "-p option. For example: -p \"pass.*\" -p \"user.*\"" log "" log "However, doing this may reduce the ability for the recipients" log "to diagnose issues and generally provide assistance." 
log "" log "IT IS YOUR RESPONSIBILITY TO PROTECT SENSITIVE DATA FROM EXPOSURE" log "" fi if [ -z "$nodes" ]; then nodes=`getnodes $cluster` log "Calculated node list: $nodes" fi if [ -z "$nodes" ]; then fatal "Cannot determine node list, please specify manually with --nodes" fi if echo $nodes | grep -qs $host then debug "We are a cluster node" else debug "We are a log master" masterlog=`findmsg 1 "crmd\\|CTS"` fi if [ -z $end_time ]; then end_time=`perl -e 'print time()'` fi label="pcmk-`date +"%a-%d-%b-%Y"`" log "Collecting data from $nodes (`time2str $start_time` to `time2str $end_time`)" collect_data $label $start_time $end_time $masterlog else fatal "Not sure what to do, no tests or time ranges to extract" fi