install the required packages on all nodes:
yum install pacemaker pcs ccs resource-agents cman rng-tools
open the corosync port (udp 5405) in the firewall on all nodes, e.g. in /etc/sysconfig/iptables:
-A INPUT -m state --state NEW -m udp -p udp --dport 5405 -j ACCEPT
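one way to apply and persist that rule right away, assuming the stock iptables service of RHEL/CentOS 6:
# insert the rule into the running firewall, then save it to /etc/sysconfig/iptables
iptables -I INPUT -m state --state NEW -m udp -p udp --dport 5405 -j ACCEPT
service iptables save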
configure rngd to feed entropy from /dev/urandom in /etc/sysconfig/rngd, then enable and start it:
EXTRAOPTIONS="-r /dev/urandom"
chkconfig rngd on
service rngd start
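a quick sanity check that rngd is feeding the entropy pool before generating the corosync key in the next step:
# should climb well above a few hundred once rngd is running
cat /proc/sys/kernel/random/entropy_avail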
do the following on the first node:
corosync-keygen
ccs -f /etc/cluster/cluster.conf --createcluster cluster
ccs -f /etc/cluster/cluster.conf --addnode node1.mngt.bh.helux.nl
ccs -f /etc/cluster/cluster.conf --addalt node1.mngt.bh.helux.nl node1.local.bh.helux.nl
ccs -f /etc/cluster/cluster.conf --addnode node2.mngt.bh.helux.nl
ccs -f /etc/cluster/cluster.conf --addalt node2.mngt.bh.helux.nl node2.local.bh.helux.nl
ccs -f /etc/cluster/cluster.conf --addfencedev pcmk agent=fence_pcmk
ccs -f /etc/cluster/cluster.conf --addmethod pcmk-redirect node1.mngt.bh.helux.nl
ccs -f /etc/cluster/cluster.conf --addmethod pcmk-redirect node2.mngt.bh.helux.nl
ccs -f /etc/cluster/cluster.conf --addfenceinst pcmk node1.mngt.bh.helux.nl pcmk-redirect port=node1.mngt.bh.helux.nl delay=15
ccs -f /etc/cluster/cluster.conf --addfenceinst pcmk node2.mngt.bh.helux.nl pcmk-redirect port=node2.mngt.bh.helux.nl delay=15
ccs -f /etc/cluster/cluster.conf --setcman keyfile="/etc/corosync/authkey" transport="udpu" port="5405" two_node=1 expected_votes=1
ccs -f /etc/cluster/cluster.conf --settotem rrp_mode="active"
ccs_config_validate -f /etc/cluster/cluster.conf
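before distributing the file, the listing options of ccs (e.g. --lsnodes and --lsfencedev; see ccs --help if these differ in your version) give a quick overview of what was generated:
# list the nodes and fence devices written to cluster.conf
ccs -f /etc/cluster/cluster.conf --lsnodes
ccs -f /etc/cluster/cluster.conf --lsfencedev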
install the basic config on the other node:
scp /etc/cluster/cluster.conf node2:/etc/cluster
scp /etc/corosync/authkey node2:/etc/corosync
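a quick check that both nodes ended up with identical copies:
# checksums must match on node1 and node2
md5sum /etc/cluster/cluster.conf /etc/corosync/authkey
ssh node2 md5sum /etc/cluster/cluster.conf /etc/corosync/authkey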
enable the services and start cman on all nodes:
chkconfig cman on
chkconfig pacemaker on
service cman start
check the rings on all nodes
corosync-objctl | fgrep members
check secauth, rrp_mode, transport etc. on all nodes
corosync-objctl | egrep ^totem
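as an illustration (not literal output), the settings configured above should show up roughly like this in the corosync-objctl listing:
totem.rrp_mode=active
totem.transport=udpu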
do the following on all nodes:
service pacemaker start
do the following on one node:
validate everything
crm_mon -Arf1
look up the vm names in vcenter for the fence devices; the name before the comma is what goes into pcmk_host_map:
fence_vmware_soap -z -l root@localos -p <password> -a vcenter.mngt.rtd.helux.nl -o list | grep node
node2.mngt,421ceeb6-b7d2-00a5-0be4-81dadde2106f
node1.mngt,421ce817-6a23-84c9-da3a-7e9f9324ff39
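to confirm the agent can actually control a single vm, the standard fence-agent status action with -n/--plug can be used (same connection options as above, -n is assumed to select the vm; check fence_vmware_soap --help):
fence_vmware_soap -z -l root@localos -p <password> -a vcenter.mngt.rtd.helux.nl -o status -n node1.mngt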
a two-node cluster has no usable quorum, so tell pacemaker to ignore quorum loss:
pcs property set no-quorum-policy=ignore
pcs stonith create fence_node1 fence_vmware_soap pcmk_host_map="node1.mngt.bh.helux.nl:node1.mngt" login="root@localos" passwd=<password> action="off" ipaddr="vcenter.mngt.rtd.helux.nl" ssl=1
pcs stonith create fence_node2 fence_vmware_soap pcmk_host_map="node2.mngt.bh.helux.nl:node2.mngt" login="root@localos" passwd=<password> action="off" ipaddr="vcenter.mngt.rtd.helux.nl" ssl=1
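verify that both fence devices were accepted and have started:
pcs stonith show --full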
pcs resource create ClusterIP ocf:heartbeat:IPaddr2 ip=172.16.2.26 cidr_netmask=24 op monitor interval=30s
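a quick check that the vip is actually configured on whichever node runs ClusterIP:
pcs resource show ClusterIP
ip addr show | grep 172.16.2.26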
edit /etc/lvm/lvm.conf and define which volume groups are activated at startup. exclude the cluster VGs so that only the cluster activates them. in this example the cluster VG is data.
...
#volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
volume_list = [ "system" ]
...
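the initramfs carries its own copy of lvm.conf, so after changing volume_list it is usually necessary to rebuild it (and reboot), otherwise the cluster VG may still get activated at boot. a sketch for the running kernel:
# rebuild the initramfs so the new volume_list is honoured at boot
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)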
pcs resource create lvm_data LVM volgrpname=data exclusive=true
pcs resource create fs_lvol1 Filesystem device="/dev/data/lvol1" directory="/mnt" fstype="ext4" options="rw,noatime"
pcs constraint order lvm_data then fs_lvol1
the following turns an existing linux init script (/etc/init.d/postfix) into a clustered application:
pcs resource create postfix lsb:postfix
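an lsb resource only behaves if the init script is lsb compliant: start/stop/status must return the correct exit codes (status returns 0 when running, 3 when stopped). a quick manual check:
service postfix status; echo $?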
put them in a resource group
pcs resource group add postfix_group lvm_data fs_lvol1 ClusterIP postfix
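resources in a group are kept on the same node and started in the listed order (stopped in reverse), so the group itself covers colocation and ordering. check the result:
pcs status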
if you want to patch or reboot a cluster node, pin the resources to the other node first. in this example node2 gets the maintenance, so the group is pinned to node1.
first, we check:
pcs constraint --full
output:
Location Constraints:
Ordering Constraints:
  start lvm_data then start fs_lvol1 (Mandatory) (id:order-lvm_data-fs_lvol1-mandatory)
Colocation Constraints:
pin the resource group to node1:
pcs constraint location postfix_group prefers node1.mngt.bh.helux.nl=INFINITY
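running pcs constraint --full again now also shows the new location constraint together with its id, which is the id used in the remove command below:
pcs constraint --full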
when the work on node2 is done and node2 is available again, remove the location constraint:
pcs constraint remove location-postfix_group-node1.mngt.bh.helux.nl-INFINITY