

Highly Available MariaDB on CentOS 7.0 with Corosync + Pacemaker + pcs + DRBD


Source: http://blog.csdn.net/wylfengyujiancheng/article/details/50670327

 

1. Operating System Configuration

1.1 Preparation

Two nodes, node01 and node02, both installed with CentOS 7.0. Each node has two disks: one for the root partition and one for DRBD.

192.168.100.11  node01

192.168.100.12  node02

Set the hostnames:

Node 1:

# hostnamectl set-hostname node01

# su -l

Node 2:

# hostnamectl set-hostname node02

# su -l

 

 

1.2 Disk layout

[root@node02 ~]# lsblk

NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda               8:0    0   20G  0 disk

├─sda1            8:1    0  500M  0 part /boot

└─sda2            8:2    0 19.5G  0 part

  ├─centos-swap 253:0    0    2G  0 lvm  [SWAP]

  └─centos-root 253:1    0 17.5G  0 lvm  /

sdb               8:16   0   20G  0 disk

sr0              11:0    1 1024M  0 rom

[root@node01 ~]# lsblk

NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda               8:0    0   20G  0 disk

├─sda1            8:1    0  500M  0 part /boot

└─sda2            8:2    0 19.5G  0 part

  ├─centos-swap 253:0    0    2G  0 lvm  [SWAP]

  └─centos-root 253:1    0 17.5G  0 lvm  /

sdb               8:16   0   20G  0 disk

sr0              11:0    1 1024M  0 rom

 

 

1.3 Create the LVM volume (run on each node)

 

# pvcreate /dev/sdb

# vgcreate data /dev/sdb

# lvcreate --size 2G --name mysql data
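A quick sanity check before handing the volume to DRBD; these LVM reporting commands only read state:

# pvs ## /dev/sdb should appear as a physical volume in VG "data"

# lvs data ## the 2G logical volume "mysql" should be listed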

 

 

1.4 Disable the firewall and SELinux (run on each node)

 

setenforce 0

sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config

systemctl disable firewalld.service

systemctl stop firewalld.service

iptables --flush

 

 

1.5 Configure the hosts file

 

echo '192.168.100.11 node01 ' >>/etc/hosts

echo '192.168.100.12 node02 ' >>/etc/hosts

 

 

1.6 Configure NTP (10.239.41.128 is the NTP server; run on each node)

 

# chkconfig chronyd off

# chkconfig ntpd on 

# sed -i '/^server 3.centos.pool/a server 10.239.41.128' /etc/ntp.conf

# service ntpd start

# ntpq -p

 

 

1.7 Configure SSH mutual trust (run on each node)

 

# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""

# ssh-copy-id node01

# ssh-copy-id node02
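To confirm the trust works, each node should now be able to run a remote command on the other without a password prompt:

# ssh node02 hostname ## run from node01; prints "node02" without asking for a password

# ssh node01 hostname ## run from node02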

 

 

2. Install DRBD

2.1 Install the DRBD packages (on each node)

 

# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm

# yum install -y kmod-drbd84 drbd84-utils

 

 

2.2 Configuration files

/etc/drbd.conf # main configuration file

/etc/drbd.d/global_common.conf # global configuration file

a. /etc/drbd.conf

The main configuration file includes the global configuration file and every file under "drbd.d/" ending in .res:

 

# You can find an example in /usr/share/doc/drbd.../drbd.conf.example

include "drbd.d/global_common.conf";

include "drbd.d/*.res";

 

b. /etc/drbd.d/global_common.conf

 

global {

    usage-count no;  # whether to report usage statistics to the DRBD project (default yes)

    # minor-count dialog-refresh disable-ip-verification

}

common {

    protocol C;      # DRBD replication protocol

    handlers {

        # These are EXAMPLE handlers only.

        # They may have severe implications,

        # like hard resetting the node under certain circumstances.

        # Be careful when choosing your poison.

        pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";

        pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";

        local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";

        # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";

        # split-brain "/usr/lib/drbd/notify-split-brain.sh root";

        # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";

        # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";

        # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;

    }

    startup {

        # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb

    }

    options {

        # cpu-mask on-no-data-accessible

    }

    disk {

        on-io-error detach; # on an I/O error, detach the backing device

        # size max-bio-bvecs on-io-error fencing disk-barrier disk-flushes

        # disk-drain md-flushes resync-rate resync-after al-extents

        # c-plan-ahead c-delay-target c-fill-target c-max-rate

        # c-min-rate disk-timeout

    }

    net {

        # protocol timeout max-epoch-size max-buffers unplug-watermark

        # connect-int ping-int sndbuf-size rcvbuf-size ko-count

        # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri

        # after-sb-1pri after-sb-2pri always-asbp rr-conflict

        # ping-timeout data-integrity-alg tcp-cork on-congestion

        # congestion-fill congestion-extents csums-alg verify-alg

        # use-rle

    }

    syncer { rate 1024M; }  # resync rate cap

}

 

Note: the on-io-error policy can be one of the following:

detach: the default and recommended option. On a lower-level disk I/O error the device keeps running in diskless mode.

pass_on: DRBD reports the I/O error up the stack. On the primary node it is reported to the mounted file system; on the secondary it is ignored (there is no upper layer to report to).

call-local-io-error: invokes the command defined by the local-io-error handler. This requires a local-io-error handler to be configured in the resource, giving the administrator full freedom to handle I/O errors with an arbitrary command or script.

c. Define a resource

Create /etc/drbd.d/mysql.res with the following content:

resource mysql {          # resource name

protocol C;               # replication protocol

meta-disk internal;

device /dev/drbd1;        # DRBD device name

syncer {

verify-alg sha1;          # online verification algorithm

}

net {

allow-two-primaries;

}

on node01 {

disk /dev/data/mysql;     # backing LVM volume for drbd1

address 192.168.100.11:7789;  # DRBD listen address and port

}

on node02 {

disk /dev/data/mysql;

address 192.168.100.12:7789;

}

}
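Before copying the file to the peer, the syntax can be checked locally: drbdadm dump parses the configuration and prints it back, reporting any errors it finds:

# drbdadm dump mysql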

 

 

2.4 Copy the configuration files to node02

# scp -rp /etc/drbd.d/* node02:/etc/drbd.d/

 

2.5 Bring up DRBD

# drbdadm create-md mysql

# modprobe drbd

# drbdadm up mysql

# drbdadm -- --force primary mysql

Check the status:

# cat /proc/drbd
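The initial synchronization of the 2G volume takes a while; progress is reported in /proc/drbd and can be followed live, for example:

# watch -n1 cat /proc/drbd ## wait for both nodes to reach the UpToDate state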

 

2.6 Configure the peer node

# ssh node02 "drbdadm create-md mysql"

# ssh node02 "modprobe drbd"

# ssh node02 "drbdadm up mysql"

 

2.7 Format the device and mount it

# mkfs.xfs /dev/drbd1
meta-data=/dev/drbd1    isize=256    agcount=4, agsize=65532 blks
         =              sectsz=512   attr=2, projid32bit=1
         =              crc=0        finobt=0
data     =              bsize=4096   blocks=262127, imaxpct=25
         =              sunit=0      swidth=0 blks
naming   =version 2     bsize=4096   ascii-ci=0 ftype=0
log      =internal log  bsize=4096   blocks=853, version=2
         =              sectsz=512   sunit=0 blks, lazy-count=1
realtime =none          extsz=4096   blocks=0, rtextents=0

 

 

Create the mount point and mount the device:

# mkdir -p /mnt/drbd

# mount /dev/drbd1 /mnt/drbd

 

3. Install the Cluster Software

3.1 Install the required packages

# yum install -y pacemaker pcs psmisc policycoreutils-python

 

 

Start the pcsd service and enable it at boot:

 

# systemctl start pcsd.service

# systemctl enable pcsd.service

 

3.2 Set the password for the hacluster user (on each node)

# passwd hacluster

Password: hacluster

 

4. Install MariaDB (on each node)

4.1 Install the packages

# yum install epel* -y

# yum install mariadb mariadb-server MySQL-python

Note: in /etc/my.cnf point the data directory at the DRBD mount (datadir=/mnt/drbd)

and fix the ownership of that directory (chown -R mysql:mysql /mnt/drbd).
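A minimal sketch of the relevant /etc/my.cnf settings, assuming the stock CentOS 7 MariaDB layout (only datadir deviates from the default):

[mysqld]

datadir=/mnt/drbd

socket=/var/lib/mysql/mysql.sock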

 

 

4.2 Keep MariaDB from starting at boot (the cluster will manage it)

# systemctl disable mariadb

 

 

5. Configure Corosync

5.1 Authenticate the nodes and create the cluster (if an HTTP proxy is set, unset it first)

 

# pcs cluster auth node01 node02

# pcs cluster setup --name mycluster node01 node02
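pcs cluster auth prompts for the hacluster credentials set in 3.2; they can also be passed on the command line, which is convenient for scripting:

# pcs cluster auth node01 node02 -u hacluster -p hacluster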

 

 

5.2 Start the cluster

 

[root@node01 ~]# pcs cluster start --all

node01: Starting Cluster...

node02: Starting Cluster...

 

 

5.3 Verify the Corosync installation

 

[root@node01 ~]# corosync-cfgtool -s

Printing ring status.

Local node ID 1

RING ID 0

id = 192.168.100.11

status = ring 0 active with no faults

 

 

5.4 Check the cluster membership

 

# corosync-cmapctl | grep members

runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0

runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.100.11)

runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1

runtime.totem.pg.mrp.srp.members.1.status (str) = joined

runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0

runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.100.12)

runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 2

runtime.totem.pg.mrp.srp.members.2.status (str) = joined

 

 

 

5.5 Check the Corosync status

 

# pcs status corosync

Membership information

--------------------------

Nodeid Votes Name

1 1 node01 (local)

2 1 node02

 

 

5.6 Verify the Pacemaker installation

 

# ps axf

PID TTY STAT TIME COMMAND

2 ? S 0:00 [kthreadd]

...lots of processes...

1362 ? Ssl 0:35 corosync

1379 ? Ss 0:00 /usr/sbin/pacemakerd -f

1380 ? Ss 0:00 \_ /usr/libexec/pacemaker/cib

1381 ? Ss 0:00 \_ /usr/libexec/pacemaker/stonithd

1382 ? Ss 0:00 \_ /usr/libexec/pacemaker/lrmd

1383 ? Ss 0:00 \_ /usr/libexec/pacemaker/attrd

1384 ? Ss 0:00 \_ /usr/libexec/pacemaker/pengine

1385 ? Ss 0:00 \_ /usr/libexec/pacemaker/crmd

 

 

5.7 Check pcs status

 

[root@node01 ~]# pcs status

Cluster name: mycluster

WARNING: no stonith devices and stonith-enabled is not false

Last updated: Tue Dec 16 16:15:29 2014

Last change: Tue Dec 16 15:49:47 2014

Stack: corosync

Current DC: node02 (2) - partition with quorum

Version: 1.1.12-a14efad

2 Nodes configured

0 Resources configured

Online: [ node01 node02 ]

Full list of resources:

PCSD Status:

node01: Online

node02: Online

Daemon Status:

corosync: active/disabled

pacemaker: active/disabled

pcsd: active/enabled

 

 

5.8 Check the system log for errors (stonith errors excepted)

# journalctl | grep -i error

 

6. Configure the Cluster (run on any one node)

6.1 Cluster properties

Quorum policy (a two-node cluster loses quorum as soon as one node fails, so ignore it):

 

# pcs property set no-quorum-policy=ignore

 

 

Migrate resources away after a single failure:

 

# pcs resource defaults migration-threshold=1

 

 

Since the two nodes have no STONITH device, disable fencing:

 

# pcs property set stonith-enabled=false

 

 

After node01 recovers, prevent resources from migrating back from node02 (bouncing back and forth disrupts the service):

 

# pcs resource defaults resource-stickiness=100

# pcs resource defaults

 

 

Set the default resource operation timeout:

 

# pcs resource op defaults timeout=90s

# pcs resource op defaults

 

 

Verify the configuration; a clean configuration produces no output:

# crm_verify -L -V

 

 

6.2 Configure the floating IP

# pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.100.2 cidr_netmask=24 op monitor interval=30s

vip is the name chosen for the cluster IP resource; it is monitored every 30 seconds.
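Once the resource is started, pcs reports which node holds it, and the address should be visible on that node's interface:

# pcs status resources ## shows the node currently running vip

# ip addr show ## on that node, 192.168.100.2/24 appears as an extra address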

6.3 Configure DRBD high availability

First unmount the device (on each node):

# umount /dev/drbd1

 

Create a working copy of the CIB:

# pcs cluster cib drbd_cfg

Create the DRBD resource:

# pcs -f drbd_cfg resource create DRBD ocf:linbit:drbd drbd_resource=mysql op monitor interval=60s

Clone it as a master/slave resource:

# pcs -f drbd_cfg resource master DRBDClone DRBD \

master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 \

notify=true

Check the configuration:

[root@node01 ~]# pcs -f drbd_cfg resource show

vip (ocf::heartbeat:IPaddr2): Started

Master/Slave Set: DRBDClone [DRBD]

Stopped: [ node01 node02 ]

 

 

 

Push the configuration:

# pcs cluster cib-push drbd_cfg

 

 

Check the status:

[root@node01 ~]# pcs status

Cluster name: mycluster

Last updated: Fri Aug 14 09:29:41 2015

Last change: Fri Aug 14 09:29:25 2015

Stack: corosync

Current DC: node01 (1) - partition with quorum

Version: 1.1.12-a14efad

2 Nodes configured

3 Resources configured

Online: [ node01 node02 ]

Full list of resources:

vip (ocf::heartbeat:IPaddr2): Started node01

Master/Slave Set: DRBDClone [DRBD]

Masters: [ node01 ]

Slaves: [ node02 ]

PCSD Status:

node01: Online

node02: Online

Daemon Status:

corosync: active/disabled

pacemaker: active/disabled

pcsd: active/enabled

 

Make sure the drbd module is loaded on boot:

# echo drbd > /etc/modules-load.d/drbd.conf
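Verify that the module is loaded now (systemd-modules-load takes care of subsequent boots):

# lsmod | grep drbd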

 

6.4 Configure file system high availability

Create a working copy of the CIB:

# pcs cluster cib fs_cfg

Create the dbFS resource:

# pcs -f fs_cfg resource create dbFS ocf:heartbeat:Filesystem device='/dev/drbd1' directory='/mnt/drbd' fstype='xfs'

Colocate dbFS with the DRBD master:

# pcs -f fs_cfg constraint colocation add dbFS with DRBDClone INFINITY with-rsc-role=Master

Set the start order:

# pcs -f fs_cfg constraint order promote DRBDClone then start dbFS

Push the configuration:

# pcs cluster cib-push fs_cfg
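After the push, the new rules and the mount can be verified on the current DRBD master:

# pcs constraint ## lists the colocation and ordering constraints just added

# df -h /mnt/drbd ## on the master node, /dev/drbd1 should be mounted here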

 

6.5 Configure MariaDB high availability (choose one of the two variants)

Either manage the systemd unit directly:

# pcs resource create Mysql systemd:mariadb op start timeout=180s op stop timeout=180s op monitor interval=20s timeout=60s

or use the OCF mysql agent with explicit paths:

# pcs resource create Mysql ocf:heartbeat:mysql binary="/usr/libexec/mysqld" config="/etc/my.cnf" datadir="/mnt/drbd" pid="/var/run/mariadb/mariadb.pid" socket="/var/lib/mysql/mysql.sock" op start timeout=180s op stop timeout=180s op monitor interval=20s timeout=60s

Configure the resource colocation:

# pcs constraint colocation add Mysql vip INFINITY

# pcs constraint colocation add vip dbFS INFINITY

Set the start order:

# pcs constraint order dbFS then Mysql

 

Test: grant remote access privileges in the database:

( GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; )

( GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'mypassword' WITH GRANT OPTION; )

> FLUSH PRIVILEGES;
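A simple end-to-end test from any host with the mysql client, connecting through the floating IP ('mypassword' above is a placeholder; substitute the real password when prompted):

# mysql -h 192.168.100.2 -u root -p -e "SELECT VERSION();"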

 

Review the resource constraints:

# pcs constraint --full

Check the cluster status:

# pcs status

 

 

7. Cluster Operation Commands

7.1 Verify the cluster installation

# pacemakerd -F ## show pacemaker components (or: ps axf | grep pacemaker)

# corosync-cfgtool -s ## show the corosync ring status

# corosync-cmapctl | grep members ## corosync 2.3.x

# corosync-objctl | grep members ## corosync 1.4.x

 

 

7.2、查看群集資源

 

 # pcs resource standards ## 查看支持資源類型

 # pcs resource providers ## 查看資源提供商

 # pcs resource agents ## 查看所有資源代理

 # pcs resource list ## 查看支持資源列表

 # pcs stonith list ## 查看支持Fence列表

 # pcs property list --all ## 顯示群集默認變量參數

 # crm_simulate -sL ## 檢驗資源 score 值

 

 

7.3 Work with CIB files

# pcs cluster cib ra_cfg ## save the cluster configuration to the file ra_cfg

# pcs -f ra_cfg resource create ## create a resource in the file rather than in the running configuration

# pcs -f ra_cfg resource show ## review the file's configuration before applying it

# pcs cluster cib-push ra_cfg ## push the file into the running configuration

 

 

7.4 STONITH device operations

# stonith_admin -I ## list available fence agents

# stonith_admin -M -a agent_name ## show a fence agent's metadata, e.g. stonith_admin -M -a fence_vmware_soap

# stonith_admin --reboot nodename ## test a STONITH device

 

 

7.5 Inspect the cluster configuration

# crm_verify -L -V ## check the configuration for errors

# pcs property ## show cluster properties

# pcs stonith ## show stonith status

# pcs constraint ## show resource constraints

# pcs config ## show the full cluster configuration

# pcs cluster cib ## show the cluster configuration as XML

 

 

7.6 Manage the cluster

# pcs status ## show cluster status

# pcs status cluster

# pcs status corosync

# pcs cluster stop [node11] ## stop the cluster (optionally only on the named node)

# pcs cluster start --all ## start the cluster on all nodes

# pcs cluster standby node11 ## put a node into standby; undo with: pcs cluster unstandby node11

# pcs cluster destroy [--all] ## destroy the cluster; with --all also restore corosync.conf

# pcs resource cleanup ClusterIP ## clear a resource's status and failure count

# pcs stonith cleanup vmware-fencing ## clear a fence resource's status and failure count
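A minimal failover drill built from the commands above, assuming node01 currently holds the resources:

# pcs cluster standby node01 ## resources migrate to node02

# pcs status ## vip, the DRBD master, dbFS and Mysql should all be on node02

# pcs cluster unstandby node01 ## node01 rejoins; resource-stickiness keeps the services on node02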


