#USE IT IN COMMAND LINE (without #):
#curl http://pastebin.com/raw/2Hez4MYN > xendeletelocalsr.sh; tr -d "\015" < xendeletelocalsr.sh > xendeletelocalsrnew.sh
#sh xendeletelocalsrnew.sh
#I have managed to configure 4 servers running the Dundee beta with GlusterFS and CTDB, with fully working HA and no SPOF.
#I have four NICs: NIC0 XenServer management 192.168.10.2*, NIC1 gluster VM client 192.168.10.14*, bond of NIC2-3 10.10.10.1*,
#and CTDB VIPs 192.168.10.13*. In /etc/hosts I keep only the 10.10.10.1x server IPs and use them when creating the GlusterFS
#volumes, so all GlusterFS backend traffic goes over the bond.
#The main compilation script:
#http://pastebin.com/ps1LZaSq
#Removing local storage and configuring ctdb, /etc/hosts:
#http://pastebin.com/2Hez4MYN
#At the end you should:
#gluster peer probe the servers
#and
#init_gluster4 "xenserverhw" "06" "08" "03" "04" "vol0" 4 #replica 4 at the end!
#then create the NFS and ISO SRs, make a VM, install the Xen tools and enable HA with 3 tolerated failures
#(a sketch of these last steps is at the end of this script).
#You are encouraged to send patches or opinions to newfuntek(at)gmail.com
#Here are some screenshots of the glusterfs sr in the xenserver pool:
#http://s17.postimg.org/3y47n8w27/glusterfsxenserv03.jpg
#http://s17.postimg.org/n4heqfcjz/glusterfsxenserv01.jpg
#http://s17.postimg.org/gs29gl9hr/glusterfsxenserv02.jpg
#uuid pbd http://blog.gluster.org/2012/
#remove the Local Storage SR on sda3
#format and mount the two free partitions, sda2 and sda3, as two network disks
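#The 10.10.10.1x backend bond over NIC2-3 mentioned above is assumed to already exist;
#this script does not create it. For reference, a minimal sketch of creating such a bond
#with the xe CLI (the network label and PIF selection are assumptions, not the author's setup):
#  netuuid=`xe network-create name-label=bond-storage`
#  pif23=`xe pif-list host-name-label=$HOSTNAME device=eth2 --minimal`,`xe pif-list host-name-label=$HOSTNAME device=eth3 --minimal`
#  xe bond-create network-uuid=$netuuid pif-uuids=$pif23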
#allow LVM metadata changes in dom0
sed -i -e "s/metadata_read_only = 1/metadata_read_only = 0/" /etc/lvm/lvm.conf
hname=`hostname`
#hname=`echo $hname | tr [:lower:] [:upper:]`
#find and detach the default "Local storage" SR of this host
sruuid=`xe sr-list host=$hname name-label=Local\ storage --minimal`
pbduid=`xe pbd-list sr-uuid=$sruuid --minimal`
xe pbd-unplug uuid=$pbduid
xe sr-forget uuid=$sruuid
#drop the LVM volume group and physical volume that backed the SR
vgremove `vgdisplay -C | tail -n1 | cut -f3 -d' '` -f
dev4gfs=`pvdisplay -C | tail -n1 | cut -f3 -d' '`
pvremove $dev4gfs -f
umount $dev4gfs
#turn the freed partition and /dev/sdb1 into GlusterFS bricks
#(removeallandformat and init_brick come from http://pastebin.com/ps1LZaSq)
init_brick $dev4gfs "vol0"
#umount "/dev/sda2"
#init_brick "/dev/sda2" "vol1"
removeallandformat "/dev/sdb"
init_brick "/dev/sdb1" "vol2"
###########################################################################
#OnePool
###########################################################################
server="xenserverhw0"
host01="3"
host02="4"
host03="6"
host04="8"
- echo "127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4" > /etc/hosts
- echo "10.10.10.1${host01} ${server}${host01}" >> /etc/hosts
- echo "10.10.10.1${host02} ${server}${host02}" >> /etc/hosts
- echo "10.10.10.1${host03} ${server}${host03}" >> /etc/hosts
- echo "10.10.10.1${host04} ${server}${host04}" >> /etc/hosts
- echo "10.10.10.1${host01}" > /etc/ctdb/nodes
- echo "10.10.10.1${host02}" >> /etc/ctdb/nodes
- echo "10.10.10.1${host03}" >> /etc/ctdb/nodes
- echo "10.10.10.1${host04}" >> /etc/ctdb/nodes
- echo "192.168.10.13${host01}/24 xenbr1" > /etc/ctdb/public_addresses
- echo "192.168.10.13${host02}/24 xenbr1" >> /etc/ctdb/public_addresses
- echo "192.168.10.13${host03}/24 xenbr1" >> /etc/ctdb/public_addresses
- echo "192.168.10.13${host04}/24 xenbr1" >> /etc/ctdb/public_addresses
chkconfig ctdb on
service ctdb start
#init_gluster4 "xenserverhw" "06" "08" "03" "04" "vol0" 4
#service ctdb restart
ctdb status
#https://serversforhackers.com/an-ansible-tutorial
#http://www.cyberciti.biz/faq/
yum install ansible -y
[ -f /etc/ansible/hosts ] && mv /etc/ansible/hosts /etc/ansible/hosts.orig
echo "[web]" > /etc/ansible/hosts
echo "10.10.10.13" >> /etc/ansible/hosts
echo "10.10.10.14" >> /etc/ansible/hosts
echo "10.10.10.16" >> /etc/ansible/hosts
echo "10.10.10.18" >> /etc/ansible/hosts
#ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa
#ssh-copy-id -i root@xenserverhw03
#ssh-copy-id -i root@xenserverhw04
#ssh-copy-id -i root@xenserverhw06
#ssh-copy-id -i root@xenserverhw08
#ansible all -s -m shell -a "ctdb status"
#ae helper: run one shell command on every host in the inventory
echo '#!/bin/sh' > /usr/local/bin/ae
echo 'ansible all -s -m shell -a "$1"' >> /usr/local/bin/ae
chmod 700 /usr/local/bin/ae
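#Example use of the ae helper once SSH keys have been copied to all hosts
#(see the ssh-copy-id steps echoed at the end; commands are illustrative only):
#  ae "gluster peer status"
#  ae "ctdb status"
#Ansible's copy module can also push a file to every node, e.g. (assumed paths):
#  ansible all -m copy -a "src=/etc/ctdb/nodes dest=/etc/ctdb/nodes"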
#CRONTAB WORKS for test logs
[ -f /var/log/checktime.log ] && mv /var/log/checktime.log /var/log/checktime.log.old -f
#checktime: dump time sync, gluster, ctdb and memory status
echo '#!/bin/sh' > /usr/local/bin/checktime
echo 'echo "#########################"' >> /usr/local/bin/checktime
echo "date" >> /usr/local/bin/checktime
echo "ntpstat -s" >> /usr/local/bin/checktime
echo "/sbin/gluster volume status vol2" >> /usr/local/bin/checktime
echo "ctdb status" >> /usr/local/bin/checktime
echo "free" >> /usr/local/bin/checktime
chmod 755 /usr/local/bin/checktime
#append checktime output to the log; run it from cron every minute
echo '#!/bin/sh' > /usr/local/bin/cronuserlogs
echo "/usr/local/bin/checktime 2>&1 | cat >> /var/log/checktime.log" >> /usr/local/bin/cronuserlogs
chmod 755 /usr/local/bin/cronuserlogs
echo "* * * * * /usr/local/bin/cronuserlogs" > ./cronwork
crontab -r
crontab ./cronwork
crontab -l
cat <<__EOF__ > /etc/logrotate.d/checktime
/var/log/checktime.log {
    daily
    rotate 3
    compress
    delaycompress
    missingok
    notifempty
    create 644 root root
}
__EOF__
echo 'Configure NIC1 (192.168.10.13?) and NIC2 (10.10.10.1?)'
echo 'Run on one server:'
echo 'gluster peer probe xenserverhw03'
echo 'gluster peer probe xenserverhw04'
echo 'gluster peer probe xenserverhw06'
echo 'gluster peer probe xenserverhw08'
echo 'init_gluster4 "xenserverhw" "06" "08" "03" "04" "vol0" 4'
echo 'init_gluster4 "xenserverhw" "06" "08" "03" "04" "vol1" 4'
echo 'init_gluster4 "xenserverhw" "06" "08" "03" "04" "vol2" 4'
echo 'Run on every server:'
echo 'service ctdb restart'
echo 'ctdb status'
#echo 'ip addr show | grep inet'
echo "ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa"
echo 'ssh-copy-id -i root@xenserverhw03'
echo 'ssh-copy-id -i root@xenserverhw04'
echo 'ssh-copy-id -i root@xenserverhw06'
echo 'ssh-copy-id -i root@xenserverhw08'
echo 'Now you can run commands on all servers from a single one: ae "ctdb status"'
echo 'What remains is to mount the iso, gfs1 and gfs2 SRs'
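#A rough sketch of the remaining manual steps mentioned above, to be adapted before use.
#init_gluster4 comes from http://pastebin.com/ps1LZaSq; for "vol2" with replica 4 it
#presumably expands to something like (the brick paths are assumptions):
#  gluster volume create vol2 replica 4 xenserverhw06:/export/vol2/brick \
#      xenserverhw08:/export/vol2/brick xenserverhw03:/export/vol2/brick \
#      xenserverhw04:/export/vol2/brick
#  gluster volume start vol2
#The NFS and ISO SRs can then be created against one of the CTDB VIPs
#(the VIP, export paths and labels below are assumptions):
#  xe sr-create name-label=gfs2 shared=true type=nfs content-type=user \
#      device-config:server=192.168.10.133 device-config:serverpath=/vol2
#  xe sr-create name-label=iso shared=true type=iso content-type=iso \
#      device-config:location=192.168.10.133:/vol1
#Finally, HA with 3 tolerated host failures, using the NFS SR for the heartbeat:
#  xe pool-ha-enable heartbeat-sr-uuids=<sr-uuid>
#  xe pool-param-set uuid=<pool-uuid> ha-host-failures-to-tolerate=3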
#END