# xen7glusterfs_config (pastebin paste by 42n4, Jun 9th, 2016)
#RUN FROM THE COMMAND LINE (without the leading #):
#curl http://pastebin.com/raw/LdtBkcTE > xendeletelocalsr.sh; tr -d "\015" < xendeletelocalsr.sh > xendeletelocalsrnew.sh
#sh xendeletelocalsrnew.sh
#I have managed to configure four XenServer Dundee beta3 hosts with GlusterFS and CTDB, giving fully working HA without a SPOF.
#I have four NICs: NIC0 XenServer management 192.168.10.2*, NIC1 Gluster VM client 192.168.10.14*, bonded NIC2-3 10.10.10.1*, CTDB VIPs 192.168.10.13*.
#In /etc/hosts I keep only the 10.10.10.1x server IPs and use them when creating the GlusterFS volumes, so all GlusterFS backend traffic stays on that network.
#The main compilation script:
#http://pastebin.com/hNsiyPyL
#Removing local storage and configuring ctdb and /etc/hosts (this script):
#http://pastebin.com/LdtBkcTE
#At the end you should:
#run "gluster peer probe" against the other servers
#and
#init_gluster3 "xen" "1" "2" "3" "vol0" 3   #replica 3 at the end! (a rough sketch of the equivalent manual commands follows this header)
#Then create the NFS and ISO SRs, make a VM, install XenServer Tools and enable HA with 3 failovers.
#You are encouraged to send patches or opinions to newfuntek(at)gmail.com
#Here are some screenshots of the GlusterFS SR in the XenServer pool:
#http://s17.postimg.org/3y47n8w27/glusterfsxenserv03.jpg
#http://s17.postimg.org/n4heqfcjz/glusterfsxenserv01.jpg
#http://s17.postimg.org/gs29gl9hr/glusterfsxenserv02.jpg
####################################################################################################################################
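#The init_gluster3/init_gluster4 helpers live in the companion compilation script
#(http://pastebin.com/hNsiyPyL) and are not reproduced in this paste. As a rough,
#untested sketch, the manual equivalent of a 3-way replica volume would look roughly
#like the commands below; the brick path /export/vol0/brick and the use of the osd*
#backend names are assumptions for illustration, not taken from this paste:
#  gluster peer probe osd2
#  gluster peer probe osd3
#  gluster volume create vol0 replica 3 osd1:/export/vol0/brick osd2:/export/vol0/brick osd3:/export/vol0/brick
#  gluster volume start vol0
#  gluster volume info vol0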
#uuid pbd http://blog.gluster.org/2012/
#remove the Local Storage SR on sda3
#format and mount the two freed partitions, sda2 and sda3, as two network disks
sed -i -e "s/metadata_read_only = 1/metadata_read_only = 0/" /etc/lvm/lvm.conf
hname=`hostname`
#hname=`echo $hname | tr [:lower:] [:upper:]`
sruuid=`xe sr-list host=$hname name-label=Local\ storage --minimal`
pbduid=`xe pbd-list sr-uuid=$sruuid --minimal`
xe pbd-unplug uuid=$pbduid
xe sr-forget uuid=$sruuid
vgremove `vgdisplay -C | tail -n1 | cut -f3 -d' '` -f
dev4gfs=`pvdisplay -C | tail -n1 | cut -f3 -d' '`
pvremove $dev4gfs -f

umount $dev4gfs
#init_brick $dev4gfs "vol0" #FOR GLUSTERFS

umount "/dev/sda2"
#init_brick "/dev/sda2" "vol1"  #FOR GLUSTERFS

#FOR CEPH
parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%
#parted -s /dev/sdb mklabel gpt mkpart primary 0% 33% mkpart primary 34% 66% mkpart primary 67% 100%
mkfs.xfs /dev/sdb1 -f
#removeallandformat "/dev/sdb"
#init_brick "/dev/sdb1" "vol2"

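#init_brick and removeallandformat are also defined in the companion compilation script
#(http://pastebin.com/hNsiyPyL) and are not part of this paste. A minimal sketch of what
#such a brick-prep helper typically does is shown below; the /export mount point and the
#fstab handling are assumptions for illustration only:
#  init_brick() {        # $1 = block device, $2 = volume name
#    mkfs.xfs -f "$1"
#    mkdir -p "/export/$2"
#    grep -q " /export/$2 " /etc/fstab || echo "$1 /export/$2 xfs defaults 0 0" >> /etc/fstab
#    mount "/export/$2"
#    mkdir -p "/export/$2/brick"
#  }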
###########################################################################
#OnePool
###########################################################################
server="xen"
host01="1"
host02="2"
host03="3"
#/etc/hosts: backend names (10.10.10.1x -> osdX) and management names (192.168.0.20x -> xenX)
echo "127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4" > /etc/hosts
echo "10.10.10.1${host01} osd${host01}" >> /etc/hosts
echo "10.10.10.1${host02} osd${host02}" >> /etc/hosts
echo "10.10.10.1${host03} osd${host03}" >> /etc/hosts
echo "192.168.0.20${host01} ${server}${host01}" >> /etc/hosts
echo "192.168.0.20${host02} ${server}${host02}" >> /etc/hosts
echo "192.168.0.20${host03} ${server}${host03}" >> /etc/hosts
#CTDB node list (backend IPs) and public VIPs served on xenbr1
echo "10.10.10.1${host01}" > /etc/ctdb/nodes
echo "10.10.10.1${host02}" >> /etc/ctdb/nodes
echo "10.10.10.1${host03}" >> /etc/ctdb/nodes
echo "192.168.0.22${host01}/24 xenbr1" > /etc/ctdb/public_addresses
echo "192.168.0.22${host02}/24 xenbr1" >> /etc/ctdb/public_addresses
echo "192.168.0.22${host03}/24 xenbr1" >> /etc/ctdb/public_addresses
chkconfig ctdb on
#service ctdb restart
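#Note: besides the two files above, ctdbd normally also needs a recovery lock on shared
#storage so that only one node can become recovery master. This paste does not show that
#setting; a typical (assumed) configuration in /etc/sysconfig/ctdb would look like:
#  CTDB_RECOVERY_LOCK=/mnt/vol0/.ctdb/lockfile    # path on the replicated Gluster volume (assumption)
#  CTDB_NODES=/etc/ctdb/nodes
#  CTDB_PUBLIC_ADDRESSES=/etc/ctdb/public_addresses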

#init_gluster4 "xen" "1" "2" "3" "vol0" 3
#init_gluster4 "xen" "1" "2" "3" "vol1" 3
#service ctdb restart
#ctdb status

#https://serversforhackers.com/an-ansible-tutorial
#http://www.cyberciti.biz/faq/
[ -f /etc/ansible/hosts ] && mv /etc/ansible/hosts /etc/ansible/hosts.orig -f
echo "[web]" > /etc/ansible/hosts
echo "10.10.10.1${host01}" >> /etc/ansible/hosts
echo "10.10.10.1${host02}" >> /etc/ansible/hosts
echo "10.10.10.1${host03}" >> /etc/ansible/hosts
#ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa
#ssh-copy-id -i root@xen1
#ssh-copy-id -i root@xen2
#ssh-copy-id -i root@xen3
#ansible all -s -m shell -a "ctdb status"
#"ae" = run a shell command on all Ansible hosts (sudo via -s)
echo '#!/bin/sh' > /usr/local/bin/ae
echo 'ansible all -s -m shell -a "$1"' >> /usr/local/bin/ae
chmod 700 /usr/local/bin/ae
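#Example (assumed usage), once the SSH keys below have been distributed:
#  ae "gluster peer status"
#  ae "ctdb status"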
#cron job: collect health/test logs every minute
[ -f /var/log/checktime.log ] && mv /var/log/checktime.log /var/log/checktime.log.old -f
echo 'echo "#########################"' > /usr/local/bin/checktime
echo "date" >> /usr/local/bin/checktime
echo "ntpstat -s" >> /usr/local/bin/checktime
echo "/sbin/gluster volume status vol2" >> /usr/local/bin/checktime
echo "ctdb status" >> /usr/local/bin/checktime
echo "free" >> /usr/local/bin/checktime
chmod 755 /usr/local/bin/checktime
echo "/usr/local/bin/checktime  2>&1 | cat >> /var/log/checktime.log" > /usr/local/bin/cronuserlogs
chmod 755 /usr/local/bin/cronuserlogs
echo "* * * * * /usr/local/bin/cronuserlogs" > ./cronwork
crontab -r
crontab ./cronwork
crontab -l

cat <<__EOF__ > /etc/logrotate.d/checktime
/var/log/checktime.log {
    daily
    rotate 3
    compress
    delaycompress
    missingok
    notifempty
    create 644 root root
}
__EOF__
cat <<__EOF__ > ~/.ssh/config
Host *
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
__EOF__
ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa
#for node in xen1 xen2 xen3 osd3 osd4 osd6; do ssh-copy-id $node ; done


#CEPH
#http://www.virtualtothecore.com/en/adventures-ceph-storage-part-1-introduction/
#https://blog.zhaw.ch/icclab/tag/ceph/
#https://wiki.centos.org/SpecialInterestGroup/Storage/ceph-Quickstart
#http://linoxide.com/storage/setup-red-hat-ceph-storage-centos-7-0/
#http://karan-mj.blogspot.com/2013/12/what-is-ceph-ceph-is-open-source.html
#https://www.reddit.com/r/DataHoarder/comments/4gzpxi/why_is_ceph_so_rare_for_home_use_even_among/
#http://palmerville.github.io/2016/04/30/single-node-ceph-install.html
cat <<__EOF__ >> ./ceph.conf
mon_pg_warn_max_per_osd = 0
public network = 192.168.0.0/24
cluster network = 10.10.10.0/24
#Choose reasonable numbers for number of replicas and placement groups.
osd pool default size = 2 # Write an object 2 times
osd pool default min size = 1 # Allow writing 1 copy in a degraded state
osd pool default pg num = 256
osd pool default pgp num = 256
#Choose a reasonable crush leaf type
#0 for a 1-node cluster.
#1 for a multi node cluster in a single rack
#2 for a multi node, multi chassis cluster with multiple hosts in a chassis
#3 for a multi node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1
[osd.0]
public addr = 192.168.0.53
cluster addr = 10.10.10.13
host = osd3
devs = /dev/sdb
osd journal = /dev/sda2
[osd.1]
public addr = 192.168.0.54
cluster addr = 10.10.10.14
host = osd4
devs = /dev/sdb
osd journal = /dev/sda2
[osd.2]
public addr = 192.168.0.56
cluster addr = 10.10.10.16
host = osd6
devs = /dev/sdb
osd journal = /dev/sda2
__EOF__
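#The pg/pgp numbers above follow the usual rule of thumb from the Ceph docs:
#(number of OSDs * 100) / replica count, rounded up to the next power of two;
#with the 3 OSDs and "osd pool default size = 2" used here, (3 * 100) / 2 = 150 -> 256.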


#NOTE: the heredoc delimiter is quoted so that $i and the backtick substitution in the
#mount command below are written into init_ceph literally instead of being expanded now.
cat <<'__EOF__' > /usr/local/bin/init_ceph
#!/bin/sh
server="xen"
host01="1"
host02="2"
host03="3"
[ ! -d ceph-deploy ] && mkdir ceph-deploy
cd ceph-deploy/
ceph-deploy purge xen1 xen2 xen3
ceph-deploy purgedata xen1 xen2 xen3
ceph-deploy forgetkeys
ceph-deploy new xen1 xen2 xen3
#ceph-deploy install --release jewel --no-adjust-repos xen1 xen2 xen3
ceph-deploy install --release jewel xen1 xen2 xen3
#ceph-deploy install --repo-url http://download.ceph.com/rpm-jewel/el7/ xen1 xen2 xen3
ceph-deploy --overwrite-conf mon create-initial

ceph-deploy gatherkeys xen1
for i in osd3 osd4 osd6; do ceph-deploy disk zap $i:sdb; done
ae "parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%"
ae "mkfs.xfs /dev/sdb1 -f"
#http://tracker.ceph.com/issues/13833
ae "chown ceph:ceph /dev/sda2"
for i in osd3 osd4 osd6; do
ceph-deploy osd prepare $i:/dev/sdb1:/dev/sda2; done
for i in osd3 osd4 osd6; do
ceph-deploy osd activate $i:/dev/sdb1:/dev/sda2; done
#ceph-deploy  --username ceph osd create osd3:/dev/sdb1:/dev/sda2
ceph-deploy admin xen1 xen2 xen3 osd3 osd4 osd6
ae "chmod +r /etc/ceph/ceph.client.admin.keyring"
ae "systemctl enable ceph-mon.target"
ae "systemctl enable ceph-mds.target"
ae "systemctl enable ceph-osd.target"
#object storage gateway
ceph-deploy rgw create xen1 xen2 xen3
#cephfs
ceph-deploy mds create xen1 xen2 xen3
ceph -s #ceph status
ceph osd tree
ceph mon_status
ceph osd pool create mypool 1
ceph osd lspools
ceph df
echo "test data" > testfile
rados put -p mypool testfile testfile
rados -p mypool ls
rados -p mypool setomapval testfile mykey myvalue
rados -p mypool getomapval testfile mykey
rados get -p mypool testfile testfile2
md5sum testfile testfile2

ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
mkdir /mnt/mycephfs
mount -t ceph xen1:6789:/ /mnt/mycephfs -o name=admin,secret=`cat ./ceph.client.admin.keyring | grep key | cut -f 2 | sed 's/key = //g'`
__EOF__
chmod 755 /usr/local/bin/init_ceph
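#Usage (assumed): run the helper once, from the node that holds the ceph-deploy working
#directory, after the SSH keys have been copied to all hosts, e.g.:
#  /usr/local/bin/init_ceph 2>&1 | tee /var/log/init_ceph.log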


echo 'Configure NIC1 (192.168.0.22?) and NIC2 (10.10.10.1?)'
echo 'for node in xen1 xen2 xen3 osd3 osd4 osd6; do ssh-copy-id $node ; done'

echo 'FOR GLUSTER execute on one server:'
echo 'gluster peer probe xen1'
echo 'gluster peer probe xen2'
echo 'gluster peer probe xen3'
#echo 'gluster peer probe xen4'
echo 'init_gluster4 "xen" "1" "2" "3" "vol0" 3'
echo 'init_gluster4 "xen" "1" "2" "3" "vol1" 3'
echo 'ae "service ctdb restart"'
echo 'ae "ctdb status"'
#echo 'ip addr show | grep inet'
#echo "ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa"
echo 'Mount SR iso, gfs1, gfs2'
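#For reference, the gfs1/gfs2 SRs are created from the pool master with "xe sr-create"
#against an NFS export reached through the CTDB VIPs. A rough, untested sketch; the VIP
#192.168.0.221 and the export path /vol0 are assumptions based on the addresses above:
#  xe sr-create name-label=gfs1 shared=true type=nfs content-type=user \
#    device-config:server=192.168.0.221 device-config:serverpath=/vol0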

echo 'FOR CEPH execute on one server'
echo '/usr/local/bin/init_ceph'

#END