#XENSERVERHW7_CONFIG
#Jun 14th, 2016
#USE IT FROM THE COMMAND LINE (without the leading #):
#curl http://pastebin.com/raw/SNnhH3rx > xendeletelocalsr.sh; tr -d "\015" < xendeletelocalsr.sh > xendeletelocalsrnew.sh
#sh xendeletelocalsrnew.sh
#I have managed to configure 4 servers running Dundee beta3 with glusterfs and ctdb, with fully working HA and no SPOF.
#I have four NICs: NIC0 xenserver management 192.168.10.2*, NIC1 gluster vm client 192.168.10.14*, bond NIC2-3 10.10.10.1*, ctdb VIPs 192.168.10.13*. In /etc/hosts I keep only the 10.10.10.1x server IPs and use them when creating the glusterfs volumes, so the glusterfs backend traffic stays on the bond.
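#For illustration, a minimal /etc/hosts matching that layout might look like this
#(a hedged sketch; the osd* aliases and IPs are the ones generated by the OnePool
#section further below, which writes the full file):
#  127.0.0.1     localhost
#  10.10.10.13   osd3
#  10.10.10.14   osd4
#  10.10.10.16   osd6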
#The main compilation script
#Removing local storage and configuring ctdb and /etc/hosts:
#http://pastebin.com/SNnhH3rx
#At the end you should:
#gluster peer probe the servers
#and
#init_gluster3 "xen" "1" "2" "3" "vol0" 3 #replica 3 at the end!
#create the NFS and ISO SRs, make a VM, install xen tools, and enable HA with 3 failovers.
#You are encouraged to send patches or opinions to newfuntek(at)gmail.com
#Here are some screenshots of the glusterfs SR in the xenserver pool:
#http://s17.postimg.org/3y47n8w27/glusterfsxenserv03.jpg
#http://s17.postimg.org/n4heqfcjz/glusterfsxenserv01.jpg
#http://s17.postimg.org/gs29gl9hr/glusterfsxenserv02.jpg
####################################################################################################################################
#Internet help
#Xenserver doc links
#http://www.poppelgaard.com/citrix-xenserver-6-5
#http://www.gluster.org/community/documentation/index.php/GlusterFS_Documentation
#xenserver tutorials and cheatsheets
#http://www.admin-magazine.com/HPC/Articles/GlusterFS
#http://www.slashroot.in/gfs-gluster-file-system-complete-tutorial-guide-for-an-administrator
#https://virtualizationandstorage.wordpress.com/2010/11/15/xenserver-commands/
#http://krypted.com/unix/using-the-xensource-command-line-interface/
#http://funwithlinux.net/2013/02/glusterfs-tips-and-tricks-centos/
#http://xmodulo.com/category/xenserver
#compilation of xenserver
#https://discussions.citrix.com/topic/372069-does-libvmi-work-on-xenserver/
#glusterfs slides info
#http://rajesh-joseph.blogspot.com/2015/11/usenix-lisa-2015-tutorial-on-glusterfs.html
#https://github.com/gluster/gluster-tutorial/blob/master/LISA-GlusterFS-Introduction.pdf
#https://github.com/gluster/gluster-tutorial/blob/master/LISA-GlusterFS-Hands-on.pdf
#compilation of glusterfs
#http://majentis.com/?p=319
#various glusterfs layouts, RAID-like
#http://sysadm.pp.ua/linux/glusterfs-setup.html
#glusterfs performance
#https://blog.secretisland.de/xenserver-mit-glusterfs/
#http://blog.dradmin.co.in/?tag=glusterfs-how-to
#https://gluster.readthedocs.org/en/latest/Administrator%20Guide/Managing%20Volumes/
#https://www.mail-archive.com/users@ovirt.org/msg31079.html
#http://www.gluster.org/community/documentation/index.php/Performance_Testing
#glusterfs on lvm
#https://support.rackspace.com/how-to/getting-started-with-glusterfs-considerations-and-installation/
#glusterfs profiling (delays in ops)
#https://gluster.readthedocs.org/en/latest/Administrator%20Guide/Monitoring%20Workload/
#xenserver glusterfs discussion - they said it was not possible ;)
#http://discussions.citrix.com/topic/366729-about-xenserver-glusterfs/page-2
#xenserver ha
#http://docs.citrix.com/de-de/xencenter/6-1/xs-xc-protection/xs-xc-pools-ha/xs-xc-pools-ha-about.html
#http://xapi-project.github.io/features/HA/HA.html
#https://support.citrix.com/servlet/KbServlet/download/21018-102-664364/High%20Availability%20for%20Citrix%20XenServer.pdf
#https://xen-orchestra.com/blog/xenserver-and-vm-high-availability/
#https://discussions.citrix.com/topic/367150-ntp-ha-self-fencing/page-2#entry1884695
#http://discussions.citrix.com/topic/333343-need-help-interpreting-xha-logs/
#ha restart after failure
#http://support.citrix.com/article/CTX128275
#http://citrixtechs.com/blog/help-my-citrix-xenserver-poolmaster-is-down-2/
#http://discussions.citrix.com/topic/292757-local-storage-unplugged-and-un-repairable/
#xenserver iscsi
#http://gluster.readthedocs.org/en/latest/Administrator%20Guide/GlusterFS%20iSCSI/#Running_the_target_on_the_gluster_client
#xenserver multipath
#http://docs.citrix.com/content/dam/docs/en-us/xenserver/xenserver-61/xs-design-multipathing-config.pdf
#xenserver iptables discussion
#http://discussions.citrix.com/topic/235974-cannot-add-new-nfs-virtual-disk-sr-can-add-iso-library-nfs-sr/page-2
#rebalance nics (bond for glusterfs)
#http://www.gluster.org/pipermail/gluster-users/2014-November/019463.html
#http://www.gluster.org/pipermail/gluster-users/2014-November/019466.html
#multi-nic split network for gluster in the near future
#https://www.gluster.org/pipermail/gluster-users/2015-May/021815.html
#http://www.gluster.org/community/documentation/index.php/Features/SplitNetwork
#http://pl.atyp.us/hekafs.org/index.php/2013/01/split-and-secure-networks-for-glusterfs/
#host-in-vm (nested virtualization) config
#https://wiki.openstack.org/wiki/XenServer/VirtualBox
#reinstall xenserver host
#http://support.citrix.com/article/CTX136342
#discussion: rejoin xenserver to a pool
#http://discussions.citrix.com/topic/303468-server-removed-from-pool-after-failure-now-that-it-is-back-it-can-not-rejoin-pool-and-emergency-master-reset-does-not-work/
#increase dom0 memory
#http://support.citrix.com/article/CTX134951
#find rpm
#http://rpm.pbone.net/
#win admin
#win7-10: Net user administrator /active:yes
#shrink an ext4 filesystem and its LV:
#umount /dev/v/l
#e2fsck -f /dev/v/l
#resize2fs -p /dev/v/l 4G
#lvreduce -L -8.9G /dev/
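#hedged worked example of the shrink sequence above (volume names are placeholders):
#umount /dev/vg0/lv0
#e2fsck -f /dev/vg0/lv0
#resize2fs -p /dev/vg0/lv0 4G   #shrink the filesystem first...
#lvreduce -L 5G /dev/vg0/lv0    #...then the LV, keeping it larger than the fs
#resize2fs -p /dev/vg0/lv0      #optionally regrow the fs to fill the LV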

####################################################################################################################################
#turn off nfs and iptables
chkconfig nfs off
service nfs stop
rm -f /etc/exports
service ntpd restart
chkconfig ntpd on
service iptables stop
chkconfig iptables off
systemctl disable firewalld
systemctl stop firewalld

#turn off selinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/sysconfig/selinux
#masquerade as CentOS: ceph-deploy detects the distro from /etc/centos-release
mv /etc/centos-release /etc/centos-release.xs -f
echo 'CentOS release 7.2 (Final)' > /etc/centos-release

#set up additional package repositories
#https://discussions.citrix.com/topic/372069-does-libvmi-work-on-xenserver/
#yum install epel-release
#http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/
#http://elrepo.org/tiki/tiki-index.php
#rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
#rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
#wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-6.noarch.rpm
#rpm -ivh epel-release-7-6.noarch.rpm
#rpm -Uvh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el7.rf.x86_64.rpm
#rpm -Uvh http://rpms.famillecollet.com/enterprise/remi-release-7.rpm
#rpm -Uvh http://repo.webtatic.com/yum/el7/webtatic-release.rpm
sed -i -e "/baseurl=http:\/\/www.uk/d" /etc/yum.repos.d/CentOS-Base.repo
sed -i -e "/mirrorlist/d" /etc/yum.repos.d/CentOS-Base.repo
sed -i -e "s/^#base/base/" /etc/yum.repos.d/CentOS-Base.repo
sed -i -e "s/enabled=0/enabled=1/" /etc/yum.repos.d/CentOS-Base.repo
sed -i -e "s/\$releasever/7/" /etc/yum.repos.d/CentOS-Base.repo
yum install epel-release -y
yum install centos-release-gluster -y
sed -i -e "s/enabled=0/enabled=1/" /etc/yum.repos.d/CentOS-Gluster-3.7.repo
sed -i -e "s/\$releasever/7/" /etc/yum.repos.d/CentOS-Gluster-3.7.repo
sed -i -e "s/buildlogs.centos.org\/centos\/7\/storage\/\$basearch\/gluster-3.7/buildlogs.centos.org\/centos\/7\/storage\/\$basearch\/gluster-3.8/" /etc/yum.repos.d/CentOS-Gluster-3.7.repo
#sed -i -e "s/enabled=0/enabled=1/" /etc/yum.repos.d/epel-testing.repo
yum clean all
yum repolist enabled
yum -y install deltarpm
#yum update --skip-broken -y
yum -y install vim-enhanced mc yum-utils curl e4fsprogs epel-rpm-macros
curl ix.io/client > /usr/local/bin/ix
chmod +x /usr/local/bin/ix

yum install -y glusterfs glusterfs-api-devel python-gluster ctdb ceph-deploy bind-utils xfsprogs git dnsmasq ansible
#yum install -y open-vm-tools ## ONLY FOR VMWARE VIRTUAL MACHINES

#start the glusterfs services
systemctl unmask rpcbind.socket
service glusterd start
service glusterd status
service glusterfsd start
service glusterfsd status
chkconfig glusterd on
chkconfig glusterfsd on

wget http://halizard.org/release/noSAN-combined/halizard_nosan_installer_1.4.7
chmod 755 halizard_nosan_installer_1.4.7
sed -i 's/`uname -r`/3.10.0+2/' halizard_nosan_installer_1.4.7

#curl http://ix.io/oxr > /etc/ntp.conf.ix
#curl http://ix.io/ojO > /etc/ntp.conf.ix
#gluster10_4nodes
#curl http://ix.io/op8 > /etc/ntp.conf.ix
#gluster_200_201
#curl http://ix.io/oKN > /etc/ntp.conf.ix
#gluster_201_202_203
#curl http://ix.io/QLt > /etc/ntp.conf.ix
#gluster10_3nodes
curl http://ix.io/Tm5 > /etc/ntp.conf.ix
tr -d "\015" < /etc/ntp.conf.ix > /etc/ntp.conf
echo OPTIONS="-u ntp:ntp -p /var/run/ntpd.pid -x" > /etc/sysconfig/ntpd
echo SYNC_HWCLOCK=yes >> /etc/sysconfig/ntpd
hwclock --systohc
service ntpd stop
service ntpd start
ntpstat -s
ntpq -p
ntpstat -s

echo "#/opt/xensource/bin/xe-toolstack-restart" >> /etc/rc.d/rc.local
echo "service glusterd restart" >> /etc/rc.d/rc.local
echo "service glusterfsd restart" >> /etc/rc.d/rc.local
echo "service ctdb restart" >> /etc/rc.d/rc.local
chmod 755 /etc/rc.d/rc.local

echo "xe host-emergency-ha-disable force=true" > /usr/local/bin/restartfence
echo "/opt/xensource/bin/xe-toolstack-restart" >> /usr/local/bin/restartfence
echo "service glusterd restart" >> /usr/local/bin/restartfence
echo "service glusterfsd restart" >> /usr/local/bin/restartfence
echo "service ctdb restart" >> /usr/local/bin/restartfence
chmod 755 /usr/local/bin/restartfence

echo "/opt/xensource/bin/xe-toolstack-restart" > /usr/local/bin/restartoolxen
echo "service glusterd restart" >> /usr/local/bin/restartoolxen
echo "service glusterfsd restart" >> /usr/local/bin/restartoolxen
echo "service ctdb restart" >> /usr/local/bin/restartoolxen
chmod 755 /usr/local/bin/restartoolxen

echo "service glusterd restart" > /usr/local/bin/restartdodatki
echo "service glusterfsd restart" >> /usr/local/bin/restartdodatki
echo "service ctdb restart" >> /usr/local/bin/restartdodatki
chmod 755 /usr/local/bin/restartdodatki

echo "service glusterd restart" > /usr/local/bin/restartgluster
echo "service glusterfsd restart" >> /usr/local/bin/restartgluster
chmod 755 /usr/local/bin/restartgluster

#init_gluster4 "xen" "1" "2" "3" "4" "vol0" 4
#server="xen"; host01="1"; host02="2"; host03="3"; host04="4"; volume="vol0"; replica=4;
cat <<__EOF__ > /usr/local/bin/init_gluster4
server=\$1
host01=\$2
host02=\$3
host03=\$4
host04=\$5
volume=\$6
replica=\$7
gluster peer status
#run on any single node
#glusterfs: two volumes, vol0 (iso) and vol1 (gfs), on sda3 and sda2
#gluster volume stop \$volume force
#gluster volume delete \$volume force
gluster volume create \$volume replica \$replica \${server}\${host01}:/export/\${server}\${host01}-\$volume \${server}\${host02}:/export/\${server}\${host02}-\$volume \${server}\${host03}:/export/\${server}\${host03}-\$volume \${server}\${host04}:/export/\${server}\${host04}-\$volume force
gluster volume set \$volume nfs.port 2049
gluster volume set \$volume performance.cache-size 128MB
gluster volume set \$volume performance.write-behind-window-size 4MB
gluster volume set \$volume performance.io-thread-count 64
gluster volume set \$volume performance.io-cache on
gluster volume set \$volume performance.read-ahead on
gluster volume start \$volume
gluster volume info \$volume
gluster volume status \$volume
#mount the NFS SR via localhost or a ctdb VIP, path :/vol0 or :/vol1
__EOF__
chmod 755 /usr/local/bin/init_gluster4
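#Once a volume is started, a hedged example of creating the SRs from XenServer
#(the VIP 192.168.10.131 and the SR names are placeholders; localhost works too):
#xe sr-create type=nfs content-type=user shared=true name-label=gfs0 \
#  device-config:server=192.168.10.131 device-config:serverpath=/vol0
#xe sr-create type=iso content-type=iso shared=true name-label=isos \
#  device-config:location=192.168.10.131:/vol1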

#init_gluster3 "xen" "1" "2" "3" "vol0" 3
#server="xen"; host01="1"; host02="2"; host03="3"; volume="vol0"; replica=3;
cat <<__EOF__ > /usr/local/bin/init_gluster3
server=\$1
host01=\$2
host02=\$3
host03=\$4
volume=\$5
replica=\$6
gluster peer status
#run on any single node
#glusterfs: two volumes, vol0 (iso) and vol1 (gfs), on sda3 and sda2
#gluster volume stop \$volume force
#gluster volume delete \$volume force
gluster volume create \$volume replica \$replica \${server}\${host01}:/export/\${server}\${host01}-\$volume \${server}\${host02}:/export/\${server}\${host02}-\$volume \${server}\${host03}:/export/\${server}\${host03}-\$volume force
gluster volume set \$volume nfs.port 2049
gluster volume set \$volume performance.cache-size 128MB
gluster volume set \$volume performance.write-behind-window-size 4MB
gluster volume set \$volume performance.io-thread-count 64
gluster volume set \$volume performance.io-cache on
gluster volume set \$volume performance.read-ahead on
gluster volume start \$volume
gluster volume info \$volume
gluster volume status \$volume
#mount the NFS SR via localhost or a ctdb VIP, path :/vol0 or :/vol1
__EOF__
chmod 755 /usr/local/bin/init_gluster3

#init_gluster2 "xen" "1" "2" "vol0" 2
#server="xen"; host01="1"; host02="2"; volume="vol0"; replica=2;
cat <<__EOF__ > /usr/local/bin/init_gluster2
server=\$1
host01=\$2
host02=\$3
volume=\$4
replica=\$5
gluster peer status
#run on any single node
#glusterfs: two volumes, vol0 (iso) and vol1 (gfs), on sda3 and sda2
#gluster volume stop \$volume force
#gluster volume delete \$volume force
gluster volume create \$volume replica \$replica \${server}\${host01}:/export/\${server}\${host01}-\$volume \${server}\${host02}:/export/\${server}\${host02}-\$volume force
gluster volume set \$volume nfs.port 2049
gluster volume set \$volume performance.cache-size 128MB
gluster volume set \$volume performance.write-behind-window-size 4MB
gluster volume set \$volume performance.io-thread-count 64
gluster volume set \$volume performance.io-cache on
gluster volume set \$volume performance.read-ahead on
gluster volume start \$volume
gluster volume info \$volume
gluster volume status \$volume
#mount the NFS SR via localhost or a ctdb VIP, path :/vol0 or :/vol1
__EOF__
chmod 755 /usr/local/bin/init_gluster2

#init_brick "/dev/sdb1" "vol2"
cat <<__EOF__ > /usr/local/bin/init_brick
dev4gfs=\$1
volume=\$2
hname=\`hostname\`
mkfs.ext4 -m 0 -j \$dev4gfs
tune2fs -O dir_index -o user_xattr \$dev4gfs
mkdir -p /export/\${hname}-\${volume}
echo  "\$dev4gfs /export/\${hname}-\${volume} ext4 rw,noatime,nodiratime,user_xattr,barrier=0,data=ordered 1 2" >> /etc/fstab
mount -a
__EOF__
chmod 755 /usr/local/bin/init_brick

#reformat_brick "/dev/sda2" "vol1"
cat <<__EOF__ > /usr/local/bin/reformat_brick
dev4gfs=\$1
volume=\$2
hname=\`hostname\`
umount /export/\${hname}-\${volume}
mkfs.ext4 -m 0 -j \$dev4gfs
tune2fs -O dir_index -o user_xattr \$dev4gfs
mount /export/\${hname}-\${volume}
__EOF__
chmod 755 /usr/local/bin/reformat_brick

#removeallandformat "/dev/sdb"
cat <<__EOF__ > /usr/local/bin/removeallandformat
#!/bin/bash
# Script to automatically remove all partitions from the given disk (\$1) and
# create a single partition spanning the whole disk
# Remove each partition
for v_partition in \`parted -s \$1 print|awk '/^ / {print \$1}'\`; do umount \$1\${v_partition}; parted -s \$1 rm \${v_partition}; done
# Find size of disk
v_disk=\`parted -s \$1 print|awk '/^Disk \/dev/ {print \$3}'| sed 's/[Mm][Bb]//'\`
# Create single partition
parted -s \$1 mkpart primary 1 \${v_disk}
# Format the partition
# mke2fs -T ext3 \${1}1
__EOF__
chmod 755 /usr/local/bin/removeallandformat

####################################################################################################################################
#ctdb needs recompiling: SCHED_FIFO does not work in the xenserver dom0, so patch it to use SCHED_OTHER
yum install gcc-c++ autoconf rpm-build -y
yum install popt-devel libtalloc-devel libtdb-devel libtevent-devel -y

mkdir rpmbuild/SPECS -p; curl http://ix.io/QMh > rpmbuild/SPECS/ctdb.spec
wget https://download.samba.org/pub/ctdb/ctdb-2.5.6.tar.gz -P rpmbuild/SOURCES
rpmbuild -bp rpmbuild/SPECS/ctdb.spec
sed -i "s/SCHED_FIFO/SCHED_OTHER/"  rpmbuild/BUILD/ctdb-2.5.6/common/system_util.c
sed -i "s/p.sched_priority = 1/p.sched_priority = 0/"  rpmbuild/BUILD/ctdb-2.5.6/common/system_util.c
rpmbuild -bc --short-circuit  rpmbuild/SPECS/ctdb.spec --noclean
rpmbuild -bi --short-circuit  rpmbuild/SPECS/ctdb.spec --noclean
rpmbuild -bb --short-circuit  rpmbuild/SPECS/ctdb.spec --noclean
rpm -ivh rpmbuild/RPMS/x86_64/ctdb-* --nodeps --force
mv rpmbuild rpmbuild-ctdb -f
sed -i "s/^# CTDB_LOGFILE/CTDB_LOGFILE/" /etc/sysconfig/ctdb
sed -i "s/^# CTDB_NODES/CTDB_NODES/" /etc/sysconfig/ctdb
sed -i "s/^CTDB_RECOVERY_LOCK/# CTDB_RECOVERY_LOCK/" /etc/sysconfig/ctdb
sed -i 's/Restart=no/Restart=always/' /usr/lib/systemd/system/ctdb.service
#service smb stop
#chkconfig smb off
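#Optional sanity check that the SCHED_FIFO patch landed before the build
#(a hedged check; paths follow the rpmbuild layout used above):
grep -n "SCHED_OTHER" rpmbuild-ctdb/BUILD/ctdb-2.5.6/common/system_util.c
rpm -q ctdb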

wget http://downloadns.citrix.com.edgesuite.net/11624/XenServer-7.0.0-binpkg.iso
mkdir iso1; mount XenServer-7.0.0-binpkg.iso iso1
yum localinstall iso1/x86_64/kernel-* -y
#wget http://downloadns.citrix.com.edgesuite.net/11620/XenServer-7.0.0-DDK.iso
wget http://downloadns.citrix.com.edgesuite.net/11623/XenServer-7.0.0-source.iso
mkdir iso; mount XenServer-7.0.0-source.iso iso
rpm -ivh iso/kernel-3.10.96-484.383030.src.rpm
rpmbuild -bp rpmbuild/SPECS/kernel.spec
sed -i 's/# CONFIG_CEPH_FS is not set/CONFIG_CEPH_FS=m/g' rpmbuild/BUILD/kernel-3.10.96/linux-3.10.96/.config
rpmbuild -bc --short-circuit  rpmbuild/SPECS/kernel.spec --noclean
rpmbuild -bi --short-circuit  rpmbuild/SPECS/kernel.spec --noclean
rpmbuild -bb --short-circuit  rpmbuild/SPECS/kernel.spec --noclean
rpm -ivh rpmbuild/RPMS/x86_64/kernel-* --nodeps --force
#scp  rpmbuild/RPMS/x86_64/kernel-* xenserverhw04:
#scp  rpmbuild/RPMS/x86_64/kernel-* xenserverhw06:
#rpm -ivh kernel-* --nodeps --force
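#After rebooting into the rebuilt kernel, a hedged sanity check that the ceph
#filesystem module is now available:
#uname -r
#modprobe ceph && lsmod | grep ceph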


####################################################################################################################################
##CEPH XENSERVER
#http://www.virtualtothecore.com/en/quickly-build-a-new-ceph-cluster-with-ceph-deploy-on-centos-7/
#prerequisites: ntpd running, selinux disabled, ssh-copy-id done
useradd -d /home/cephuser -m cephuser
echo "cephuser ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephuser
chmod 0440 /etc/sudoers.d/cephuser
sed -i 's/Defaults requiretty/#Defaults requiretty/g' /etc/sudoers
git clone https://github.com/mstarikov/rbdsr.git
cd rbdsr
python ./install_rbdsr.py enable
####################################################################################################################################
###########################################################################
#uuid pbd http://blog.gluster.org/2012/
#remove the Local Storage SR on sda3
#format and mount the two freed partitions, sda2 and sda3, as two network disks
  388. sed -i -e "s/metadata_read_only = 1/metadata_read_only = 0/" /etc/lvm/lvm.conf
  389. hname=`hostname`
  390. #hname=`echo $hname | tr [:lower:] [:upper:]`
  391. sruuid=`xe sr-list host=$hname name-label=Local\ storage --minimal`
  392. pbduid=`xe pbd-list sr-uuid=$sruuid --minimal`
  393. xe pbd-unplug uuid=$pbduid
  394. xe sr-forget uuid=$sruuid
  395. vgremove `vgdisplay -C | tail -n1 | cut -f3 -d' '` -f
  396. dev4gfs=`pvdisplay -C | tail -n1 | cut -f3 -d' '`
  397. pvremove $dev4gfs -f
  398.  
  399. umount $dev4gfs
  400. init_brick $dev4gfs "vol0"
  401.  
  402. umount "/dev/sda2"
  403. init_brick "/dev/sda2" "vol1"
  404.  
#FOR CEPH
parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%
#parted -s /dev/sdb mklabel gpt mkpart primary 0% 33% mkpart primary 34% 66% mkpart primary 67% 100%
mkfs.xfs /dev/sdb1 -f
#removeallandformat "/dev/sdb"
#init_brick "/dev/sdb1" "vol2"

###########################################################################
#OnePool
###########################################################################
server="xenserverhw0"
host01="3"
host02="4"
host03="6"
#host04="8"
echo "127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4" > /etc/hosts
echo "10.10.10.1${host01} osd${host01}" >> /etc/hosts
echo "10.10.10.1${host02} osd${host02}" >> /etc/hosts
echo "10.10.10.1${host03} osd${host03}" >> /etc/hosts
echo "192.168.10.5${host01} ${server}${host01}" >> /etc/hosts
echo "192.168.10.5${host02} ${server}${host02}" >> /etc/hosts
echo "192.168.10.5${host03} ${server}${host03}" >> /etc/hosts
echo "10.10.10.1${host01}" > /etc/ctdb/nodes
echo "10.10.10.1${host02}" >> /etc/ctdb/nodes
echo "10.10.10.1${host03}" >> /etc/ctdb/nodes
echo "192.168.10.5${host01}/24 xenbr1" > /etc/ctdb/public_addresses
echo "192.168.10.5${host02}/24 xenbr1" >> /etc/ctdb/public_addresses
echo "192.168.10.5${host03}/24 xenbr1" >> /etc/ctdb/public_addresses
chkconfig ctdb on
#service ctdb restart
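#once ctdb is running on every node, a hedged pair of checks:
#ctdb status   #all nodes should eventually report OK
#ctdb ip       #shows which node currently hosts each public VIP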

#init_gluster3 "xenserverhw" "03" "04" "06" "vol0" 3
#init_gluster3 "xenserverhw" "03" "04" "06" "vol1" 3
#service ctdb restart
#ctdb status

#https://serversforhackers.com/an-ansible-tutorial
#http://www.cyberciti.biz/faq/
[ -f /etc/ansible/hosts ] && mv /etc/ansible/hosts /etc/ansible/hosts.orig -f
echo "[web]" > /etc/ansible/hosts
echo "10.10.10.1${host01}" >> /etc/ansible/hosts
echo "10.10.10.1${host02}" >> /etc/ansible/hosts
echo "10.10.10.1${host03}" >> /etc/ansible/hosts
#echo "10.10.10.1${host04}" >> /etc/ansible/hosts
ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa
#ssh-copy-id -i root@xenserverhw03
#ssh-copy-id -i root@xenserverhw04
#ssh-copy-id -i root@xenserverhw05
#ssh-copy-id -i root@xenserverhw06
#ansible all -s -m shell -a "ctdb status"
echo 'ansible all -s -m shell -a "$1"' > /usr/local/bin/ae
chmod 700 /usr/local/bin/ae
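#the ae helper runs any shell command on every host in the [web] group, e.g.:
#ae "uptime"
#ae "gluster peer status"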
#cron job collecting periodic test logs
[ -f /var/log/checktime.log ] && mv /var/log/checktime.log /var/log/checktime.log.old -f
echo 'echo "#########################"' > /usr/local/bin/checktime
echo "date" >> /usr/local/bin/checktime
echo "ntpstat -s" >> /usr/local/bin/checktime
echo "/sbin/gluster volume status vol2" >> /usr/local/bin/checktime
echo "ctdb status" >> /usr/local/bin/checktime
echo "free" >> /usr/local/bin/checktime
chmod 755 /usr/local/bin/checktime
echo "/usr/local/bin/checktime  2>&1 | cat >> /var/log/checktime.log" > /usr/local/bin/cronuserlogs
chmod 755 /usr/local/bin/cronuserlogs
echo "* * * * * /usr/local/bin/cronuserlogs" > ./cronwork
crontab -r
crontab ./cronwork
crontab -l

cat <<__EOF__ > /etc/logrotate.d/checktime
/var/log/checktime.log {
    daily
    rotate 3
    compress
    delaycompress
    missingok
    notifempty
    create 644 root root
}
__EOF__
cat <<__EOF__ > ~/.ssh/config
Host *
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
__EOF__
#the rsa key was already generated above; just copy it to every node:
#for node in xenserverhw03 xenserverhw04 xenserverhw06 osd3 osd4 osd6; do ssh-copy-id $node ; done


#CEPH
#http://www.virtualtothecore.com/en/adventures-ceph-storage-part-1-introduction/
#https://blog.zhaw.ch/icclab/tag/ceph/
#https://wiki.centos.org/SpecialInterestGroup/Storage/ceph-Quickstart
#http://linoxide.com/storage/setup-red-hat-ceph-storage-centos-7-0/
#http://karan-mj.blogspot.com/2013/12/what-is-ceph-ceph-is-open-source.html
#https://www.reddit.com/r/DataHoarder/comments/4gzpxi/why_is_ceph_so_rare_for_home_use_even_among/
#http://palmerville.github.io/2016/04/30/single-node-ceph-install.html
cat <<__EOF__ >> ./ceph.conf
mon_pg_warn_max_per_osd = 0
public network = 192.168.10.0/24
cluster network = 10.10.10.0/24
#Choose reasonable numbers of replicas and placement groups.
osd pool default size = 2 # Write an object 2 times
osd pool default min size = 1 # Allow writing 1 copy in a degraded state
osd pool default pg num = 256
osd pool default pgp num = 256
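#Rule of thumb (hedged): total PGs ~= (OSDs x 100) / replicas, rounded up to a
#power of two; with 3 OSDs and size 2 that gives 150 -> 256, as set above.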
#Choose a reasonable crush leaf type:
#0 for a 1-node cluster.
#1 for a multi node cluster in a single rack
#2 for a multi node, multi chassis cluster with multiple hosts in a chassis
#3 for a multi node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1
[osd.0]
public addr = 192.168.10.53
cluster addr = 10.10.10.13
host = osd3
devs = /dev/sdb
osd journal = /dev/sda2
[osd.1]
public addr = 192.168.10.54
cluster addr = 10.10.10.14
host = osd4
devs = /dev/sdb
osd journal = /dev/sda2
[osd.2]
public addr = 192.168.10.56
cluster addr = 10.10.10.16
host = osd6
devs = /dev/sdb
osd journal = /dev/sda2
__EOF__


cat <<__EOF__ > /usr/local/bin/init_ceph
server="xenserverhw0"
host01="3"
host02="4"
host03="6"
[ ! -d ceph-deploy ] && mkdir ceph-deploy
cd ceph-deploy/
ceph-deploy purge xenserverhw03 xenserverhw04 xenserverhw06
ceph-deploy purgedata xenserverhw03 xenserverhw04 xenserverhw06
ceph-deploy forgetkeys
ceph-deploy new xenserverhw03 xenserverhw04 xenserverhw06
#ceph-deploy install --release jewel --no-adjust-repos xenserverhw03 xenserverhw04 xenserverhw06
#ceph-deploy install --release jewel xenserverhw03 xenserverhw04 xenserverhw06
ceph-deploy install --repo-url http://download.ceph.com/rpm-jewel/el7/ xenserverhw03 xenserverhw04 xenserverhw06
ceph-deploy --overwrite-conf mon create-initial

ceph-deploy gatherkeys xenserverhw03
for i in osd3 osd4 osd6; do ceph-deploy disk zap \$i:sdb; done
ae "parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%"
ae "mkfs.xfs /dev/sdb1 -f"
#http://tracker.ceph.com/issues/13833
ae "chown ceph:ceph /dev/sda2"
for i in osd3 osd4 osd6; do
ceph-deploy osd prepare \$i:/dev/sdb1:/dev/sda2; done
for i in osd3 osd4 osd6; do
ceph-deploy osd activate \$i:/dev/sdb1:/dev/sda2; done
#ceph-deploy  --username ceph osd create osd3:/dev/sdb1:/dev/sda2
ceph-deploy admin xenserverhw03 xenserverhw04 xenserverhw06 osd3 osd4 osd6
ae "chmod +r /etc/ceph/ceph.client.admin.keyring"
ae "systemctl enable ceph-mon.target"
ae "systemctl enable ceph-mds.target"
ae "systemctl enable ceph-osd.target"
#object storage gateway
ceph-deploy rgw create xenserverhw03 xenserverhw04 xenserverhw06
#cephfs
ceph-deploy mds create xenserverhw03 xenserverhw04 xenserverhw06
ceph -s #ceph status
ceph osd tree
ceph mon_status
ceph osd pool create mypool 1
ceph osd lspools
ceph df
echo "test data" > testfile
rados put -p mypool testfile testfile
rados -p mypool ls
rados -p mypool setomapval testfile mykey myvalue
rados -p mypool getomapval testfile mykey
rados get -p mypool testfile testfile2
md5sum testfile testfile2

ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
mkdir /mnt/mycephfs
mount -t ceph xenserverhw03:6789:/ /mnt/mycephfs -o name=admin,secret=\`cat ./ceph.client.admin.keyring | grep key | cut -f 2 | sed 's/key = //g'\`
__EOF__
chmod 755 /usr/local/bin/init_ceph


echo 'Configure NIC1 (192.168.10.15?) and NIC2 (10.10.10.1?)'
echo 'for node in xenserverhw03 xenserverhw04 xenserverhw06 osd3 osd4 osd6; do ssh-copy-id $node ; done'

echo 'FOR GLUSTER execute on one server:'
echo 'gluster peer probe xenserverhw03'
echo 'gluster peer probe xenserverhw04'
echo 'gluster peer probe xenserverhw06'
#echo 'gluster peer probe xenserverhw08'
echo 'init_gluster3 "xenserverhw" "03" "04" "06" "vol0" 3'
echo 'init_gluster3 "xenserverhw" "03" "04" "06" "vol1" 3'
echo 'ae "service ctdb restart"'
echo 'ae "ctdb status"'
#echo 'ip addr show | grep inet'
#echo "ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa"
echo 'Mount the SRs: iso, gfs1, gfs2'

echo 'FOR CEPH execute on one server:'
echo '/usr/local/bin/init_ceph'

#END