#CENTOS4CEPH
#https://www.howtoforge.com/tutorial/how-to-build-a-ceph-cluster-on-centos-7/
#in VirtualBox, create a CentOS 7 VM with 2 GB RAM and two 8 GB disks
#THIS IS THE MAIN SCRIPT (it contains two reboots, so it is several scripts joined into one)
#https://goo.gl/SAjzUj
#MAIN SCRIPT
curl https://pastebin.com/raw/kZX6ubYH | sed 's/\r//g' > ceph.txt
yum install -y gpm && systemctl start gpm
#FROM HERE ON, PASTE LINES FROM THE MAIN SCRIPT AND RUN THE FUNCTIONS
#vi ceph.txt #paste sth

#THE HELPER FUNCTIONS FOR THIS SCRIPT ARE IN A SEPARATE SCRIPT
curl https://pastebin.com/raw/2gEM4ena | sed 's/\r//g' > cephfun.sh
sh cephfun.sh
/usr/local/bin/centos_update
reboot

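#centos_update is installed by cephfun.sh; its body is not shown here. A minimal
#sketch of what a helper like this is assumed to do (the real one may differ):
#centos_update() {
#  yum install -y epel-release deltarpm   #hypothetical package choices
#  yum update -y
#}
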
#extra things not needed right now, e.g. a graphical environment
#http://www.45drives.com/wiki/index.php?title=Installing_MATE_on_CentOS_7
#yum groupinstall -y "X Window system"
#yum groupinstall -y "MATE Desktop"
#systemctl isolate graphical.target
#systemctl set-default graphical.target
#yum install -y gnome-disk-utility yum-utils novnc x11vnc tigervnc tigervnc-server
#useradd -d /home/cephuser -m cephuser
#echo "cephuser ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephuser
#chmod 0440 /etc/sudoers.d/cephuser
#passwd cephuser
#reboot

#shut the machine down after it has rebooted and
#CLONE IT IN VirtualBox INTO TWO ADDITIONAL MACHINES !!!!

################################################################################################
#HERE enter the IPs obtained from DHCP, e.g. for server1 it could be 77, etc.
#centos_static_ip "server" "77" "78" "79"
#server="server"; ip01="77"; ip02="78"; ip03="79"
/usr/local/bin/centos_static_ip "server" "77" "78" "79"
reboot

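#centos_static_ip also comes from cephfun.sh; it takes a hostname prefix plus the
#three DHCP-assigned host octets. A rough sketch of what such a helper is assumed
#to do (the real function may differ):
#centos_static_ip() {
#  local prefix=$1; shift
#  local n=1
#  for octet in "$@"; do                              #map every node name to its address
#    echo "192.168.2.$octet ${prefix}$n" >> /etc/hosts
#    n=$((n+1))
#  done
#  #...plus write a static ifcfg file for this node's own address and set its hostname
#}
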
#su - cephuser
#CEPH
#http://www.virtualtothecore.com/en/adventures-ceph-storage-part-1-introduction/
#https://blog.zhaw.ch/icclab/tag/ceph/
#https://wiki.centos.org/SpecialInterestGroup/Storage/ceph-Quickstart
#http://linoxide.com/storage/setup-red-hat-ceph-storage-centos-7-0/
#http://karan-mj.blogspot.com/2013/12/what-is-ceph-ceph-is-open-source.html
#https://www.reddit.com/r/DataHoarder/comments/4gzpxi/why_is_ceph_so_rare_for_home_use_even_among/
#http://palmerville.github.io/2016/04/30/single-node-ceph-install.html

  50. [ ! -d ceph-deploy ] && mkdir ceph-deploy
  51. cd ceph-deploy/
  52. ceph-deploy purge server1 server2 server3
  53. ceph-deploy purgedata server1 server2 server3
  54. ceph-deploy forgetkeys
  55. ceph-deploy new server1 server2 server3
  56. #ceph-deploy install --release jewel --no-adjust-repos server1 server2 server3
  57. #ceph-deploy install --release jewel server1 server2 server3
  58. ceph-deploy install --repo-url http://download.ceph.com/rpm-jewel/el7/ server1 server2 server3
  59. ceph-deploy --overwrite-conf mon create server1
  60. ceph-deploy --overwrite-conf mon create server2
  61. ceph-deploy --overwrite-conf mon create server3
  62. ceph --admin-daemon /var/run/ceph/ceph-mon.server1.asok mon_status
  63. #poczekaj kilka sekund
  64.  
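#The same quorum check on the other monitors (assumes root ssh access between the
#nodes, as the scp calls below do; the admin socket is named after the mon id,
#which here equals the hostname):
for i in server2 server3; do ssh $i ceph --admin-daemon /var/run/ceph/ceph-mon.$i.asok mon_status; done
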
cat <<__EOF__ >> ./ceph.conf
mon_pg_warn_max_per_osd = 0
public network = 192.168.2.0/24
#cluster network = 192.168.2.0/24
#Choose reasonable numbers for number of replicas and placement groups.
osd pool default size = 2 # Write an object 2 times
osd pool default min size = 1 # Allow writing 1 copy in a degraded state
osd pool default pg num = 64
osd pool default pgp num = 64
#Choose a reasonable crush leaf type
#0 for a 1-node cluster.
#1 for a multi node cluster in a single rack
#2 for a multi node, multi chassis cluster with multiple hosts in a chassis
#3 for a multi node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1
osd journal size = 200
__EOF__
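#The pg num of 64 above follows the usual rule of thumb from the Ceph docs:
#total PGs ~ (OSD count * 100) / replica count, rounded to a power of two.
#A quick sanity check for this 3-OSD, size-2 cluster:
echo $(( 3 * 100 / 2 ))  #150 -> nearest power of two is 128 in total, i.e. 64 per pool for two pools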
scp ./ceph.conf root@server1:/etc/ceph
scp ./ceph.conf root@server2:/etc/ceph
scp ./ceph.conf root@server3:/etc/ceph

ceph-deploy gatherkeys server1
ssh server2 ceph-deploy gatherkeys server2
ssh server3 ceph-deploy gatherkeys server3
for i in server1 server2 server3; do ceph-deploy disk zap $i:sdb; done
ae "parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%"
#check that it ran on all servers
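#ae is another cephfun.sh helper, used here as "run this command on all nodes".
#Its body is not shown; a minimal sketch of the assumed behaviour:
#ae() {
#  for host in server1 server2 server3; do
#    ssh root@"$host" "$@"
#  done
#}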

#http://tracker.ceph.com/issues/13833
#ae "chown ceph:ceph /dev/sda2"
for i in server1 server2 server3; do
ceph-deploy osd --overwrite-conf prepare $i:/dev/sdb1; done

#wait a moment
for i in server1 server2 server3; do
ceph-deploy osd activate $i:/dev/sdb1; done
#check with "ceph -s" that the OSDs were added

#ceph-deploy  --username ceph osd create osd3:/dev/sdb1
ceph-deploy admin server1 server2 server3
ae "chmod +r /etc/ceph/ceph.client.admin.keyring"
ae "systemctl enable ceph-mon.target"
ae "systemctl enable ceph-mds.target"
ae "systemctl enable ceph-osd.target"

#object storage gateway
ceph-deploy rgw create server1 server2 server3
#cephfs
ceph-deploy mds create server1 server2 server3
ceph -s #ceph status
ceph osd tree
ceph mon_status
ceph osd pool create mypool 1
ceph osd lspools
ceph df
echo "test data" > testfile
rados put -p mypool testfile testfile
rados -p mypool ls
rados -p mypool setomapval testfile mykey myvalue
rados -p mypool getomapval testfile mykey
rados get -p mypool testfile testfile2
md5sum testfile testfile2
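#the two checksums should match; optional cleanup of the test objects:
rados -p mypool rm testfile
rm -f testfile testfile2
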
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
mkdir /mnt/mycephfs
mount -t ceph server1:6789:/ /mnt/mycephfs -o name=admin,secret=`cat /etc/ceph/ceph.client.admin.keyring | grep key | cut -f 2 | sed 's/key = //g'`
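#the same mount with the key fetched via the CLI instead of grepping the keyring
#(assumes the admin keyring is already readable on this node):
#mount -t ceph server1:6789:/ /mnt/mycephfs -o name=admin,secret=$(ceph auth get-key client.admin)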

#ceph disk performance test; -s is the file size in MB, -r the RAM size in MB (defaults: 8192 and the available memory)
free && sync && echo 3 > /proc/sys/vm/drop_caches && free
bonnie++ -s 2048 -r 1024 -u root -d /mnt/mycephfs -m BenchClient

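#a cruder quick write test with dd (1 GB of zeros, flushed to disk before dd exits):
dd if=/dev/zero of=/mnt/mycephfs/ddtest bs=1M count=1024 conv=fdatasync
rm -f /mnt/mycephfs/ddtest
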
#https://wiki.mikejung.biz/Sysbench#Install_Sysbench_on_CentOS_7
#http://www.servernoobs.com/avoiding-cpu-speed-scaling-in-modern-linux-distributions-running-cpu-at-full-speed-tips/
#for CPUFREQ in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor;
#do [ -f $CPUFREQ ] || continue;
#  echo -n performance > $CPUFREQ;
#done
grep -E '^model name|^cpu MHz' /proc/cpuinfo
sysbench --test=cpu --cpu-max-prime=10000 --num-threads=4 run
#sysbench --test=fileio --file-test-mode=rndwr run
#sysbench --test=fileio help
#sysbench --test=fileio --file-test-mode=seqwr --num-threads=1 --file-block-size=4096 run
#sysbench --test=fileio --file-test-mode=rndwr --num-threads=1 --file-block-size=4096 run
#http://cromwell-intl.com/linux/performance-tuning/disks.html
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches
iperf -s #network throughput measurement; run it on one server, e.g. server1
iperf -c server1 -i1 -t 10  #measure from a second server