#https://www.howtoforge.com/tutorial/how-to-install-a-ceph-cluster-on-ubuntu-16-04/
#In VirtualBox, create a Lubuntu 16.10 VM with 2 GB RAM and two 8 GB disks
#THIS IS THE MAIN SCRIPT (it contains two reboots, so it stitches together several separate scripts)
#https://goo.gl/SAjzUj
#MAIN SCRIPT
curl https://pastebin.com/raw/LK85bZ8D | sed 's/\r//g' > ceph.txt
apt-get install -y curl gpm vim && systemctl start gpm
#FROM HERE ON, paste lines from the MAIN script and run the FUNCTIONS
#vim ceph.txt #paste something

#FUNCTIONS for this script are kept in a separate script
curl https://pastebin.com/raw/tULD18VV | sed 's/\r//g' > cephfun.sh
sh cephfun.sh
/usr/local/bin/ubuntu_update
#change the root password
passwd
reboot

#extra things that are not needed right now, e.g. a graphical environment

#useradd -d /home/cephuser -m cephuser
#echo "cephuser ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephuser
#chmod 0440 /etc/sudoers.d/cephuser
#passwd cephuser
#reboot

#shut the machine down after the reboot and
#CLONE IT IN VirtualBox INTO TWO ADDITIONAL MACHINES !!!!

################################################################################################
#HERE enter the IPs obtained from DHCP, e.g. server1 may get 77, and so on
#ubuntu_static_ip "server" "77" "78" "79"
#server="server"; ip01="77"; ip02="78"; ip03="79"
/usr/local/bin/ubuntu_static_ip "server" "77" "78" "79"
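#ubuntu_static_ip comes from cephfun.sh; a helper like it presumably writes a static
#address into /etc/network/interfaces and maps the hostnames in /etc/hosts. A minimal
#sketch of the idea (the interface name enp0s3 is an assumption; 192.168.2.0/24 matches
#the "public network" set in ceph.conf below):
#cat <<__EOF__ > /etc/network/interfaces.d/enp0s3.cfg
#auto enp0s3
#iface enp0s3 inet static
#    address 192.168.2.77
#    netmask 255.255.255.0
#    gateway 192.168.2.1
#__EOF__
#echo "192.168.2.77 server1" >> /etc/hosts
#echo "192.168.2.78 server2" >> /etc/hosts
#echo "192.168.2.79 server3" >> /etc/hosts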

#su - cephuser
#CEPH
#http://www.virtualtothecore.com/en/adventures-ceph-storage-part-1-introduction/
#https://blog.zhaw.ch/icclab/tag/ceph/
#https://wiki.centos.org/SpecialInterestGroup/Storage/ceph-Quickstart
#http://linoxide.com/storage/setup-red-hat-ceph-storage-centos-7-0/
#http://karan-mj.blogspot.com/2013/12/what-is-ceph-ceph-is-open-source.html
#https://www.reddit.com/r/DataHoarder/comments/4gzpxi/why_is_ceph_so_rare_for_home_use_even_among/
#http://palmerville.github.io/2016/04/30/single-node-ceph-install.html
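#ceph-deploy also assumes passwordless SSH from this node to server1-3; a minimal
#root-to-root setup would look something like:
#ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
#for i in server1 server2 server3; do ssh-copy-id root@$i; done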
[ ! -d ceph-deploy ] && mkdir ceph-deploy
cd ceph-deploy/
ceph-deploy purge server1 server2 server3
ceph-deploy purgedata server1 server2 server3
ceph-deploy forgetkeys
ceph-deploy new server1 server2 server3
#ceph-deploy install --release jewel --no-adjust-repos server1 server2 server3
#ceph-deploy install --release jewel server1 server2 server3
#NOTE: rpm-jewel/el7 is the CentOS package repo; these are Ubuntu machines, so point at the Debian/Ubuntu packages instead
ceph-deploy install --repo-url http://download.ceph.com/debian-jewel/ server1 server2 server3
ceph-deploy --overwrite-conf mon create server1
ceph-deploy --overwrite-conf mon create server2
ceph-deploy --overwrite-conf mon create server3
ceph --admin-daemon /var/run/ceph/ceph-mon.server1.asok mon_status
#wait a few seconds
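#or poll instead of guessing the delay (assumes the jewel mon_status JSON, where the
#monitor reports "state": "leader" or "peon" once it has joined the quorum):
#until ceph --admin-daemon /var/run/ceph/ceph-mon.server1.asok mon_status | grep -Eq '"state": "(leader|peon)"'; do sleep 2; done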

cat <<__EOF__ >> ./ceph.conf
mon_pg_warn_max_per_osd = 0
public network = 192.168.2.0/24
#cluster network = 192.168.2.0/24
#Choose reasonable numbers for number of replicas and placement groups.
osd pool default size = 2 # Write an object 2 times
osd pool default min size = 1 # Allow writing 1 copy in a degraded state
osd pool default pg num = 64
osd pool default pgp num = 64
#Choose a reasonable crush leaf type
#0 for a 1-node cluster.
#1 for a multi node cluster in a single rack
#2 for a multi node, multi chassis cluster with multiple hosts in a chassis
#3 for a multi node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1
osd journal size = 200
__EOF__
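#where the 64 above comes from: the usual rule of thumb is total PGs across all pools
#~= (OSDs * 100) / replicas, rounded to a power of two, so 64 per pool is a reasonable
#slice here; a quick shell check:
#osds=3; size=2; echo $(( osds * 100 / size ))  # -> 150 PGs to spread over all pools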
scp ./ceph.conf root@server1:/etc/ceph
scp ./ceph.conf root@server2:/etc/ceph
scp ./ceph.conf root@server3:/etc/ceph

ceph-deploy gatherkeys server1
ssh server2 ceph-deploy gatherkeys server2
ssh server3 ceph-deploy gatherkeys server3
for i in server1 server2 server3; do ceph-deploy disk zap $i:sdb; done
ae "parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%"
#check that it ran on all the servers
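#"ae" is presumably a run-on-all-servers helper from cephfun.sh (the comment above
#asks to verify every node); one way to check is to print each partition table:
#ae "parted -s /dev/sdb print"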

#http://tracker.ceph.com/issues/13833
#ae "chown ceph:ceph /dev/sda2"
for i in server1 server2 server3; do
ceph-deploy --overwrite-conf osd prepare $i:/dev/sdb1; done

#wait a moment
for i in server1 server2 server3; do
ceph-deploy osd activate $i:/dev/sdb1; done
#check with "ceph -s" that the OSDs came up
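#instead of waiting blind, poll the osdmap until all three OSDs are up (assumes the
#jewel "ceph osd stat" output format, e.g. "osdmap e15: 3 osds: 3 up, 3 in"):
#until ceph osd stat | grep -q '3 up'; do sleep 5; done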

#ceph-deploy  --username ceph osd create osd3:/dev/sdb1
ceph-deploy admin server1 server2 server3
ae "chmod +r /etc/ceph/ceph.client.admin.keyring"
ae "systemctl enable ceph-mon.target"
ae "systemctl enable ceph-mds.target"
ae "systemctl enable ceph-osd.target"

#object storage gateway
ceph-deploy rgw create server1 server2 server3
#cephfs
ceph-deploy mds create server1 server2 server3
ceph -s #ceph status
ceph osd tree
ceph mon_status
ceph osd pool create mypool 1
ceph osd lspools
ceph df
echo "test data" > testfile
rados put -p mypool testfile testfile
rados -p mypool ls
rados -p mypool setomapval testfile mykey myvalue
rados -p mypool getomapval testfile mykey
rados get -p mypool testfile testfile2
md5sum testfile testfile2
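#remove the scratch object; keep the pool for the rados bench run further down, then
#drop it with the deliberately verbose deletion safeguard once it is no longer needed:
#rados -p mypool rm testfile
#ceph osd pool delete mypool mypool --yes-i-really-really-mean-it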
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
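#the mount below fails until an MDS reports active; a short poll (assumes the jewel
#"ceph mds stat" output, e.g. "e5: 1/1/1 up {0=server1=up:active}"):
#until ceph mds stat | grep -q 'up:active'; do sleep 2; done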
mkdir /mnt/mycephfs
mount -t ceph server1:6789:/ /mnt/mycephfs -o name=admin,secret=`cat /etc/ceph/ceph.client.admin.keyring | grep key | cut -f 2 | sed 's/key = //g'`
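#tidier equivalents: let ceph extract the key itself, or point the mount at a secret
#file so the key never shows up in `mount` output or the process list:
#mount -t ceph server1:6789:/ /mnt/mycephfs -o name=admin,secret=$(ceph auth get-key client.admin)
#ceph auth get-key client.admin > /etc/ceph/admin.secret
#mount -t ceph server1:6789:/ /mnt/mycephfs -o name=admin,secretfile=/etc/ceph/admin.secret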

#ceph disk performance test, where -s is the file size in MB and -r the amount of RAM in MB (defaults: 8192 and the available memory)
free && sync && echo 3 > /proc/sys/vm/drop_caches && sync && free
bonnie++ -s 2048 -r 1024 -u root -d /mnt/mycephfs -m BenchClient
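#Ceph also ships its own benchmark: write for 30 seconds, then read the same objects
#back (--no-cleanup keeps them around so the seq pass has something to fetch):
#rados bench -p mypool 30 write --no-cleanup
#rados bench -p mypool 30 seq
#rados -p mypool cleanup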

#https://wiki.mikejung.biz/Sysbench#Install_Sysbench_on_CentOS_7
#http://www.servernoobs.com/avoiding-cpu-speed-scaling-in-modern-linux-distributions-running-cpu-at-full-speed-tips/
#for CPUFREQ in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor;
#do [ -f $CPUFREQ ] || continue;
#  echo -n performance > $CPUFREQ;
#done
grep -E '^model name|^cpu MHz' /proc/cpuinfo
sysbench --test=cpu --cpu-max-prime=10000 --num-threads=4 run
#sysbench --test=fileio --file-test-mode=rndwr run
#sysbench --test=fileio help
#sysbench --test=fileio --file-test-mode=seqwr --num-threads=1 --file-block-size=4096 run
#sysbench --test=fileio --file-test-mode=rndwr --num-threads=1 --file-block-size=4096 run
#http://cromwell-intl.com/linux/performance-tuning/disks.html
echo 3 | sudo tee /proc/sys/vm/drop_caches && sudo sync
iperf -s #network throughput test: run this on one server, e.g. server1
iperf -c server1 -i1 -t 10  #and measure from a second server