Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# https://www.howtoforge.com/tutorial/how-to-build-a-ceph-cluster-on-centos-7/
# In VirtualBox, create a CentOS 7 VM with 2 GB RAM and two 8 GB disks.
# THIS IS THE MAIN SCRIPT (it contains two reboots, so it is really a
# concatenation of separate scripts): https://goo.gl/SAjzUj
# MAIN SCRIPT
# NOTE(review): piping pastebin content straight to disk is unverified remote
# code — review ceph.txt / cephfun.sh before running them.
# -fsSL: fail on HTTP errors (no HTML error page in the file), stay quiet,
# follow redirects. The sed strips CRLF line endings from the paste.
curl -fsSL https://pastebin.com/raw/kZX6ubYH | sed -e 's/\r$//' > ceph.txt
# gpm provides console mouse support so snippets can be pasted on a VT.
yum install -y gpm && systemctl start gpm
# Next, paste lines from the MAIN script and run the FUNCTIONS.
#vi ceph.txt  # paste something
# The helper FUNCTIONS for this script live in a separate paste.
curl -fsSL https://pastebin.com/raw/2gEM4ena | sed -e 's/\r$//' > cephfun.sh
sh cephfun.sh
/usr/local/bin/centos_update
reboot
# Extra, currently unneeded bits, e.g. a graphical desktop environment:
# http://www.45drives.com/wiki/index.php?title=Installing_MATE_on_CentOS_7
#yum groupinstall -y "X Window system"
#yum groupinstall -y "MATE Desktop"
#systemctl isolate graphical.target
#systemctl set-default graphical.target
#yum install -y gnome-disk-utility yum-utils novnc x11vnc tigervnc tigervnc-server
#useradd -d /home/cephuser -m cephuser
#echo "cephuser ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephuser
#chmod 0440 /etc/sudoers.d/cephuser
#passwd cephuser
#reboot
# Power the machine off after the reboot and
# CLONE IT IN VirtualBox INTO TWO ADDITIONAL MACHINES !!!!
################################################################################################
# Enter the IP octets obtained from DHCP here, e.g. server1 could be 77, etc.
#centos_static_ip "server" "77" "78" "79"
#server="server"; ip01="77"; ip02="78"; ip03="79"
# centos_static_ip is installed by cephfun.sh (hostname prefix + 3 host IDs).
/usr/local/bin/centos_static_ip "server" "77" "78" "79"
reboot
#su - cephuser
# CEPH reference material:
# http://www.virtualtothecore.com/en/adventures-ceph-storage-part-1-introduction/
# https://blog.zhaw.ch/icclab/tag/ceph/
# https://wiki.centos.org/SpecialInterestGroup/Storage/ceph-Quickstart
# http://linoxide.com/storage/setup-red-hat-ceph-storage-centos-7-0/
# http://karan-mj.blogspot.com/2013/12/what-is-ceph-ceph-is-open-source.html
# https://www.reddit.com/r/DataHoarder/comments/4gzpxi/why_is_ceph_so_rare_for_home_use_even_among/
# http://palmerville.github.io/2016/04/30/single-node-ceph-install.html
# Work from a dedicated directory. mkdir -p is idempotent (the old
# "[ ! -d ] && mkdir" form exits non-zero when the dir already exists),
# and the guarded cd stops ceph-deploy from running in the wrong place.
mkdir -p ceph-deploy
cd ceph-deploy/ || exit 1
# Start from a clean slate: remove any previous Ceph packages, data and keys.
ceph-deploy purge server1 server2 server3
ceph-deploy purgedata server1 server2 server3
ceph-deploy forgetkeys
# Define a new cluster with all three nodes as initial monitors.
ceph-deploy new server1 server2 server3
#ceph-deploy install --release jewel --no-adjust-repos server1 server2 server3
#ceph-deploy install --release jewel server1 server2 server3
ceph-deploy install --repo-url http://download.ceph.com/rpm-jewel/el7/ server1 server2 server3
ceph-deploy --overwrite-conf mon create server1
ceph-deploy --overwrite-conf mon create server2
ceph-deploy --overwrite-conf mon create server3
# Ask the first monitor for its status via the admin socket.
ceph --admin-daemon /var/run/ceph/ceph-mon.server1.asok mon_status
# Wait a few seconds for the monitors to form a quorum.
# Append tuning options to the ceph.conf generated by "ceph-deploy new".
# The heredoc terminator must start the line and match exactly (the pasted
# original had it prefixed, so the heredoc never terminated); quoting the
# delimiter makes the payload literal — no $-expansion can corrupt it.
cat <<'__EOF__' >> ./ceph.conf
mon_pg_warn_max_per_osd = 0
public network = 192.168.2.0/24
#cluster network = 192.168.2.0/24
#Choose reasonable numbers for number of replicas and placement groups.
osd pool default size = 2 # Write an object 2 times
osd pool default min size = 1 # Allow writing 1 copy in a degraded state
osd pool default pg num = 64
osd pool default pgp num = 64
#Choose a reasonable crush leaf type
#0 for a 1-node cluster.
#1 for a multi node cluster in a single rack
#2 for a multi node, multi chassis cluster with multiple hosts in a chassis
#3 for a multi node cluster with hosts across racks, etc.
osd crush chooseleaf type = 1
osd journal size = 200
__EOF__
# Push the updated ceph.conf to every node, then collect the cluster keys.
for h in server1 server2 server3; do
  scp ./ceph.conf "root@${h}:/etc/ceph"
done
ceph-deploy gatherkeys server1
ssh server2 ceph-deploy gatherkeys server2
ssh server3 ceph-deploy gatherkeys server3
# Wipe the data disks and give each a single GPT/XFS partition.
for i in server1 server2 server3; do ceph-deploy disk zap "${i}:sdb"; done
# "ae" runs the quoted command on all nodes (helper defined in cephfun.sh).
ae "parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%"
# Verify that the partitioning succeeded on every server.
# http://tracker.ceph.com/issues/13833
#ae "chown ceph:ceph /dev/sda2"
for i in server1 server2 server3; do
  ceph-deploy osd --overwrite-conf prepare "${i}:/dev/sdb1"
done
# Wait a moment before activating.
for i in server1 server2 server3; do
  ceph-deploy osd activate "${i}:/dev/sdb1"
done
# Check with "ceph -s" that the OSDs joined the cluster.
#ceph-deploy --username ceph osd create osd3:/dev/sdb1
# Distribute the admin keyring and make it readable for client tools.
ceph-deploy admin server1 server2 server3
ae "chmod +r /etc/ceph/ceph.client.admin.keyring"
# Make the Ceph daemons start at boot on all nodes.
ae "systemctl enable ceph-mon.target"
ae "systemctl enable ceph-mds.target"
ae "systemctl enable ceph-osd.target"
# Object storage gateway.
ceph-deploy rgw create server1 server2 server3
# CephFS metadata servers.
ceph-deploy mds create server1 server2 server3
# Sanity checks: cluster health, OSD tree, monitor quorum.
ceph -s  # ceph status
ceph osd tree
ceph mon_status
# Create a small test pool and exercise basic rados object operations.
ceph osd pool create mypool 1
ceph osd lspools
ceph df
echo "test data" > testfile
rados put -p mypool testfile testfile
rados -p mypool ls
rados -p mypool setomapval testfile mykey myvalue
rados -p mypool getomapval testfile mykey
# Read the object back and confirm it is bit-identical to the original.
rados get -p mypool testfile testfile2
md5sum testfile testfile2
# CephFS needs separate data and metadata pools.
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
mkdir -p /mnt/mycephfs
# Pull the admin secret from the keyring line "key = <secret>" with one awk
# instead of the fragile cat|grep|cut|sed chain (cut -f2 expects tabs, which
# the space-separated keyring does not contain).
secret=$(awk '$1 == "key" {print $3}' /etc/ceph/ceph.client.admin.keyring)
mount -t ceph server1:6789:/ /mnt/mycephfs -o "name=admin,secret=${secret}"
# Ceph-mount disk benchmark: -s = test file size in MB, -r = RAM size in MB
# (defaults: 8192 and the available memory).
# Flush dirty pages FIRST, then drop the page cache, so the benchmark
# starts cold.
free && sync && echo 3 > /proc/sys/vm/drop_caches && free
bonnie++ -s 2048 -r 1024 -u root -d /mnt/mycephfs -m BenchClient
# https://wiki.mikejung.biz/Sysbench#Install_Sysbench_on_CentOS_7
# http://www.servernoobs.com/avoiding-cpu-speed-scaling-in-modern-linux-distributions-running-cpu-at-full-speed-tips/
#for CPUFREQ in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor;
#do [ -f $CPUFREQ ] || continue;
# echo -n performance > $CPUFREQ;
#done
grep -E '^model name|^cpu MHz' /proc/cpuinfo
sysbench --test=cpu --cpu-max-prime=10000 --num-threads=4 run
#sysbench --test=fileio --file-test-mode=rndwr run
#sysbench --test=fileio help
#sysbench --test=fileio --file-test-mode=seqwr --num-threads=1 --file-block-size=4096 run
#sysbench --test=fileio --file-test-mode=rndwr --num-threads=1 --file-block-size=4096 run
# http://cromwell-intl.com/linux/performance-tuning/disks.html
# sync must come BEFORE dropping caches (the original did it after, which
# flushes dirty pages straight back into the cache it just dropped).
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches
iperf -s  # network throughput: run the server side on one node, e.g. server1
iperf -c server1 -i1 -t 10  # and measure from a second node
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement