Cluster Setup Script

######################################### Basic setup ####################################

wget <internal-yum-repo-URL>&&yum clean all&&yum makecache&&pkill -9 yum
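
A minimal sketch of what this step is assumed to do: fetch the internal mirror's repo file into /etc/yum.repos.d/ and rebuild the cache (the URL and the file name local.repo are placeholders, not from the original):

wget -O /etc/yum.repos.d/local.repo http://<internal-yum-server>/local.repo   # placeholder URL
yum clean all && yum makecache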

hostnamectl set-hostname master&&systemctl stop firewalld&&systemctl disable firewalld&&setenforce 0


hostnamectl set-hostname slave1&&systemctl stop firewalld&&systemctl disable firewalld&&setenforce 0


hostnamectl set-hostname slave2&&systemctl stop firewalld&&systemctl disable firewalld&&setenforce 0


echo 192.168.100.169 master >> /etc/hosts&&echo 192.168.100.248 slave1 >> /etc/hosts&&echo 192.168.100.148 slave2 >> /etc/hosts&&cat /etc/hosts


scp /etc/hosts root@slave1:/etc/hosts;scp /etc/hosts root@slave2:/etc/hosts

################# Passwordless SSH ################# Caution: if a key file already exists, do NOT run this ###########################

ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa&&cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys&&scp /root/.ssh/authorized_keys root@slave1:/root/.ssh/authorized_keys&&scp /root/.ssh/authorized_keys root@slave2:/root/.ssh/authorized_keys
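
An alternative sketch using standard OpenSSH tooling instead of copying authorized_keys by hand (not the original method; assumes the same id_dsa key pair generated above):

ssh-copy-id -i ~/.ssh/id_dsa.pub root@slave1   # prompts for the slave password once
ssh-copy-id -i ~/.ssh/id_dsa.pub root@slave2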


################### SSH troubleshooting ###################

rm -rf ~/.ssh/known_hosts&&ssh root@master "hostname"&&ssh root@slave1 "hostname"&&ssh root@slave2 "hostname"


############################ NTP time synchronization ############################

yum install ntp -y&&echo "TZ='Asia/Shanghai'; export TZ" >> /etc/profile&&source /etc/profile&&vi /etc/ntp.conf


Add to /etc/ntp.conf (use the local clock as the reference):
server 127.127.1.0
fudge 127.127.1.0 stratum 10

systemctl restart ntpd&&systemctl enable ntpd

ntpdate master    # run on the slave nodes
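
A quick check, assuming ntpd has already been restarted on master (standard ntp utilities, not commands from the original):

ntpq -p    # on master: the local clock source 127.127.1.0 should be listed
date       # compare across master/slave1/slave2 after ntpdate master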



################################################## Java setup ##################################################

Environment variables (append to /etc/profile):

export JAVA_HOME=/usr/java/jdk1.8.0_171
export PATH=$PATH:$JAVA_HOME/bin
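
A sketch of applying these variables non-interactively (assumes the JDK is already unpacked at /usr/java/jdk1.8.0_171):

echo 'export JAVA_HOME=/usr/java/jdk1.8.0_171' >> /etc/profile
echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile
source /etc/profile && java -version    # verify the JDK is on PATH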


   scp -r /usr/java root@slave1:/usr/&&scp -r /usr/java root@slave2:/usr/


Sample /etc/hosts entries:
192.168.15.104 master master.root
192.168.15.127 slave1 slave1.root
192.168.15.124 slave2 slave2.root


####################### Zookeeper #######################

export ZOOKEEPER_HOME=/usr/zookeeper/zookeeper-3.4.10
export PATH=$PATH:$ZOOKEEPER_HOME/bin

mkdir /usr/zookeeper/zookeeper-3.4.10/zkdata&&mkdir /usr/zookeeper/zookeeper-3.4.10/zkdatalog

Append to $ZOOKEEPER_HOME/conf/zoo.cfg:
dataDir=/usr/zookeeper/zookeeper-3.4.10/zkdata
dataLogDir=/usr/zookeeper/zookeeper-3.4.10/zkdatalog
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
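
The remaining zoo.cfg parameters can stay at the ZooKeeper 3.4 sample defaults (the values below are those sample defaults, assumed here rather than taken from the original):

tickTime=2000
initLimit=10
syncLimit=5
clientPort=2181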

echo 1 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid    # on master (server.1)
echo 2 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid    # on slave1 (server.2)
echo 3 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid    # on slave2 (server.3)



scp -r /usr/zookeeper root@slave1:/usr/&&scp -r /usr/zookeeper root@slave2:/usr/
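
Note that the scp above also copies master's myid to the slaves, so their values have to be rewritten afterwards, for example:

ssh root@slave1 "echo 2 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid"
ssh root@slave2 "echo 3 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid"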


Sync environment variables:
scp /etc/profile root@slave1:/etc/profile&&scp /etc/profile root@slave2:/etc/profile
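
After re-sourcing /etc/profile on each node, a start-and-verify sketch (zkServer.sh is on PATH via ZOOKEEPER_HOME above):

source /etc/profile && zkServer.sh start    # run on master, slave1 and slave2
zkServer.sh status                          # expect one leader and two followers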



Zookeeper troubleshooting -----------------------------------
zkServer.sh stop&&rm -rf zkdatalog/*&&rm -rf zkdata/version-2/    # run from $ZOOKEEPER_HOME

scp /usr/zookeeper/zookeeper-3.4.10/conf/zoo.cfg root@slave1:/usr/zookeeper/zookeeper-3.4.10/conf/zoo.cfg&&scp /usr/zookeeper/zookeeper-3.4.10/conf/zoo.cfg root@slave2:/usr/zookeeper/zookeeper-3.4.10/conf/zoo.cfg

echo 1 > myid    # in zkdata; write the id that matches this node (1/2/3)

zkServer.sh status

scp -r /usr/hadoop/ root@slave1:/usr/ && scp -r /usr/hadoop/ root@slave2:/usr/

scp /etc/hosts root@slave1:/etc/hosts;scp /etc/hosts root@slave2:/etc/hosts


Hadoop troubleshooting -------------------------------------------

export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin


scp /etc/profile root@slave1:/etc/profile&&scp /etc/profile root@slave2:/etc/profile

scp -r /usr/hadoop/ root@slave1:/usr/&&scp -r /usr/hadoop/ root@slave2:/usr/
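
A quick check after distributing, assuming /etc/profile has been re-sourced on every node:

source /etc/profile
hadoop version    # should report Hadoop 2.7.3 on master, slave1 and slave2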



Adding and removing nodes -------------------------------------------

hostnamectl set-hostname slave3&&systemctl stop firewalld&&systemctl disable firewalld&&setenforce 0


vi /etc/hosts&&scp /root/.ssh/authorized_keys root@slave3:/root/.ssh/authorized_keys&&scp /etc/hosts root@slave3:/etc/hosts
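
The /etc/hosts edit adds a line for the new node; a sketch (the IP below is a placeholder, not a value from the original):

echo "<slave3-ip> slave3" >> /etc/hosts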

yum install ntp -y&&ntpdate master

vi /usr/hadoop/hadoop-2.7.3/etc/hadoop/slaves
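
For example, to register the new node in the slaves file non-interactively (path follows this guide's layout):

echo slave3 >> /usr/hadoop/hadoop-2.7.3/etc/hadoop/slaves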

scp -r /usr/java/ root@slave3:/usr/&&scp -r /etc/profile root@slave3:/etc/profile&& scp -r /usr/hadoop/ root@slave3:/usr/



hadoop-daemon.sh start datanode     # on slave3
yarn-daemon.sh start nodemanager    # on slave3

On the master node (NameNode):
hdfs dfsadmin -refreshNodes
Rebalance blocks: start-balancer.sh
Check live nodes: hdfs dfsadmin -report


Temporarily remove a node (on the node itself):
hadoop-daemon.sh stop datanode
yarn-daemon.sh stop nodemanager



Dynamically removing a DataNode -------------------------------------------

1. On the NameNode, add a dfs.hosts.exclude property to hdfs-site.xml:

<property>
 <name>dfs.hosts.exclude</name>
 <value>/usr/hadoop/hadoop-2.7.3/etc/hadoop/excludes</value>
</property>

2. In that directory ($HADOOP_HOME/etc/hadoop/), create an excludes file containing the IP or hostname of the DataNode to be removed; for example, to remove slave2: echo "slave2" > excludes
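
Written out with an absolute path, assuming the excludes file lives under this guide's HADOOP_HOME (run on the NameNode):

echo "slave2" > /usr/hadoop/hadoop-2.7.3/etc/hadoop/excludes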


3. On the NameNode, refresh all DataNodes:
hdfs dfsadmin -refreshNodes
hdfs dfsadmin -report
start-balancer.sh
Stop the processes on the node being removed (slave2):
hadoop-daemon.sh stop datanode
yarn-daemon.sh stop nodemanager
Rebalance data: start-balancer.sh
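
One way to watch the decommission progress from the standard HDFS report output (the grep window size is arbitrary):

hdfs dfsadmin -report | grep -A 5 slave2    # Decommission Status should move to Decommissioned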