
Hadoop 2.6.1 Cluster Environment Setup




Hadoop 2.6.1 Environment Setup

Contents: Environment and versions · Remove the pre-installed Java environment · Disable the firewall on all three machines · Download Hadoop and Java · Set environment variables · Configure passwordless SSH login · Modify the Hadoop configuration files · Distribute Hadoop to slave1 and slave2 · Start the cluster


Environment and Versions
    CentOS 7, Java 8, Hadoop 2.6.1 cluster
      master: 192.168.27.130
      slave1: 192.168.27.131
      slave2: 192.168.27.132

Unless otherwise noted, all of the following operations are performed on the master node.
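The steps below address the nodes by hostname, so each machine must be able to resolve master, slave1 and slave2. A minimal sketch using the IPs listed above (run on all three machines; the hostnamectl line sets each machine's own name and differs per node):

# Append to /etc/hosts on all three machines
192.168.27.130 master
192.168.27.131 slave1
192.168.27.132 slave2
# On each machine, set its own hostname (shown here for master)
hostnamectl set-hostname master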


Remove the Pre-installed Java Environment

[root@master src]# echo $JAVA_HOME

[root@master src]# rpm -qa | grep jdk
java-1.8.0-openjdk-headless-1.8.0.65-3.b17.el7.x86_64
java-1.7.0-openjdk-1.7.0.91-2.6.2.3.el7.x86_64
java-1.7.0-openjdk-headless-1.7.0.91-2.6.2.3.el7.x86_64
java-1.8.0-openjdk-1.8.0.65-3.b17.el7.x86_64

# Removal method 1 (recommended)
[root@master src]# yum -y remove java-1.8.0-openjdk-headless-1.8.0.65-3.b17.el7.x86_64

# Removal method 2: delete every JDK package listed in the rpm output above
rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.65-3.b17.el7.x86_64
rpm -e --nodeps java-1.7.0-openjdk-1.7.0.91-2.6.2.3.el7.x86_64
rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.91-2.6.2.3.el7.x86_64
rpm -e --nodeps java-1.8.0-openjdk-1.8.0.65-3.b17.el7.x86_64

# Verify that the Java environment has been removed
[root@master src]# java -version
bash: java: command not found...

Disable the Firewall on All Three Machines

# Disable the system firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable the kernel firewall (SELinux)
setenforce 0
vim /etc/selinux/config
SELINUX=disabled
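An optional sanity check that both firewalls are really off:

systemctl status firewalld   # should report "inactive (dead)"
getenforce                   # prints Permissive now, Disabled after a reboot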

Download Hadoop and Java

    Hadoop download address · Java 8 download address

    # Extract both archives into /usr/local/src
    tar -zxvf hadoop-2.6.1.tar.gz -C /usr/local/src

    tar -zxvf jdk-8u251-linux-x64.tar.gz -C /usr/local/src


Set Environment Variables
    Open the configuration file:

    # User environment variables (recommended)
    vim ~/.bashrc
    # System-wide environment variables (not recommended)
    vim /etc/profile

    Append the following at the end of the file (the JDK directory matches the jdk-8u251 archive extracted above):

    export JAVA_HOME=/usr/local/src/jdk1.8.0_251
    export HADOOP_HOME=/usr/local/src/hadoop-2.6.1
    export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib
    export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

    Apply the changes:

    source ~/.bashrc
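A quick check that the variables took effect (the version banners depend on the exact JDK build you downloaded):

java -version      # should report 1.8.0_251
hadoop version     # should report Hadoop 2.6.1
echo $JAVA_HOME    # /usr/local/src/jdk1.8.0_251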


Configure Passwordless SSH Login

# Run on slave1 and slave2 first, so their public keys exist before master collects them
ssh-keygen -t rsa
cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys

# Run on master: generate a key pair, then gather the slaves' public keys
ssh-keygen -t rsa
cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
ssh slave1 cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
ssh slave2 cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys

# Run on slave1 and slave2 again: pull the merged key list from master
ssh master cat /root/.ssh/authorized_keys >> /root/.ssh/authorized_keys
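Each hop should now work without a password prompt; a quick test:

# From master
ssh slave1 hostname   # prints "slave1" without asking for a password
ssh slave2 hostname
# From slave1 or slave2
ssh master hostname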

Modify the Hadoop Configuration Files

All of the files below live in the configuration directory, so switch there first: cd /usr/local/src/hadoop-2.6.1/etc/hadoop

    slaves

    vim slaves
    slave1
    slave2
    core-site.xml

    <configuration>
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://master:9000</value>
        </property>
        <property>
            <name>hadoop.tmp.dir</name>
            <value>file:/usr/local/src/hadoop-2.6.1/tmp/</value>
        </property>
    </configuration>


    hdfs-site.xml

    <configuration>
        <property>
            <name>dfs.namenode.secondary.http-address</name>
            <value>master:9001</value>
        </property>
        <property>
            <name>dfs.namenode.name.dir</name>
            <value>file:/usr/local/src/hadoop-2.6.1/dfs/name/</value>
        </property>
        <property>
            <name>dfs.datanode.data.dir</name>
            <value>file:/usr/local/src/hadoop-2.6.1/dfs/data/</value>
        </property>
        <property>
            <name>dfs.replication</name>
            <value>2</value>
        </property>
    </configuration>


    mapred-site.xml (Hadoop 2.6.1 ships only mapred-site.xml.template; create the file first with: cp mapred-site.xml.template mapred-site.xml)

    <configuration>
        <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
        </property>
        <property>
            <name>mapreduce.jobhistory.address</name>
            <value>master:10020</value>
        </property>
        <property>
            <name>mapreduce.jobhistory.webapp.address</name>
            <value>master:19888</value>
        </property>
    </configuration>


    yarn-site.xml

    <configuration>
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
        <property>
            <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
            <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        </property>
        <property>
            <name>yarn.resourcemanager.address</name>
            <value>master:8032</value>
        </property>
        <property>
            <name>yarn.resourcemanager.scheduler.address</name>
            <value>master:8030</value>
        </property>
        <property>
            <name>yarn.resourcemanager.resource-tracker.address</name>
            <value>master:8035</value>
        </property>
        <property>
            <name>yarn.resourcemanager.admin.address</name>
            <value>master:8033</value>
        </property>
        <property>
            <name>yarn.resourcemanager.webapp.address</name>
            <value>master:8088</value>
        </property>
        <property>
            <name>yarn.nodemanager.vmem-check-enabled</name>
            <value>false</value>
        </property>
    </configuration>
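    If the daemons later fail to start with a "JAVA_HOME is not set" error, note that processes launched over ssh do not always inherit variables from ~/.bashrc; a common safeguard (assuming the JDK path used above) is to hard-code the path in hadoop-env.sh:

    # In /usr/local/src/hadoop-2.6.1/etc/hadoop/hadoop-env.sh, replace
    # "export JAVA_HOME=${JAVA_HOME}" with the explicit path:
    export JAVA_HOME=/usr/local/src/jdk1.8.0_251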


    Create the temporary directory and the HDFS directories referenced in the configuration

    # Temporary directory
    mkdir /usr/local/src/hadoop-2.6.1/tmp
    # NameNode and DataNode directories
    mkdir -p /usr/local/src/hadoop-2.6.1/dfs/name
    mkdir -p /usr/local/src/hadoop-2.6.1/dfs/data


Distribute Hadoop to slave1 and slave2

scp -r /usr/local/src/hadoop-2.6.1 root@slave1:/usr/local/src/hadoop-2.6.1
scp -r /usr/local/src/hadoop-2.6.1 root@slave2:/usr/local/src/hadoop-2.6.1
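The slaves also need the JDK and the same environment variables, which the scp above does not cover; assuming the identical directory layout used on master, a sketch:

# Copy the JDK and the shell profile to both slaves
scp -r /usr/local/src/jdk1.8.0_251 root@slave1:/usr/local/src/
scp -r /usr/local/src/jdk1.8.0_251 root@slave2:/usr/local/src/
scp ~/.bashrc root@slave1:~/
scp ~/.bashrc root@slave2:~/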

Start the Cluster

    Initialize the NameNode. Note: format only once; repeated formatting produces mismatched cluster IDs between the NameNode and the DataNodes (if that happens, clear the tmp and dfs directories on every node and format again).

    hadoop namenode -format

    Start all daemons:

    /usr/local/src/hadoop-2.6.1/sbin/start-all.sh
    # Because the environment variables are already configured, this also works:
    start-all.sh

    Check whether the cluster has started

    master:

    [root@master ~]# jps
    18126 ResourceManager
    17975 SecondaryNameNode
    21333 Jps
    17798 NameNode

    slave1:

    [root@slave1 ~]# jps
    13538 DataNode
    13988 Jps
    13638 NodeManager

    slave2:

    [root@slave2 ~]# jps
    14516 DataNode
    14944 Jps
    14616 NodeManager
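    As an extra check, HDFS itself can report the registered DataNodes; with the two slaves above it should list two live nodes:

    hdfs dfsadmin -report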

    Start the history server on master:

    mr-jobhistory-daemon.sh start historyserver

    Monitoring web page (if your local machine has no hostname mapping for master, browse to 192.168.27.130:8088 instead):
    http://master:8088

    HDFS Shell

    [root@master ~]# hadoop fs -ls /
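    A few more everyday commands (the directory and file name here are illustrative):

    hadoop fs -mkdir -p /user/root/input            # create a directory in HDFS
    hadoop fs -put somefile.txt /user/root/input    # upload a local file
    hadoop fs -ls /user/root/input                  # list the uploaded file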


At this point, congratulations: your Hadoop cluster is up and running.


