add more hadoop versions

2018-08-06 17:14:20 +08:00
parent e238f29b04
commit 8e119766ee
21 changed files with 655 additions and 186 deletions

Dockerfile

@@ -1,17 +1,59 @@
FROM alpine:3.8
MAINTAINER Newnius <newnius.cn@gmail.com>
# Prerequisites
RUN apk add --no-cache openssh openssl openjdk8-jre rsync bash procps
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $PATH:$JAVA_HOME/bin
# Passwordless SSH
RUN ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key
RUN ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key
RUN ssh-keygen -q -N "" -t rsa -f /root/.ssh/id_rsa
RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
ADD ssh_config /root/.ssh/config
RUN chmod 600 /root/.ssh/config
RUN chown root:root /root/.ssh/config
RUN echo "Port 2122" >> /etc/ssh/sshd_config
# Install Hadoop
ENV HADOOP_VER 2.7.1
RUN wget -O hadoop.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VER/hadoop-$HADOOP_VER.tar.gz && \
tar -xzf hadoop.tar.gz -C /usr/local/ && rm hadoop.tar.gz
RUN ln -s /usr/local/hadoop-$HADOOP_VER /usr/local/hadoop
ENV HADOOP_HOME /usr/local/hadoop
ENV PATH $PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
ENV HADOOP_PREFIX $HADOOP_HOME
ENV HADOOP_COMMON_HOME $HADOOP_HOME
ENV HADOOP_HDFS_HOME $HADOOP_HOME
ENV HADOOP_MAPRED_HOME $HADOOP_HOME
ENV HADOOP_YARN_HOME $HADOOP_HOME
ENV HADOOP_CONF_DIR $HADOOP_HOME/etc/hadoop
ENV YARN_CONF_DIR $HADOOP_PREFIX/etc/hadoop
# Default Conf Files
ADD core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml
ADD hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml
ADD mapred-site.xml $HADOOP_HOME/etc/hadoop/mapred-site.xml
ADD yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml
ADD slaves $HADOOP_HOME/etc/hadoop/slaves
RUN sed -i "/^export JAVA_HOME/ s:.*:export JAVA_HOME=${JAVA_HOME}\nexport HADOOP_HOME=${HADOOP_HOME}\nexport HADOOP_PREFIX=${HADOOP_PREFIX}:" ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
RUN sed -i '/^export HADOOP_CONF_DIR/ s:.*:export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop/:' $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
WORKDIR $HADOOP_HOME
ADD bootstrap.sh /etc/bootstrap.sh
CMD ["/etc/bootstrap.sh", "-d"]
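To build this image locally (standard Docker; the tag mirrors the one used in the README):
```bash
# build from the directory holding the Dockerfile, the conf files and bootstrap.sh
docker build -t newnius/hadoop:2.7.1 .
```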

README.md

@@ -1,87 +1,115 @@
# Deploy a Hadoop Cluster with Docker
## Create a Hadoop cluster in swarm mode
## Start Master
`--hostname` requires Docker 1.13 or higher.
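The examples also assume an overlay network named `swarm-net` already exists; if it doesn't, create it first:
```bash
# create the overlay network all hadoop services attach to
docker network create --driver overlay swarm-net
```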
```bash
docker service create \
--name hadoop-master \
--hostname hadoop-master \
--network swarm-net \
--replicas 1 \
--detach=true \
--endpoint-mode dnsrr \
--mount type=bind,source=/etc/localtime,target=/etc/localtime \
newnius/hadoop:2.7.1
```
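To confirm the master task is running (standard Docker commands, nothing image-specific):
```bash
# list the service's tasks and check their state
docker service ps hadoop-master
```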
## Start slaves
```bash
docker service create \
--name hadoop-slave1 \
--hostname hadoop-slave1 \
--network swarm-net \
--replicas 1 \
--detach=true \
--endpoint-mode dnsrr \
--mount type=bind,source=/etc/localtime,target=/etc/localtime \
newnius/hadoop:2.7.1
```
```bash
docker service create \
--name hadoop-slave2 \
--network swarm-net \
--hostname hadoop-slave2 \
--replicas 1 \
--detach=true \
--endpoint-mode dnsrr \
--mount type=bind,source=/etc/localtime,target=/etc/localtime \
newnius/hadoop:2.7.1
```
```bash
docker service create \
--name hadoop-slave3 \
--hostname hadoop-slave3 \
--network swarm-net \
--replicas 1 \
--detach=true \
--endpoint-mode dnsrr \
--mount type=bind,source=/etc/localtime,target=/etc/localtime \
newnius/hadoop:2.7.1
```
## Init for the first time
On the first deploy, format HDFS first. Run these commands on the master node.
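If you need a shell inside the master container first, one way is plain Docker, run on the swarm node hosting the task (the name filter is a sketch):
```bash
# locate the hadoop-master container on this node and enter it
docker exec -it $(docker ps -q -f name=hadoop-master) bash
```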
```bash
# stop HDFS services
sbin/stop-dfs.sh
# format HDFS meta data
bin/hadoop namenode -format
# restart HDFS services
sbin/start-dfs.sh
```
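To verify that HDFS came back with all three datanodes, a quick check:
```bash
# print cluster capacity and the list of live datanodes
bin/hdfs dfsadmin -report
```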
## Run a test job
To make sure you have successfully set up the Hadoop cluster, run the following commands and check that the job executes well.
```bash
# prepare input data
bin/hadoop dfs -mkdir -p /user/root/input
# copy files to input path
bin/hadoop dfs -put etc/hadoop/* /user/root/input
# submit the job
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar grep input output 'dfs[a-z.]+'
```
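When the job finishes, read the results back from HDFS:
```bash
# print the matched patterns and their counts
bin/hadoop dfs -cat output/*
```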
## Browse the web UI
You can expose the ports in the script, but I'd rather not, since the slaves would try to occupy the same ports.
To access the web UI, deploy another (socks5) proxy to route the traffic.
If you don't have one, try [newnius/docker-proxy](https://hub.docker.com/r/newnius/docker-proxy/), it is rather easy to use.
Visit [hadoop-master:8088](http://hadoop-master:8088) for YARN pages.
Visit [hadoop-master:50070](http://hadoop-master:50070) for HDFS pages.
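For reference, a minimal sketch of deploying such a proxy on the same network; the container port 1080 is an assumption here (a common socks5 default), so check the image docs for the real value:
```bash
# a sketch: run the proxy on the overlay network and publish its port
# (container port 1080 is an assumption -- verify against the image docs)
docker service create \
  --name proxy \
  --network swarm-net \
  --publish 1080:1080 \
  newnius/docker-proxy
```
Then point the browser's SOCKS5 settings at any swarm node on the published port, and the hadoop-master hostnames resolve through the overlay network.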
## Custom configuration
To persist data or modify the conf files, refer to the following script.
Conf files placed under `/config/hadoop` replace the defaults at startup; you don't have to put all the files there.
```bash
docker service create \
--name hadoop-master \
--hostname hadoop-master \
--network swarm-net \
--replicas 1 \
--detach=true \
--endpoint-mode dnsrr \
--mount type=bind,source=/etc/localtime,target=/etc/localtime \
--mount type=bind,source=/data/hadoop/config,target=/config/hadoop \
--mount type=bind,source=/data/hadoop/hdfs/master,target=/tmp/hadoop-root \
--mount type=bind,source=/data/hadoop/logs/master,target=/usr/local/hadoop/logs \
newnius/hadoop:2.7.1
```
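After editing files under `/data/hadoop/config` on the host, force a restart so `bootstrap.sh` copies them into place again (standard Docker):
```bash
# restart the service so the new conf files are picked up
docker service update --force hadoop-master
```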

bootstrap.sh

@@ -9,10 +9,14 @@ rm /tmp/*.pid
# installing libraries if any - (resource urls added comma separated to the ACP system variable)
cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd -
## replace config
: ${EXTRA_CONF_DIR:=/config/hadoop}
if [ -d "$EXTRA_CONF_DIR" ]; then
  cp $EXTRA_CONF_DIR/* $HADOOP_PREFIX/etc/hadoop/
fi
/usr/sbin/sshd
## stop all in case master starts far behind
$HADOOP_PREFIX/sbin/stop-yarn.sh
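The `: ${EXTRA_CONF_DIR:=/config/hadoop}` line is the standard bash default-value idiom: it assigns the path only when the variable is unset or empty, so the conf directory can still be overridden through the service's environment. A minimal illustration:
```bash
# `: ${VAR:=default}` assigns only when VAR is unset or empty
unset EXTRA_CONF_DIR
: ${EXTRA_CONF_DIR:=/config/hadoop}
echo "$EXTRA_CONF_DIR"   # prints /config/hadoop

EXTRA_CONF_DIR=/my/conf
: ${EXTRA_CONF_DIR:=/config/hadoop}
echo "$EXTRA_CONF_DIR"   # still prints /my/conf
```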

core-site.xml

@@ -17,13 +17,12 @@
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop-master:8020</value>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://hadoop-master:8020</value>
</property>
</configuration>
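With `fs.defaultFS` set (and its deprecated alias `fs.default.name` kept for older clients), bare paths resolve against the master, so the two commands below are equivalent:
```bash
# both list the same HDFS directory
bin/hadoop fs -ls /user/root
bin/hadoop fs -ls hdfs://hadoop-master:8020/user/root
```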

hdfs-site.xml

@@ -17,30 +17,24 @@
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop-slave1:50090</value>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>hadoop-master:50070</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>8192</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
</configuration>
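`dfs.replication` of 3 matches the three slaves; replication can also be adjusted per file afterwards, e.g.:
```bash
# rewrite an existing file to 2 replicas and wait until done
bin/hadoop fs -setrep -w 2 /user/root/input/core-site.xml
```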

mapred-site.xml

@@ -17,10 +17,16 @@
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop-master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop-master:19888</value>
</property>
</configuration>
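The two jobhistory addresses point at the MapReduce JobHistory Server, which `start-yarn.sh` does not launch; it has to be started separately on the master:
```bash
# start the JobHistory Server (Hadoop 2.x daemon script)
sbin/mr-jobhistory-daemon.sh start historyserver
```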

hadoop/2.7.1/ssh_config (new file)

@@ -0,0 +1,5 @@
Host *
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
LogLevel quiet
Port 2122
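These settings let the Hadoop start scripts ssh between containers on port 2122 without prompts; a quick check from inside the master:
```bash
# should print the slave's hostname with no password or host-key prompt
ssh hadoop-slave1 hostname
```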

yarn-site.xml

@@ -14,34 +14,26 @@
-->
<!-- Site specific YARN configuration properties -->
<configuration>
<property>
<name>yarn.application.classpath</name>
<value>/usr/local/hadoop/etc/hadoop, /usr/local/hadoop/share/hadoop/common/*, /usr/local/hadoop/share/hadoop/common/lib/*, /usr/local/hadoop/share/hadoop/hdfs/*, /usr/local/hadoop/share/hadoop/hdfs/lib/*, /usr/local/hadoop/share/hadoop/mapreduce/*, /usr/local/hadoop/share/hadoop/mapreduce/lib/*, /usr/local/hadoop/share/hadoop/yarn/*, /usr/local/hadoop/share/hadoop/yarn/lib/*</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop-master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>2048</value>
@@ -50,8 +42,8 @@
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>2</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>1024</value>
</property>
</configuration>
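With 2048 MB and 2 vcores per NodeManager and a 1024 MB minimum allocation, each node fits at most two containers. The resources actually registered can be checked from the master:
```bash
# list NodeManagers with their state and resource usage
bin/yarn node -list -all
```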