Mirror of https://github.com/newnius/Dockerfiles.git (synced 2025-06-07 08:31:55 +00:00)

add hadoop:2.7.4, add soft link to fix a bug

Parent: fc70e58c31
Commit: c2265589ce
hadoop/2.7.4/Dockerfile (new file, 57 lines)
@@ -0,0 +1,57 @@
FROM alpine:3.6

MAINTAINER Newnius <newnius.cn@gmail.com>

USER root

# Prerequisites
RUN apk add --no-cache openssh openssl openjdk8-jre rsync bash procps

ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $PATH:$JAVA_HOME/bin

# Passwordless SSH
RUN ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key
RUN ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key
RUN ssh-keygen -q -N "" -t rsa -f /root/.ssh/id_rsa
RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys

ADD ssh_config /root/.ssh/config
RUN chmod 600 /root/.ssh/config
RUN chown root:root /root/.ssh/config

RUN echo "Port 2122" >> /etc/ssh/sshd_config

# Install Hadoop
RUN wget -O hadoop.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-2.7.4/hadoop-2.7.4.tar.gz && \
	tar -xzf hadoop.tar.gz -C /usr/local/ && rm hadoop.tar.gz

RUN ln -s /usr/local/hadoop-2.7.4 /usr/local/hadoop

ENV HADOOP_HOME /usr/local/hadoop
ENV PATH $PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

ENV HADOOP_PREFIX $HADOOP_HOME
ENV HADOOP_COMMON_HOME $HADOOP_HOME
ENV HADOOP_HDFS_HOME $HADOOP_HOME
ENV HADOOP_MAPRED_HOME $HADOOP_HOME
ENV HADOOP_YARN_HOME $HADOOP_HOME
ENV HADOOP_CONF_DIR $HADOOP_HOME/etc/hadoop
ENV YARN_CONF_DIR $HADOOP_PREFIX/etc/hadoop

# Default Conf Files
ADD core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml
ADD hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml
ADD mapred-site.xml $HADOOP_HOME/etc/hadoop/mapred-site.xml
ADD yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml
ADD slaves $HADOOP_HOME/etc/hadoop/slaves

RUN sed -i "/^export JAVA_HOME/ s:.*:export JAVA_HOME=${JAVA_HOME}\nexport HADOOP_HOME=${HADOOP_HOME}\nexport HADOOP_PREFIX=${HADOOP_PREFIX}:" ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh

RUN sed -i '/^export HADOOP_CONF_DIR/ s:.*:export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop/:' $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh

WORKDIR $HADOOP_HOME

ADD bootstrap.sh /etc/bootstrap.sh

CMD ["/etc/bootstrap.sh", "-d"]
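For reference, a minimal sketch of building the image from this directory (the tag is an assumption; adjust to your own registry/naming):

```
docker build -t newnius/hadoop:2.7.4 hadoop/2.7.4
```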
hadoop/2.7.4/README (new file, 87 lines)
@@ -0,0 +1,87 @@
# based on sequenceiq/hadoop-docker

## Create a hadoop cluster in swarm mode

`--hostname` requires Docker 1.13 or higher.
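The services below attach to an overlay network named `swarm-net`. If it does not already exist, a minimal sketch of creating it (run on a swarm manager, assuming the swarm is already initialized):

```
docker network create --driver overlay swarm-net
```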
```
docker service create \
--name hadoop-master \
--network swarm-net \
--hostname hadoop-master \
--replicas 1 \
--endpoint-mode dnsrr \
newnius/hadoop
```

```
docker service create \
--name hadoop-slave1 \
--network swarm-net \
--hostname hadoop-slave1 \
--replicas 1 \
--endpoint-mode dnsrr \
newnius/hadoop
```

```
docker service create \
--name hadoop-slave2 \
--network swarm-net \
--hostname hadoop-slave2 \
--replicas 1 \
--endpoint-mode dnsrr \
newnius/hadoop
```

```
docker service create \
--name hadoop-slave3 \
--network swarm-net \
--hostname hadoop-slave3 \
--replicas 1 \
--endpoint-mode dnsrr \
newnius/hadoop
```
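Before initializing, it can help to confirm all four services are running (standard Docker CLI, not specific to this image):

```
docker service ls
docker service ps hadoop-master
```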
## Init && Test

On the first deploy, format HDFS first.

### stop cluster (on the master)
`sbin/stop-yarn.sh`
`sbin/stop-dfs.sh`

### remove previous data (on all nodes)
Clear all data under /tmp on every node.

### format hdfs (on the master)
```
bin/hadoop namenode -format
```

### start cluster (on the master)
`sbin/start-dfs.sh`
`sbin/start-yarn.sh`

### Run a test job

```
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.4.jar grep input output 'dfs[a-z.]+'
```

Note: the `input` directory does not exist by default; create it and add data with

```
bin/hadoop dfs -mkdir -p /user/root/input
```
and
```
bin/hadoop dfs -put etc/hadoop/* /user/root/input
```
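Once the job finishes, a quick way to inspect the result, assuming the `output` path used above:

```
bin/hadoop dfs -cat output/*
```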
### monitor cluster in browser

YARN: hadoop-master:8088

HDFS: hadoop-master:50070

_Because the services use `--endpoint-mode dnsrr` and publish no ports, these UIs are only reachable from inside the overlay network; a proxy is needed, e.g. newnius/docker-proxy._
hadoop/2.7.4/bootstrap.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
#!/bin/bash

: ${HADOOP_PREFIX:=/usr/local/hadoop}

$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh

# remove stale pid files left over from a previous run
rm /tmp/*.pid

# install extra libraries, if any (resource URLs are passed comma-separated in the ACP environment variable)
cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd -

# replace default config with any files mounted at /mnt/hadoop-config
cp /mnt/hadoop-config/* $HADOOP_PREFIX/etc/hadoop/

/usr/sbin/sshd

## stop everything first, in case the master started long before the other nodes
$HADOOP_PREFIX/sbin/stop-yarn.sh
$HADOOP_PREFIX/sbin/stop-dfs.sh

$HADOOP_PREFIX/sbin/start-dfs.sh
$HADOOP_PREFIX/sbin/start-yarn.sh
$HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver

if [[ $1 == "-d" ]]; then
  while true; do sleep 1000; done
fi

if [[ $1 == "-bash" ]]; then
  /bin/bash
fi
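Since the script copies anything under /mnt/hadoop-config into the Hadoop conf directory, custom configuration can be supplied by mounting a directory there. A hedged sketch for the master service (the host path /data/hadoop-config is an assumption):

```
docker service create \
--name hadoop-master \
--network swarm-net \
--hostname hadoop-master \
--replicas 1 \
--endpoint-mode dnsrr \
--mount type=bind,source=/data/hadoop-config,target=/mnt/hadoop-config \
newnius/hadoop
```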
hadoop/2.7.4/core-site.xml (new file, 29 lines)
@@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop-master:8020</value>
    </property>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://hadoop-master:8020</value>
    </property>
</configuration>
hadoop/2.7.4/hdfs-site.xml (new file, 46 lines)
@@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>

    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop-slave1:50090</value>
    </property>
    <property>
        <name>dfs.namenode.http-address</name>
        <value>hadoop-master:50070</value>
    </property>

    <property>
        <name>dfs.datanode.max.transfer.threads</name>
        <value>8192</value>
    </property>

    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>

</configuration>
hadoop/2.7.4/mapred-site.xml (new file, 33 lines)
@@ -0,0 +1,33 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoop-master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop-master:19888</value>
    </property>
</configuration>
hadoop/2.7.4/slaves (new file, 3 lines)
@@ -0,0 +1,3 @@
hadoop-slave1
hadoop-slave2
hadoop-slave3
hadoop/2.7.4/ssh_config (new file, 5 lines)
@@ -0,0 +1,5 @@
Host *
  UserKnownHostsFile /dev/null
  StrictHostKeyChecking no
  LogLevel quiet
  Port 2122
hadoop/2.7.4/yarn-site.xml (new file, 56 lines)
@@ -0,0 +1,56 @@
<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Site specific YARN configuration properties -->
<configuration>

    <property>
        <name>yarn.application.classpath</name>
        <value>/usr/local/hadoop/etc/hadoop, /usr/local/hadoop/share/hadoop/common/*, /usr/local/hadoop/share/hadoop/common/lib/*, /usr/local/hadoop/share/hadoop/hdfs/*, /usr/local/hadoop/share/hadoop/hdfs/lib/*, /usr/local/hadoop/share/hadoop/mapreduce/*, /usr/local/hadoop/share/hadoop/mapreduce/lib/*, /usr/local/hadoop/share/hadoop/yarn/*, /usr/local/hadoop/share/hadoop/yarn/lib/*</value>
    </property>

    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop-master</value>
    </property>

    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>

    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>

    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>

    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>2048</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>2</value>
    </property>
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>1024</value>
    </property>
</configuration>
@@ -26,6 +26,8 @@ RUN echo "Port 2122" >> /etc/ssh/sshd_config
RUN wget -O hadoop.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-2.8.1/hadoop-2.8.1.tar.gz && \
	tar -xzf hadoop.tar.gz -C /usr/local/ && rm hadoop.tar.gz

RUN ln -s /usr/local/hadoop-2.7.4 /usr/local/hadoop

ENV HADOOP_HOME /usr/local/hadoop
ENV PATH $PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin