init package

parent d7ec8735dd
commit a5d4cf95ab
32  0001-sys_errlist-undeclared.patch  Normal file
@@ -0,0 +1,32 @@
From 11b1ee8aa01718d68dd332f08b27d4feffc3da31 Mon Sep 17 00:00:00 2001
From: wang--ge <wang__ge@126.com>
Date: Fri, 26 Mar 2021 10:18:23 +0800
Subject: [PATCH] modify

---
 .../hadoop-common/src/main/native/src/exception.c | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/exception.c b/hadoop-common-project/hadoop-common/src/main/native/src/exception.c
index fc072e8..a678688 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/exception.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/exception.c
@@ -110,15 +110,6 @@ jthrowable newIOException(JNIEnv* env, const char *fmt, ...)
 
 const char* terror(int errnum)
 {
-
-#if defined(__sun)
-// MT-Safe under Solaris which doesn't support sys_errlist/sys_nerr
   return strerror(errnum);
-#else
-  if ((errnum < 0) || (errnum >= sys_nerr)) {
-    return "unknown error.";
-  }
-  return sys_errlist[errnum];
-#endif
 }
-- 
2.27.0
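Note: glibc 2.32 stopped declaring sys_errlist/sys_nerr, so the native build of exception.c fails without this patch; strerror() covers every case the removed fallback handled. A minimal sketch of applying it by hand (the spec file presumably does this during %prep; the tarball is assumed to unpack to hadoop-3.2.1-src):

    tar -xzf hadoop-3.2.1-src.tar.gz
    cd hadoop-3.2.1-src
    patch -p1 < ../0001-sys_errlist-undeclared.patch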
3  context.xml  Normal file
@@ -0,0 +1,3 @@
<?xml version="1.0" encoding="UTF-8"?>
<Context allowLinking="true">
</Context>
BIN  hadoop-3.2.1-src.tar.gz  Normal file
Binary file not shown.
36  hadoop-core-site.xml  Normal file
@@ -0,0 +1,36 @@
<?xml version="1.0"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://localhost:8020</value>
  </property>

  <!-- HTTPFS proxy user setting -->
  <property>
    <name>hadoop.proxyuser.tomcat.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.tomcat.groups</name>
    <value>*</value>
  </property>

</configuration>
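fs.default.name is the deprecated alias of fs.defaultFS; Hadoop maps it automatically. A quick way to confirm the value is picked up once this file is installed as /etc/hadoop/core-site.xml:

    hdfs getconf -confKey fs.defaultFS    # expect hdfs://localhost:8020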
67  hadoop-hdfs-site.xml  Normal file
@@ -0,0 +1,67 @@
<?xml version="1.0"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- Immediately exit safemode as soon as one DataNode checks in.
       On a multi-node cluster, these configurations must be removed. -->
  <property>
    <name>dfs.safemode.extension</name>
    <value>0</value>
  </property>
  <property>
    <name>dfs.safemode.min.datanodes</name>
    <value>1</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/var/lib/hadoop-hdfs/${user.name}</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/namenode</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/secondarynamenode</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/datanode</value>
  </property>
  <property>
    <name>dfs.http.address</name>
    <value>0.0.0.0:50070</value>
  </property>
  <property>
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:50010</value>
  </property>
  <property>
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:50075</value>
  </property>
  <property>
    <name>dfs.datanode.ipc.address</name>
    <value>0.0.0.0:50020</value>
  </property>
</configuration>
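The dfs.*.dir values above live under /var/lib/hadoop-hdfs, which must exist and be writable by the hdfs user before the namenode is formatted. The package scriptlets presumably create it; a manual sketch:

    install -d -o hdfs -g hadoop -m 0755 /var/lib/hadoop-hdfs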
37  hadoop-hdfs.service.template  Normal file
@@ -0,0 +1,37 @@
[Unit]
Description=The Hadoop DAEMON daemon
After=network.target
After=NetworkManager.target

[Service]
Type=forking
EnvironmentFile=-/etc/sysconfig/hadoop-hdfs
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
ExecStart=/usr/sbin/hadoop-daemon.sh start DAEMON
ExecStop=/usr/sbin/hadoop-daemon.sh stop DAEMON
User=hdfs
Group=hadoop
PIDFile=/run/hadoop-hdfs/hadoop-hdfs-DAEMON.pid
LimitNOFILE=32768
LimitNPROC=65536

#######################################
# Note: Below are cgroup options
#######################################
#Slice=
#CPUAccounting=true
#CPUShares=1024

#MemoryAccounting=true
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes

#BlockIOAccounting=true
#BlockIOWeight=??
#BlockIODeviceWeight=??
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes

#DeviceAllow=
#DevicePolicy=auto|closed|strict

[Install]
WantedBy=multi-user.target
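DAEMON is a placeholder; the spec file presumably stamps out one unit per HDFS daemon at build time, along these lines:

    for d in namenode datanode secondarynamenode; do
        sed "s/DAEMON/$d/g" hadoop-hdfs.service.template > hadoop-$d.service
    done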
5  hadoop-httpfs.sysconfig  Normal file
@@ -0,0 +1,5 @@
CATALINA_BASE=/usr/share/hadoop/httpfs/tomcat
CATALINA_HOME=/usr/share/hadoop/httpfs/tomcat
CATALINA_TMPDIR=/var/cache/hadoop-httpfs

CATALINA_OPTS="-Dhttpfs.home.dir=/usr -Dhttpfs.config.dir=/etc/hadoop -Dhttpfs.log.dir=/var/log/hadoop-httpfs -Dhttpfs.temp.dir=/var/cache/hadoop-httpfs -Dhttpfs.admin.port=14001 -Dhttpfs.http.port=14000"
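With these settings HttpFS listens on port 14000 and speaks the WebHDFS REST API. A smoke test once the service is up (the hdfs user name here is an assumption):

    curl 'http://localhost:14000/webhdfs/v1/?op=LISTSTATUS&user.name=hdfs'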
29  hadoop-layout.sh  Normal file
@@ -0,0 +1,29 @@
export HADOOP_PREFIX=/usr
export HADOOP_COMMON_HOME=/usr
export HADOOP_COMMON_DIR=share/hadoop/common
export HADOOP_COMMON_LIB_JARS_DIR=share/hadoop/common/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=lib/hadoop
export HADOOP_CONF_DIR=/etc/hadoop
export HADOOP_LIBEXEC_DIR=/usr/libexec

export HADOOP_HDFS_HOME=$HADOOP_PREFIX
export HDFS_DIR=share/hadoop/hdfs
export HDFS_LIB_JARS_DIR=share/hadoop/hdfs/lib
export HADOOP_PID_DIR=/var/run/hadoop-hdfs
export HADOOP_LOG_DIR=/var/log/hadoop-hdfs
export HADOOP_IDENT_STRING=hdfs

export HADOOP_YARN_HOME=$HADOOP_PREFIX
export YARN_DIR=share/hadoop/yarn
export YARN_LIB_JARS_DIR=share/hadoop/yarn/lib
export YARN_PID_DIR=/var/run/hadoop-yarn
export YARN_LOG_DIR=/var/log/hadoop-yarn
export YARN_CONF_DIR=/etc/hadoop
export YARN_IDENT_STRING=yarn

export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
export MAPRED_DIR=share/hadoop/mapreduce
export MAPRED_LIB_JARS_DIR=share/hadoop/mapreduce/lib
export HADOOP_MAPRED_PID_DIR=/var/run/hadoop-mapreduce
export HADOOP_MAPRED_LOG_DIR=/var/log/hadoop-mapreduce
export HADOOP_MAPRED_IDENT_STRING=mapred
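The launcher scripts presumably source this file from HADOOP_LIBEXEC_DIR to locate jars and native libraries. A quick sanity check, assuming it is installed as /usr/libexec/hadoop-layout.sh:

    . /usr/libexec/hadoop-layout.sh
    ls "$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR" > /dev/null && echo "layout OK"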
37  hadoop-mapred-site.xml  Normal file
@@ -0,0 +1,37 @@
<?xml version="1.0"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value>localhost:8021</value>
  </property>

  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>

  <property>
    <description>To set the value of tmp directory for map and reduce tasks.</description>
    <name>mapreduce.task.tmp.dir</name>
    <value>/var/cache/hadoop-mapreduce/${user.name}/tasks</value>
  </property>

</configuration>
37  hadoop-mapreduce.service.template  Normal file
@@ -0,0 +1,37 @@
[Unit]
Description=The Hadoop DAEMON daemon
After=network.target
After=NetworkManager.target

[Service]
Type=forking
EnvironmentFile=-/etc/sysconfig/hadoop-mapreduce
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
ExecStart=/usr/sbin/mr-jobhistory-daemon.sh start DAEMON
ExecStop=/usr/sbin/mr-jobhistory-daemon.sh stop DAEMON
User=mapred
Group=hadoop
PIDFile=/run/hadoop-mapreduce/hadoop-mapred-DAEMON.pid
LimitNOFILE=32768
LimitNPROC=65536

#######################################
# Note: Below are cgroup options
#######################################
#Slice=
#CPUAccounting=true
#CPUShares=1024

#MemoryAccounting=true
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes

#BlockIOAccounting=true
#BlockIOWeight=??
#BlockIODeviceWeight=??
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes

#DeviceAllow=
#DevicePolicy=auto|closed|strict

[Install]
WantedBy=multi-user.target
49  hadoop-tomcat-users.xml  Normal file
@@ -0,0 +1,49 @@
<?xml version='1.0' encoding='utf-8'?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<tomcat-users xmlns="http://tomcat.apache.org/xml"
              xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
              xsi:schemaLocation="http://tomcat.apache.org/xml tomcat-users.xsd"
              version="1.0">
<!--
  NOTE: By default, no user is included in the "manager-gui" role required
  to operate the "/manager/html" web application. If you wish to use this app,
  you must define such a user - the username and password are arbitrary.
-->
<!--
  NOTE: The sample user and role entries below are wrapped in a comment
  and thus are ignored when reading this file. Do not forget to remove
  <!.. ..> that surrounds them.
-->
<!--
  <role rolename="tomcat"/>
  <role rolename="role1"/>
  <user username="tomcat" password="tomcat" roles="tomcat"/>
  <user username="both" password="tomcat" roles="tomcat,role1"/>
  <user username="role1" password="tomcat" roles="role1"/>
-->

<!-- <role rolename="admin"/> -->
<!-- <role rolename="admin-gui"/> -->
<!-- <role rolename="admin-script"/> -->
<!-- <role rolename="manager"/> -->
<!-- <role rolename="manager-gui"/> -->
<!-- <role rolename="manager-script"/> -->
<!-- <role rolename="manager-jmx"/> -->
<!-- <role rolename="manager-status"/> -->
<!-- <user name="admin" password="adminadmin" roles="admin,manager,admin-gui,admin-script,manager-gui,manager-script,manager-jmx,manager-status" /> -->
</tomcat-users>
75  hadoop-yarn-site.xml  Normal file
@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>

  <!--
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  -->

  <property>
    <name>yarn.dispatcher.exit-on-error</name>
    <value>true</value>
  </property>

  <property>
    <description>List of directories to store localized files in.</description>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/var/cache/hadoop-yarn/${user.name}/nm-local-dir</value>
  </property>

  <property>
    <description>Where to store container logs.</description>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/var/log/hadoop-yarn/containers</value>
  </property>

  <!--
  <property>
    <description>Where to aggregate logs to.</description>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/var/log/hadoop-yarn/apps</value>
  </property>
  -->

  <property>
    <description>Classpath for typical applications.</description>
    <name>yarn.application.classpath</name>
    <value>
      $HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR/*,
      $HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR/*,
      $HADOOP_HDFS_HOME/$HDFS_DIR/*,$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR/*,
      $HADOOP_MAPRED_HOME/$MAPRED_DIR/*,
      $HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR/*,
      $HADOOP_YARN_HOME/$YARN_DIR/*,$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR/*
    </value>
  </property>
</configuration>
37  hadoop-yarn.service.template  Normal file
@@ -0,0 +1,37 @@
[Unit]
Description=The Hadoop DAEMON daemon
After=network.target
After=NetworkManager.target

[Service]
Type=forking
EnvironmentFile=-/etc/sysconfig/hadoop-yarn
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
ExecStart=/usr/sbin/yarn-daemon.sh start DAEMON
ExecStop=/usr/sbin/yarn-daemon.sh stop DAEMON
User=yarn
Group=hadoop
PIDFile=/run/hadoop-yarn/hadoop-yarn-DAEMON.pid
LimitNOFILE=32768
LimitNPROC=65536

#######################################
# Note: Below are cgroup options
#######################################
#Slice=
#CPUAccounting=true
#CPUShares=1024

#MemoryAccounting=true
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes

#BlockIOAccounting=true
#BlockIOWeight=??
#BlockIODeviceWeight=??
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes

#DeviceAllow=
#DevicePolicy=auto|closed|strict

[Install]
WantedBy=multi-user.target
8  hadoop.logrotate  Normal file
@@ -0,0 +1,8 @@
/var/log/hadoop-NAME/*.log
{
    missingok
    copytruncate
    compress
    weekly
    rotate 52
}
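NAME is a placeholder for the subproject (hdfs, yarn, mapreduce), presumably substituted at package build time. A hand-run sketch, plus logrotate's dry-run mode to verify the result:

    sed 's/NAME/hdfs/' hadoop.logrotate > /etc/logrotate.d/hadoop-hdfs
    logrotate -d /etc/logrotate.d/hadoop-hdfs    # debug mode, changes nothing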
1114  hadoop.spec  Normal file
File diff suppressed because it is too large
4  hadoop.yaml  Normal file
@@ -0,0 +1,4 @@
version_control: NA
src_repo: NA
tag_prefix: NA
separator: NA
66  hdfs-create-dirs  Normal file
@@ -0,0 +1,66 @@
#!/bin/bash

hdfs_dirs="/user /var/log /tmp"
mapred_dirs="/tmp/hadoop-yarn/staging /tmp/hadoop-yarn/staging/history /tmp/hadoop-yarn/staging/history/done /tmp/hadoop-yarn/staging/history/done_intermediate"
yarn_dirs="/tmp/hadoop-yarn /var/log/hadoop-yarn"

# Must be run as root
if [[ $EUID -ne 0 ]]
then
  echo "This must be run as root" 1>&2
  exit 1
fi

# Start the namenode if it isn't running
started=0
systemctl status hadoop-namenode > /dev/null 2>&1
rc=$?
if [[ $rc -gt 0 ]]
then
  # Format the namenode if it hasn't been formatted
  runuser hdfs -s /bin/bash /bin/bash -c "hdfs namenode -format -nonInteractive" > /dev/null 2>&1
  if [[ $? -eq 0 ]]
  then
    echo "Formatted the Hadoop namenode"
  fi

  echo "Starting the Hadoop namenode"
  systemctl start hadoop-namenode > /dev/null 2>&1
  rc=$?
  started=1
fi

if [[ $rc -ne 0 ]]
then
  echo "The Hadoop namenode failed to start"
  exit 1
fi

for dir in $hdfs_dirs $yarn_dirs $mapred_dirs
do
  echo "Creating directory $dir"
  runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -mkdir -p $dir" > /dev/null 2>&1
done

echo "Setting permissions on /tmp"
runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chmod 1777 /tmp" > /dev/null 2>&1

for dir in $mapred_dirs
do
  echo "Setting permissions and ownership for $dir"
  runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chown mapred:mapred $dir" > /dev/null 2>&1
  runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chmod 1777 $dir" > /dev/null 2>&1
done

for dir in $yarn_dirs
do
  echo "Setting permissions and ownership for $dir"
  runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chown yarn:mapred $dir" > /dev/null 2>&1
done

# Stop the namenode if we started it
if [[ $started -gt 0 ]]
then
  echo "Stopping the Hadoop namenode"
  systemctl stop hadoop-namenode > /dev/null 2>&1
fi
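The script is a one-shot helper: it formats and starts the namenode if needed, creates the shared HDFS directories, fixes their ownership, then stops the namenode again if it started it. Run it once as root after installing the HDFS packages (the install path below is an assumption):

    sudo /usr/sbin/hdfs-create-dirs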