大数据环境部署之集群启动脚本

创建脚本

集群执行脚本

创建脚本

1
2
mkdir -p /data/tools/bigdata/mysh/
vi /data/tools/bigdata/mysh/ha-call.sh

内容如下

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
#!/bin/bash

# Cluster command runner: execute the given command on every cluster node
# over ssh and print each node's output with a visual separator.
# Usage: sh ha-call.sh jps        or    sh ha-call.sh 'jps -l;java -version'
# Exits 1 when called with no command.

USAGE="使用方法:sh ha-call.sh jps or sh ha-call.sh 'jps -l;java -version'"
if [ $# -eq 0 ]; then
  # Quoted so the usage text is printed verbatim (no word splitting).
  echo "$USAGE"
  exit 1
fi

NODES=("hadoop01" "hadoop02" "hadoop03")
echo "======================================================"
# "${NODES[@]}" keeps each hostname a separate word even if one ever
# contains spaces; ${NODES[*]} unquoted would also be glob-expanded.
for NODE in "${NODES[@]}"; do
  echo -e "执行命令:\033[5;32mssh $NODE \"$*\"\033[0m"
  echo ""
  # "$*" joins all arguments into one remote command line, matching the
  # usage examples above (a quoted 'cmd1;cmd2' arrives as one string).
  ssh "$NODE" "$*"
  echo "------------------------------------------------------"
done
echo -e "\033[5;32m集群执行命令脚本执行完成!\033[0m"
echo "======================================================"

修改权限

1
chmod 755 /data/tools/bigdata/mysh/ha-call.sh

测试

1
2
3
ha-call.sh jps
#或者
ha-call.sh 'jps -l;java -version'

分发

1
2
3
4
ha-fenfa.sh $MYSH_HOME
ha-fenfa.sh /etc/profile.d/
ssh hadoop02 "source /etc/profile"
ssh hadoop03 "source /etc/profile"

测试服务器互通

1
2
3
4
5
6
ssh hadoop01
ha-call.sh date
ssh hadoop02
ha-call.sh date
ssh hadoop03
ha-call.sh date

集群ZK启动脚本

创建脚本

1
2
mkdir -p /data/tools/bigdata/mysh/
vi /data/tools/bigdata/mysh/ha-zk.sh

内容如下

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#!/bin/bash

# ZooKeeper cluster helper: start, stop, or query zkServer.sh on every node.
# Usage: sh ha-zk.sh start|stop|status
# Exits 1 unless exactly one action argument is given.
# NOTE(review): $ZK_HOME is expanded on the LOCAL machine before ssh runs,
# so this assumes ZooKeeper is installed at the same path on every node.

USAGE="使用方法:sh ha-zk.sh start/stop/status"
if [ $# -ne 1 ]; then
  # Quoted to print the usage text verbatim.
  echo "$USAGE"
  exit 1
fi

NODES=("hadoop01" "hadoop02" "hadoop03")

case $1 in
"start")
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE}启动zookeeper--------"
    ssh "$NODE" "$ZK_HOME/bin/zkServer.sh start"
  done
  ;;
"stop")
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE}停止zookeeper--------"
    ssh "$NODE" "$ZK_HOME/bin/zkServer.sh stop"
  done
  ;;
"status")
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE}查看zookeeper状态--------"
    ssh "$NODE" "$ZK_HOME/bin/zkServer.sh status"
  done
  ;;
*)
  # Unknown action: show usage (original behavior — does not exit non-zero).
  echo "$USAGE"
  ;;
esac
echo "----------------------------------------------------------------------------------------"
echo "--------ha-zk.sh脚本执行完成!--------"
echo "----------------------------------------------------------------------------------------"

修改权限

1
chmod 755 /data/tools/bigdata/mysh/ha-zk.sh

测试

1
2
3
ha-zk.sh start
ha-zk.sh stop
ha-zk.sh status

集群Hadoop启动脚本

依赖ZK

创建脚本

1
2
mkdir -p /data/tools/bigdata/mysh/
vi /data/tools/bigdata/mysh/ha-hadoop.sh

内容如下

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#!/bin/bash

# HA-Hadoop cluster helper: start/stop the cluster or list Java processes.
# Role layout (per the ssh commands below):
#   hadoop01 - HDFS (start-dfs.sh) + MapReduce JobHistory server
#   hadoop02 - an extra/standby ResourceManager via yarn-daemon.sh
#   hadoop03 - YARN (start-yarn.sh)
# Depends on ZooKeeper being up first.
# Usage: sh ha-hadoop.sh start|stop|status
# NOTE(review): $HADOOP_HOME expands on the LOCAL machine before ssh runs,
# so this assumes Hadoop is installed at the same path on every node.

USAGE="使用方法:sh ha-hadoop.sh start/stop/status"
if [ $# -ne 1 ]; then
  echo "$USAGE"
  exit 1
fi

NODES=("hadoop01" "hadoop02" "hadoop03")
case $1 in
"start")
  # Bring the cluster up node by node, in NODES order.
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE}启动ha-hadoop集群--------"
    # [[ ]] with quoted $NODE is safe even if the value were empty;
    # the original unquoted [ "hadoop01" = $NODE ] would break then.
    if [[ "$NODE" == "hadoop01" ]]; then
      # HDFS first, then the JobHistory server on the same node.
      ssh "$NODE" "$HADOOP_HOME/sbin/start-dfs.sh && $HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver"
    elif [[ "$NODE" == "hadoop02" ]]; then
      ssh "$NODE" "$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager"
    elif [[ "$NODE" == "hadoop03" ]]; then
      ssh "$NODE" "$HADOOP_HOME/sbin/start-yarn.sh"
    fi
  done
  ;;
"stop")
  # Tear the cluster down, mirroring the start commands.
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE}停止ha-hadoop集群--------"
    if [[ "$NODE" == "hadoop01" ]]; then
      ssh "$NODE" "$HADOOP_HOME/sbin/stop-dfs.sh && $HADOOP_HOME/sbin/mr-jobhistory-daemon.sh stop historyserver"
    elif [[ "$NODE" == "hadoop02" ]]; then
      ssh "$NODE" "$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager"
    elif [[ "$NODE" == "hadoop03" ]]; then
      ssh "$NODE" "$HADOOP_HOME/sbin/stop-yarn.sh"
    fi
  done
  ;;
"status")
  # Just list the Java processes on every node.
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE}查看ha-hadoop集群进程--------"
    ssh "$NODE" "jps"
  done
  ;;
*)
  # Unknown action: show usage (original behavior — does not exit non-zero).
  echo "$USAGE"
  ;;
esac
echo "----------------------------------------------------------------------------------------"
echo "--------ha-hadoop.sh脚本执行完成!--------"
echo "----------------------------------------------------------------------------------------"

修改权限

1
chmod 755 /data/tools/bigdata/mysh/ha-hadoop.sh

测试

1
2
3
ha-hadoop.sh start
ha-hadoop.sh stop
ha-hadoop.sh status

集群Flink启动脚本

依赖ZK和Hadoop

创建脚本

1
2
mkdir -p /data/tools/bigdata/mysh/
vi /data/tools/bigdata/mysh/ha-flink.sh

内容如下

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
#!/bin/bash

# Flink cluster helper (YARN/HA mode): start/stop the cluster or list
# each node's Java processes. Depends on ZooKeeper and Hadoop being up.
# start-cluster.sh / stop-cluster.sh only need to run on hadoop01 — the
# original looped over all nodes but only ever matched hadoop01, so the
# loop is replaced with a direct ssh; output is unchanged.
# Usage: sh ha-flink.sh start|stop|status
# NOTE(review): $FLINK_HOME expands on the LOCAL machine before ssh runs,
# so this assumes Flink is installed at the same path on hadoop01.

USAGE="使用方法:sh ha-flink.sh start/stop/status"
if [ $# -ne 1 ]; then
  echo "$USAGE"
  exit 1
fi


NODES=("hadoop01" "hadoop02" "hadoop03")

case $1 in
"start")
  echo "--------hadoop01启动ha-flink集群--------"
  ssh hadoop01 "$FLINK_HOME/bin/start-cluster.sh"
  ;;
"stop")
  echo "--------hadoop01停止ha-flink集群--------"
  ssh hadoop01 "$FLINK_HOME/bin/stop-cluster.sh"
  ;;
"status")
  echo "--------查看ha-flink集群进程信息"
  for NODE in "${NODES[@]}"; do
    echo "--------${NODE} 查看ha-flink集群进程--------"
    ssh "$NODE" "jps"
  done
  ;;
*)
  # Unknown action: show usage (original behavior — does not exit non-zero).
  echo "$USAGE"
  ;;
esac
echo "----------------------------------------------------------------------------------------"
echo "--------ha-flink.sh 脚本执行完成!--------"
echo "----------------------------------------------------------------------------------------"

修改权限

1
chmod 755 /data/tools/bigdata/mysh/ha-flink.sh

测试

1
2
3
ha-flink.sh start
ha-flink.sh status
ha-flink.sh stop

配置环境变量

之前配置过可以跳过。

添加环境变量

1
cd /etc/profile.d/

创建配置文件

1
vi /etc/profile.d/mysh.sh

加入:

1
2
export MYSH_HOME=/data/tools/bigdata/mysh/ 
export PATH=$MYSH_HOME:$PATH

配置立即生效

1
source /etc/profile

查看MYSH_HOME

1
echo $MYSH_HOME

配置分发

1
2
3
4
ha-fenfa.sh $MYSH_HOME
ha-fenfa.sh /etc/profile.d/
ssh hadoop02 "source /etc/profile"
ssh hadoop03 "source /etc/profile"