Fix: Spark fails to start from the scripts in its sbin directory
[root@hadoop7 sbin]# ./start-all.sh
./start-all.sh: line 29: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/spark-config.sh: No such file or directory
./start-all.sh: line 32: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/start-master.sh: No such file or directory
./start-all.sh: line 35: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/start-slaves.sh: No such file or directory
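Note that the script is looking for its helper files under spark-2.4.0-bin-hadoop2.7, while the installation actually used below is spark-2.4.5-bin-hadoop2.7. One likely cause is a SPARK_HOME variable that still points at the old install, so a quick check is worthwhile (this assumes SPARK_HOME is exported in /etc/profile or ~/.bashrc; adjust to wherever you set it):

echo $SPARK_HOME                      # should print /root/install/spark-2.4.5-bin-hadoop2.7
ls $SPARK_HOME/sbin/spark-config.sh   # must exist; if not, SPARK_HOME still points at the old 2.4.0 directory

If the value is wrong, fix the export and re-source the profile before retrying.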
[root@hadoop7 ~]# jps
1357 Jps
[root@hadoop7 ~]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [hadoop7]
hadoop7: starting namenode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-namenode-hadoop7.out
localhost: starting datanode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-datanode-hadoop7.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-secondarynamenode-hadoop7.out
starting yarn daemons
starting resourcemanager, logging to /root/install/hadoop-2.7.7/logs/yarn-root-resourcemanager-hadoop7.out
localhost: starting nodemanager, logging to /root/install/hadoop-2.7.7/logs/yarn-root-nodemanager-hadoop7.out
[root@hadoop7 ~]# jps
2850 Jps
1763 DataNode
2515 NodeManager
1564 NameNode
2268 ResourceManager
2014 SecondaryNameNode
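As the warning above says, Hadoop's start-all.sh is deprecated; the same daemons can also be brought up in two explicit steps, which makes failures easier to isolate:

start-dfs.sh     # NameNode, DataNode, SecondaryNameNode
start-yarn.sh    # ResourceManager, NodeManager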
[root@hadoop7 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /root/install/zookeeper-3.4.14/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
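The STARTED line only means the process was launched; to confirm ZooKeeper is actually serving, zkServer.sh status reports the mode (on a single node it should show standalone):

zkServer.sh status
# Expected on a single-node setup: Mode: standalone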
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd conf/
[root@hadoop7 conf]# ls
docker.properties.template hive-site.xml metrics.properties.template slaves.template spark-env.sh
fairscheduler.xml.template log4j.properties.template slaves spark-defaults.conf.template spark-env.sh.template
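Worth noting: start-slaves.sh launches a Worker on every host listed in conf/slaves. Since the listing above shows a slaves file (and the successful start later brings the Worker up on localhost), a quick check that it names the intended host does no harm:

cat slaves    # one worker hostname per line; here apparently localhost, hadoop7 would work as well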
[root@hadoop7 conf]# vi spark-env.sh //changed hadoop6 to hadoop7
[root@hadoop7 conf]#
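The full contents of spark-env.sh are not shown here; the setting that matters for the hostname change is the master host. A minimal sketch of the relevant lines (the HADOOP_CONF_DIR path is an assumption, not taken from the original file):

export HADOOP_CONF_DIR=/root/install/hadoop-2.7.7/etc/hadoop   # assumed path; lets Spark find the Hadoop configs
export SPARK_MASTER_HOST=hadoop7                               # was hadoop6; this is the change described above
export SPARK_MASTER_PORT=7077                                  # default standalone master port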
[root@hadoop7 conf]# cd ..
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd logs/
[root@hadoop7 logs]# ls
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.1 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.2 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.2
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.3 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.3
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.4 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.1 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.2
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.2 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.3
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.3 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.4
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.4 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.5
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.5 spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop6.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop6.out spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop6.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop6.out.1
[root@hadoop7 logs]# rm -rf *
[root@hadoop7 logs]# cd ..
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd sbin/
[root@hadoop7 sbin]# ls
slaves.sh start-all.sh start-mesos-shuffle-service.sh start-thriftserver.sh stop-mesos-dispatcher.sh stop-slaves.sh
spark-config.sh start-history-server.sh start-shuffle-service.sh stop-all.sh stop-mesos-shuffle-service.sh stop-thriftserver.sh
spark-daemon.sh start-master.sh start-slave.sh stop-history-server.sh stop-shuffle-service.sh
spark-daemons.sh start-mesos-dispatcher.sh start-slaves.sh stop-master.sh stop-slave.sh
[root@hadoop7 sbin]# ./start-all.sh
starting org.apache.spark.deploy.master.Master, logging to /root/install/spark-2.4.5-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.master.Master-1-hadoop7.out
localhost: starting org.apache.spark.deploy.worker.Worker, logging to /root/install/spark-2.4.5-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop7.out
And with that, Spark started successfully.
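To verify, jps should now list Master and Worker alongside the Hadoop daemons, and the standalone master web UI is reachable on port 8080 by default (assuming the port was not overridden in spark-env.sh):

jps                                   # look for Master and Worker
# Master web UI: http://hadoop7:8080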