欢迎来到尧图网

客户服务 关于我们

您的位置:首页 > 汽车 > 新车 > 如何启动spark

如何启动spark

2025/4/16 7:36:08 来源:https://blog.csdn.net/2201_75439183/article/details/147147242  浏览:    关键词:如何启动spark

解决:在 Spark 的 sbin 目录下执行 start-all.sh 无法启动 Spark 的问题

[root@hadoop7 sbin]# ./start-all.sh
./start-all.sh:行29: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/spark-config.sh: 没有那个文件或目录
./start-all.sh:行32: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/start-master.sh: 没有那个文件或目录
./start-all.sh:行35: /root/install/spark-2.4.0-bin-hadoop2.7/sbin/start-slaves.sh: 没有那个文件或目录
 


[root@hadoop7 ~]# jps
1357 Jps
[root@hadoop7 ~]# start-all.sh 
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [hadoop7]
hadoop7: starting namenode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-namenode-hadoop7.out
localhost: starting datanode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-datanode-hadoop7.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /root/install/hadoop-2.7.7/logs/hadoop-root-secondarynamenode-hadoop7.out
starting yarn daemons
starting resourcemanager, logging to /root/install/hadoop-2.7.7/logs/yarn-root-resourcemanager-hadoop7.out
localhost: starting nodemanager, logging to /root/install/hadoop-2.7.7/logs/yarn-root-nodemanager-hadoop7.out
[root@hadoop7 ~]# jps
2850 Jps
1763 DataNode
2515 NodeManager
1564 NameNode
2268 ResourceManager
2014 SecondaryNameNode
[root@hadoop7 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /root/install/zookeeper-3.4.14/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
 


[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd conf/
[root@hadoop7 conf]# ls
docker.properties.template  hive-site.xml              metrics.properties.template  slaves.template               spark-env.sh
fairscheduler.xml.template  log4j.properties.template  slaves                       spark-defaults.conf.template  spark-env.sh.template
[root@hadoop7 conf]# vi spark-env.sh   //把hadoop6 改为了hadoop7
[root@hadoop7 conf]# 
[root@hadoop7 conf]# cd ..
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd logs/
[root@hadoop7 logs]# ls
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out    spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.1  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.2  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.2
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.3  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop4.out.3
spark-root-org.apache.spark.deploy.master.Master-1-hadoop4.out.4  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out    spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.1  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.2
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.2  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.3
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.3  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.4
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.4  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop5.out.5
spark-root-org.apache.spark.deploy.master.Master-1-hadoop5.out.5  spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop6.out
spark-root-org.apache.spark.deploy.master.Master-1-hadoop6.out    spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop6.out.1
spark-root-org.apache.spark.deploy.master.Master-1-hadoop6.out.1
[root@hadoop7 logs]# rm -rf *
[root@hadoop7 logs]# cd ..
[root@hadoop7 spark-2.4.5-bin-hadoop2.7]# cd sbin/
[root@hadoop7 sbin]# ls
slaves.sh         start-all.sh               start-mesos-shuffle-service.sh  start-thriftserver.sh   stop-mesos-dispatcher.sh       stop-slaves.sh
spark-config.sh   start-history-server.sh    start-shuffle-service.sh        stop-all.sh             stop-mesos-shuffle-service.sh  stop-thriftserver.sh
spark-daemon.sh   start-master.sh            start-slave.sh                  stop-history-server.sh  stop-shuffle-service.sh
spark-daemons.sh  start-mesos-dispatcher.sh  start-slaves.sh                 stop-master.sh          stop-slave.sh
[root@hadoop7 sbin]# ./start-all.sh 
starting org.apache.spark.deploy.master.Master, logging to /root/install/spark-2.4.5-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.master.Master-1-hadoop7.out
localhost: starting org.apache.spark.deploy.worker.Worker, logging to /root/install/spark-2.4.5-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-hadoop7.out

修改 spark-env.sh 配置后,重新执行 ./start-all.sh,Master 和 Worker 进程正常启动,Spark 就启动成功了。
 

版权声明:

本网仅为发布的内容提供存储空间,不对发表、转载的内容提供任何形式的保证。凡本网注明“来源:XXX网络”的作品,均转载自其它媒体,著作权归作者所有,商业转载请联系作者获得授权,非商业转载请注明出处。

我们尊重并感谢每一位作者,均已注明文章来源和作者。如因作品内容、版权或其它问题,请及时与我们联系,联系邮箱:809451989@qq.com,投稿邮箱:809451989@qq.com

热搜词