@@ -22,27 +22,27 @@
 # values for that; it doesn't need a lot
 if [ "$1" = "spark.deploy.master.Master" -o "$1" = "spark.deploy.worker.Worker" ]; then
   SPARK_MEM=${SPARK_DAEMON_MEMORY:-512m}
-  SPARK_DAEMON_JAVA_OPTS+=" -Dspark.akka.logLifecycleEvents=true"
+  SPARK_DAEMON_JAVA_OPTS="$SPARK_DAEMON_JAVA_OPTS -Dspark.akka.logLifecycleEvents=true"
   SPARK_JAVA_OPTS=$SPARK_DAEMON_JAVA_OPTS   # Empty by default
 fi
 
 
 # Add java opts for master, worker, executor. The opts may be null
 case "$1" in
   'spark.deploy.master.Master')
-    SPARK_JAVA_OPTS+=" $SPARK_MASTER_OPTS"
+    SPARK_JAVA_OPTS="$SPARK_JAVA_OPTS $SPARK_MASTER_OPTS"
     ;;
   'spark.deploy.worker.Worker')
-    SPARK_JAVA_OPTS+=" $SPARK_WORKER_OPTS"
+    SPARK_JAVA_OPTS="$SPARK_JAVA_OPTS $SPARK_WORKER_OPTS"
     ;;
   'spark.executor.StandaloneExecutorBackend')
-    SPARK_JAVA_OPTS+=" $SPARK_EXECUTOR_OPTS"
+    SPARK_JAVA_OPTS="$SPARK_JAVA_OPTS $SPARK_EXECUTOR_OPTS"
     ;;
   'spark.executor.MesosExecutorBackend')
-    SPARK_JAVA_OPTS+=" $SPARK_EXECUTOR_OPTS"
+    SPARK_JAVA_OPTS="$SPARK_JAVA_OPTS $SPARK_EXECUTOR_OPTS"
     ;;
   'spark.repl.Main')
-    SPARK_JAVA_OPTS+=" $SPARK_REPL_OPTS"
+    SPARK_JAVA_OPTS="$SPARK_JAVA_OPTS $SPARK_REPL_OPTS"
     ;;
 esac
 
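The `+=` string append removed throughout this patch is a bash extension: under a strict POSIX /bin/sh such as dash, a word like `VAR+="..."` is not a valid assignment (the name `VAR+` is illegal), so the shell tries to run it as a command and the script fails with "not found". The portable idiom re-expands the old value inside a plain assignment. A minimal sketch, with illustrative variable names:

    #!/bin/sh
    OPTS="-Xms512m"
    # Bash-only:  OPTS+=" -verbose:gc"   fails under dash/sh
    # Portable:   re-expand the old value inside a normal assignment
    OPTS="$OPTS -verbose:gc"
    echo "$OPTS"   # prints: -Xms512m -verbose:gc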
@@ -85,11 +85,11 @@ export SPARK_MEM
 
 # Set JAVA_OPTS to be able to load native libraries and to set heap size
 JAVA_OPTS="$SPARK_JAVA_OPTS"
-JAVA_OPTS+=" -Djava.library.path=$SPARK_LIBRARY_PATH"
-JAVA_OPTS+=" -Xms$SPARK_MEM -Xmx$SPARK_MEM"
+JAVA_OPTS="$JAVA_OPTS -Djava.library.path=$SPARK_LIBRARY_PATH"
+JAVA_OPTS="$JAVA_OPTS -Xms$SPARK_MEM -Xmx$SPARK_MEM"
 # Load extra JAVA_OPTS from conf/java-opts, if it exists
 if [ -e $FWDIR/conf/java-opts ] ; then
-  JAVA_OPTS+=" `cat $FWDIR/conf/java-opts`"
+  JAVA_OPTS="$JAVA_OPTS `cat $FWDIR/conf/java-opts`"
 fi
 export JAVA_OPTS
 
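As a usage note, conf/java-opts lets users append arbitrary JVM flags without editing the script. With an illustrative file containing two GC flags, the exported value is the concatenation of all the fragments above, in order:

    $ echo '-verbose:gc -XX:+PrintGCDetails' > conf/java-opts
    # JAVA_OPTS becomes:
    #   $SPARK_JAVA_OPTS -Djava.library.path=$SPARK_LIBRARY_PATH \
    #   -Xms$SPARK_MEM -Xmx$SPARK_MEM -verbose:gc -XX:+PrintGCDetails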
@@ -110,30 +110,30 @@
 
 # Build up classpath
 CLASSPATH="$SPARK_CLASSPATH"
-CLASSPATH+=":$FWDIR/conf"
-CLASSPATH+=":$CORE_DIR/target/scala-$SCALA_VERSION/classes"
+CLASSPATH="$CLASSPATH:$FWDIR/conf"
+CLASSPATH="$CLASSPATH:$CORE_DIR/target/scala-$SCALA_VERSION/classes"
 if [ -n "$SPARK_TESTING" ] ; then
-  CLASSPATH+=":$CORE_DIR/target/scala-$SCALA_VERSION/test-classes"
-  CLASSPATH+=":$STREAMING_DIR/target/scala-$SCALA_VERSION/test-classes"
+  CLASSPATH="$CLASSPATH:$CORE_DIR/target/scala-$SCALA_VERSION/test-classes"
+  CLASSPATH="$CLASSPATH:$STREAMING_DIR/target/scala-$SCALA_VERSION/test-classes"
 fi
-CLASSPATH+=":$CORE_DIR/src/main/resources"
-CLASSPATH+=":$REPL_DIR/target/scala-$SCALA_VERSION/classes"
-CLASSPATH+=":$EXAMPLES_DIR/target/scala-$SCALA_VERSION/classes"
-CLASSPATH+=":$STREAMING_DIR/target/scala-$SCALA_VERSION/classes"
-CLASSPATH+=":$STREAMING_DIR/lib/org/apache/kafka/kafka/0.7.2-spark/*" # <-- our in-project Kafka Jar
+CLASSPATH="$CLASSPATH:$CORE_DIR/src/main/resources"
+CLASSPATH="$CLASSPATH:$REPL_DIR/target/scala-$SCALA_VERSION/classes"
+CLASSPATH="$CLASSPATH:$EXAMPLES_DIR/target/scala-$SCALA_VERSION/classes"
+CLASSPATH="$CLASSPATH:$STREAMING_DIR/target/scala-$SCALA_VERSION/classes"
+CLASSPATH="$CLASSPATH:$STREAMING_DIR/lib/org/apache/kafka/kafka/0.7.2-spark/*" # <-- our in-project Kafka Jar
 if [ -e "$FWDIR/lib_managed" ]; then
-  CLASSPATH+=":$FWDIR/lib_managed/jars/*"
-  CLASSPATH+=":$FWDIR/lib_managed/bundles/*"
+  CLASSPATH="$CLASSPATH:$FWDIR/lib_managed/jars/*"
+  CLASSPATH="$CLASSPATH:$FWDIR/lib_managed/bundles/*"
 fi
-CLASSPATH+=":$REPL_DIR/lib/*"
+CLASSPATH="$CLASSPATH:$REPL_DIR/lib/*"
 if [ -e $REPL_BIN_DIR/target ]; then
   for jar in `find "$REPL_BIN_DIR/target" -name 'spark-repl-*-shaded-hadoop*.jar'`; do
-    CLASSPATH+=":$jar"
+    CLASSPATH="$CLASSPATH:$jar"
   done
 fi
-CLASSPATH+=":$BAGEL_DIR/target/scala-$SCALA_VERSION/classes"
+CLASSPATH="$CLASSPATH:$BAGEL_DIR/target/scala-$SCALA_VERSION/classes"
 for jar in `find $PYSPARK_DIR/lib -name '*jar'`; do
-  CLASSPATH+=":$jar"
+  CLASSPATH="$CLASSPATH:$jar"
 done
 
 # Figure out the JAR file that our examples were packaged into. This includes a bit of a hack
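The same re-expansion idiom joins classpath entries with `:` rather than a space. The entries ending in `/*` are JVM classpath wildcards (Java 6+), expanded by the JVM rather than the shell, so the value must stay quoted when it is used. A minimal sketch with illustrative paths and class name:

    #!/bin/sh
    CLASSPATH="conf"
    CLASSPATH="$CLASSPATH:core/target/classes"
    CLASSPATH="$CLASSPATH:lib/*"        # JVM wildcard: one entry per jar in lib/
    java -cp "$CLASSPATH" some.Main     # quoting keeps the shell from globbing lib/*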
@@ -147,6 +147,17 @@ if [ -e "$EXAMPLES_DIR/target/spark-examples-"*hadoop[12].jar ]; then
   export SPARK_EXAMPLES_JAR=`ls "$EXAMPLES_DIR/target/spark-examples-"*hadoop[12].jar`
 fi
 
+# Add hadoop conf dir - else FileSystem.*, etc fail!
+# Note, this assumes that there is either a HADOOP_CONF_DIR or YARN_CONF_DIR which hosts
+# the configuration files.
+if [ "x" != "x$HADOOP_CONF_DIR" ]; then
+  CLASSPATH="$CLASSPATH:$HADOOP_CONF_DIR"
+fi
+if [ "x" != "x$YARN_CONF_DIR" ]; then
+  CLASSPATH="$CLASSPATH:$YARN_CONF_DIR"
+fi
+
+
 # Figure out whether to run our class with java or with the scala launcher.
 # In most cases, we'd prefer to execute our process with java because scala
 # creates a shell script as the parent of its Java process, which makes it
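The `[ "x" != "x$VAR" ]` tests added above are the classic portable idiom for "variable is non-empty": prefixing both operands with `x` guards old `test` implementations against values that are empty or begin with `-`. With both sides quoted it behaves like `[ -n "$VAR" ]`, shown here with an illustrative path:

    #!/bin/sh
    HADOOP_CONF_DIR=/etc/hadoop/conf      # illustrative path
    if [ -n "$HADOOP_CONF_DIR" ]; then    # same effect as [ "x" != "x$HADOOP_CONF_DIR" ]
      CLASSPATH="$CLASSPATH:$HADOOP_CONF_DIR"
    fi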
@@ -156,9 +167,9 @@
 if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then
   EXTRA_ARGS=""  # Java options will be passed to scala as JAVA_OPTS
 else
-  CLASSPATH+=":$SCALA_LIBRARY_PATH/scala-library.jar"
-  CLASSPATH+=":$SCALA_LIBRARY_PATH/scala-compiler.jar"
-  CLASSPATH+=":$SCALA_LIBRARY_PATH/jline.jar"
+  CLASSPATH="$CLASSPATH:$SCALA_LIBRARY_PATH/scala-library.jar"
+  CLASSPATH="$CLASSPATH:$SCALA_LIBRARY_PATH/scala-compiler.jar"
+  CLASSPATH="$CLASSPATH:$SCALA_LIBRARY_PATH/jline.jar"
   # The JVM doesn't read JAVA_OPTS by default so we need to pass it in
   EXTRA_ARGS="$JAVA_OPTS"
 fi
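For context, the launch itself is outside this diff: the variables built here feed a final exec of whichever runner was chosen. A sketch, assuming a $RUNNER variable set to either scala or java where SPARK_LAUNCH_WITH_SCALA was decided earlier in the script:

    # Not part of this patch; $RUNNER is assumed set earlier.
    # scala reads JAVA_OPTS from the environment (hence EXTRA_ARGS=""),
    # while java does not (hence EXTRA_ARGS="$JAVA_OPTS").
    exec "$RUNNER" -cp "$CLASSPATH" $EXTRA_ARGS "$@"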