@@ -366,7 +366,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * @param logLevel The desired log level as a string.
    * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
    */
-  def setLogLevel(logLevel: String) {
+  def setLogLevel(logLevel: String): Unit = {
     // let's allow lowercase or mixed case too
     val upperCased = logLevel.toUpperCase(Locale.ROOT)
     require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
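Every hunk in this patch makes the same mechanical change: Scala's procedure syntax (`def f(...) { ... }`), deprecated in Scala 2.13 and dropped in Scala 3, is replaced with an explicit `: Unit =` result type. A minimal sketch of the two forms, plus a typical call to the method changed above, assuming an already-started `SparkContext` named `sc`:

```scala
// Procedure syntax (deprecated): the result type is only implied.
// def setVerbose(flag: Boolean) { verbose = flag }

// Explicit form used throughout this patch:
// def setVerbose(flag: Boolean): Unit = { verbose = flag }

// Calling the method changed above; `sc` is an assumed live SparkContext.
sc.setLogLevel("WARN")  // any of ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
```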
@@ -661,7 +661,7 @@ class SparkContext(config: SparkConf) extends Logging {
 
   private[spark] def getLocalProperties: Properties = localProperties.get()
 
-  private[spark] def setLocalProperties(props: Properties) {
+  private[spark] def setLocalProperties(props: Properties): Unit = {
     localProperties.set(props)
   }
 
@@ -676,7 +676,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * implementation of thread pools have worker threads spawn other worker threads.
    * As a result, local properties may propagate unpredictably.
    */
-  def setLocalProperty(key: String, value: String) {
+  def setLocalProperty(key: String, value: String): Unit = {
     if (value == null) {
       localProperties.get.remove(key)
     } else {
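`setLocalProperty` stores the pair in the thread-local `localProperties`, and a `null` value removes the key; `getLocalProperty` reads it back. A hedged usage sketch, assuming a live `SparkContext` `sc` and using the fair-scheduler pool property as the example key:

```scala
sc.setLocalProperty("spark.scheduler.pool", "production")
val pool = sc.getLocalProperty("spark.scheduler.pool")  // "production"
sc.setLocalProperty("spark.scheduler.pool", null)       // null removes the key again
```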
@@ -692,7 +692,7 @@ class SparkContext(config: SparkConf) extends Logging {
     Option(localProperties.get).map(_.getProperty(key)).orNull
 
   /** Set a human readable description of the current job. */
-  def setJobDescription(value: String) {
+  def setJobDescription(value: String): Unit = {
     setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
   }
 
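The description set here is cosmetic: it is stored as the `SPARK_JOB_DESCRIPTION` local property and shown for the thread's subsequent jobs in the web UI. A short sketch, assuming a live `sc` (the description text is arbitrary):

```scala
sc.setJobDescription("Nightly aggregation of click logs")
sc.parallelize(1 to 10).count()  // this job is labelled with the description above
```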
@@ -720,7 +720,8 @@ class SparkContext(config: SparkConf) extends Logging {
    * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
    * may respond to Thread.interrupt() by marking nodes as dead.
    */
-  def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false) {
+  def setJobGroup(groupId: String,
+      description: String, interruptOnCancel: Boolean = false): Unit = {
     setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
     setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
     // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
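`setJobGroup` tags every job started from the current thread with a group ID so they can later be cancelled together; `interruptOnCancel = true` additionally interrupts the executor task threads, with the HDFS-1208 caveat noted above. A sketch with a made-up group ID and description:

```scala
sc.setJobGroup("etl-2024", "Nightly ETL run", interruptOnCancel = true)
// ... trigger one or more actions from this thread ...

// Later, possibly from a different thread:
sc.cancelJobGroup("etl-2024")
```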
@@ -731,7 +732,7 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /** Clear the current thread's job group ID and its description. */
-  def clearJobGroup() {
+  def clearJobGroup(): Unit = {
     setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
     setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
     setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
@@ -1559,7 +1560,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * Register a listener to receive up-calls from events that happen during execution.
    */
   @DeveloperApi
-  def addSparkListener(listener: SparkListenerInterface) {
+  def addSparkListener(listener: SparkListenerInterface): Unit = {
     listenerBus.addToSharedQueue(listener)
   }
 
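`addSparkListener` attaches the listener to the shared queue of the internal `listenerBus`. A minimal sketch, assuming a live `sc`: `SparkListener` is the no-op base class implementing `SparkListenerInterface`, so only the callbacks of interest need overriding.

```scala
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}

sc.addSparkListener(new SparkListener {
  override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
    // Invoked asynchronously from the listener bus thread.
    println(s"Job ${jobEnd.jobId} finished: ${jobEnd.jobResult}")
  }
})
```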
@@ -1788,14 +1789,14 @@ class SparkContext(config: SparkConf) extends Logging {
   /**
    * Register an RDD to be persisted in memory and/or disk storage
    */
-  private[spark] def persistRDD(rdd: RDD[_]) {
+  private[spark] def persistRDD(rdd: RDD[_]): Unit = {
     persistentRdds(rdd.id) = rdd
   }
 
   /**
    * Unpersist an RDD from memory and/or disk storage
    */
-  private[spark] def unpersistRDD(rddId: Int, blocking: Boolean) {
+  private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = {
     env.blockManager.master.removeRdd(rddId, blocking)
     persistentRdds.remove(rddId)
     listenerBus.post(SparkListenerUnpersistRDD(rddId))
@@ -1811,7 +1812,7 @@ class SparkContext(config: SparkConf) extends Logging {
    *
    * @note A path can be added only once. Subsequent additions of the same path are ignored.
    */
-  def addJar(path: String) {
+  def addJar(path: String): Unit = {
     def addLocalJarFile(file: File): String = {
       try {
         if (!file.exists()) {
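`addJar` makes a JAR available to current and future executors so its classes can be used by tasks; per the `@note`, re-adding the same path is ignored. The paths below are illustrative only, assuming a live `sc`:

```scala
sc.addJar("/opt/jobs/lib/udfs.jar")        // a file local to the driver
sc.addJar("hdfs:///shared/libs/udfs.jar")  // or a path every executor can reach
```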
@@ -2018,15 +2019,15 @@ class SparkContext(config: SparkConf) extends Logging {
    * Set the thread-local property for overriding the call sites
    * of actions and RDDs.
    */
-  def setCallSite(shortCallSite: String) {
+  def setCallSite(shortCallSite: String): Unit = {
     setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
   }
 
   /**
    * Set the thread-local property for overriding the call sites
    * of actions and RDDs.
    */
-  private[spark] def setCallSite(callSite: CallSite) {
+  private[spark] def setCallSite(callSite: CallSite): Unit = {
     setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
     setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
   }
@@ -2035,7 +2036,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * Clear the thread-local property for overriding the call sites
    * of actions and RDDs.
    */
-  def clearCallSite() {
+  def clearCallSite(): Unit = {
     setLocalProperty(CallSite.SHORT_FORM, null)
     setLocalProperty(CallSite.LONG_FORM, null)
   }
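The call-site properties override the "where was this action called from" string shown in the web UI and in logs for the thread's subsequent actions, until cleared. A hedged sketch, assuming a live `sc` (the input path is made up):

```scala
sc.setCallSite("load user events")                  // short form shown in the web UI
val n = sc.textFile("hdfs:///data/events").count()  // action reported under that label
sc.clearCallSite()                                  // restore the default call sites
```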
@@ -2155,8 +2156,7 @@ class SparkContext(config: SparkConf) extends Logging {
   def runJob[T, U: ClassTag](
       rdd: RDD[T],
       processPartition: (TaskContext, Iterator[T]) => U,
-      resultHandler: (Int, U) => Unit)
-  {
+      resultHandler: (Int, U) => Unit): Unit = {
     runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
   }
 
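This `runJob` overload applies `processPartition` to every partition and feeds each result, keyed by partition index, to `resultHandler` on the driver. A sketch assuming a live `sc`, collecting per-partition sums into an array:

```scala
import org.apache.spark.TaskContext

val rdd = sc.parallelize(1 to 100, numSlices = 4)
val partitionSums = new Array[Int](rdd.partitions.length)
sc.runJob(
  rdd,
  (_: TaskContext, iter: Iterator[Int]) => iter.sum,     // processPartition, runs on executors
  (index: Int, sum: Int) => partitionSums(index) = sum)  // resultHandler, runs on the driver
```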
@@ -2170,8 +2170,7 @@ class SparkContext(config: SparkConf) extends Logging {
   def runJob[T, U: ClassTag](
       rdd: RDD[T],
       processPartition: Iterator[T] => U,
-      resultHandler: (Int, U) => Unit)
-  {
+      resultHandler: (Int, U) => Unit): Unit = {
     val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
     runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
   }
@@ -2256,13 +2255,13 @@ class SparkContext(config: SparkConf) extends Logging {
    * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
    * for more information.
    */
-  def cancelJobGroup(groupId: String) {
+  def cancelJobGroup(groupId: String): Unit = {
     assertNotStopped()
     dagScheduler.cancelJobGroup(groupId)
   }
 
   /** Cancel all jobs that have been scheduled or are running. */
-  def cancelAllJobs() {
+  def cancelAllJobs(): Unit = {
     assertNotStopped()
     dagScheduler.cancelAllJobs()
   }
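Both cancel methods are meant to be called from a thread other than the one blocked in the running action, for example a timeout watchdog. A rough sketch (the group ID matches the earlier example and the timeout is arbitrary):

```scala
new Thread(() => {
  Thread.sleep(30000)             // give the jobs 30 seconds
  sc.cancelJobGroup("etl-2024")   // or sc.cancelAllJobs() to stop everything
}).start()
```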
@@ -2350,7 +2349,7 @@ class SparkContext(config: SparkConf) extends Logging {
    * @param directory path to the directory where checkpoint files will be stored
    * (must be HDFS path if running in cluster)
    */
-  def setCheckpointDir(directory: String) {
+  def setCheckpointDir(directory: String): Unit = {
 
     // If we are running on a cluster, log a warning if the directory is local.
     // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
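`setCheckpointDir` only records where checkpoint files go; an RDD is actually checkpointed once `checkpoint()` has been called on it and an action materializes it. A sketch with an illustrative HDFS path, assuming a live `sc`:

```scala
sc.setCheckpointDir("hdfs:///tmp/spark-checkpoints")  // must be a shared path on a cluster
val data = sc.parallelize(1 to 1000).map(_ * 2)
data.checkpoint()  // mark the RDD for checkpointing
data.count()       // first action writes the checkpoint and truncates the lineage
```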
@@ -2422,7 +2421,7 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /** Post the application start event */
-  private def postApplicationStart() {
+  private def postApplicationStart(): Unit = {
     // Note: this code assumes that the task scheduler has been initialized and has contacted
     // the cluster manager to get an application ID (in case the cluster manager provides one).
     listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
@@ -2432,12 +2431,12 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   /** Post the application end event */
-  private def postApplicationEnd() {
+  private def postApplicationEnd(): Unit = {
     listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
   }
 
   /** Post the environment update event once the task scheduler is ready */
-  private def postEnvironmentUpdate() {
+  private def postEnvironmentUpdate(): Unit = {
     if (taskScheduler != null) {
       val schedulingMode = getSchedulingMode.toString
       val addedJarPaths = addedJars.keys.toSeq