Skip to content

Commit 91d1d53

Browse files
authored
Fix Java Files Import Order
- Enable the ImportOrder rule in checkstyle.xml and fix the violations reported by the `mvn checkstyle:check` command. - Add the spark2.3.2/scala directory to the scalastyle-maven-plugin configuration and fix the reported problems.
1 parent 887562c commit 91d1d53

18 files changed

+62
-56
lines changed

checkstyle.xml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -155,13 +155,11 @@
155155
</module>
156156
-->
157157
<!-- TODO: 11/09/15 disabled - order is currently wrong in many places -->
158-
<!--
159158
<module name="ImportOrder">
160159
<property name="separated" value="true"/>
161160
<property name="ordered" value="true"/>
162161
<property name="groups" value="/^javax?\./,scala,*,org.apache.spark"/>
163162
</module>
164-
-->
165163
<module name="MethodParamPad"/>
166164
<module name="AnnotationLocation">
167165
<property name="tokens" value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF"/>

pom.xml

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -434,8 +434,14 @@
434434
<failOnViolation>true</failOnViolation>
435435
<failOnWarning>false</failOnWarning>
436436
<includeTestSourceDirectory>true</includeTestSourceDirectory>
437-
<sourceDirectory>src/main/scala</sourceDirectory>
438-
<testSourceDirectory>src/test/scala</testSourceDirectory>
437+
<sourceDirectories>
438+
<directory>src/main/scala</directory>
439+
<directory>src/main/spark2.3.2/scala</directory>
440+
</sourceDirectories>
441+
<testSourceDirectories>
442+
<directory>src/test/scala</directory>
443+
<directory>src/test/spark2.3.2/scala</directory>
444+
</testSourceDirectories>
439445
<configLocation>scalastyle-config.xml</configLocation>
440446
<outputFile>target/scalastyle-output.xml</outputFile>
441447
<inputEncoding>UTF-8</inputEncoding>
@@ -589,12 +595,12 @@
589595
<failOnViolation>true</failOnViolation>
590596
<includeTestSourceDirectory>true</includeTestSourceDirectory>
591597
<sourceDirectories>
592-
<directory>${basedir}/src/main/java</directory>
593-
<directory>${basedir}/src/main/scala</directory>
598+
<directory>src/main/java</directory>
599+
<directory>src/spark2.3.2/scala</directory>
594600
</sourceDirectories>
595601
<testSourceDirectories>
596-
<directory>${basedir}/src/test/java</directory>
597-
<directory>${basedir}/src/test/scala</directory>
602+
<directory>src/test/java</directory>
603+
<directory>src/test/spark2.3.2/java</directory>
598604
</testSourceDirectories>
599605
<configLocation>checkstyle.xml</configLocation>
600606
<outputFile>${basedir}/target/checkstyle-output.xml</outputFile>

src/main/java/org/apache/parquet/hadoop/IndexedVectorizedOapRecordReader.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@
2323
import com.google.common.collect.Maps;
2424
import org.apache.hadoop.conf.Configuration;
2525
import org.apache.hadoop.fs.Path;
26-
import org.apache.parquet.hadoop.metadata.ParquetFooter;
2726
import org.apache.parquet.hadoop.OapParquetFileReader.RowGroupDataAndRowIds;
27+
import org.apache.parquet.hadoop.metadata.ParquetFooter;
2828
import org.apache.parquet.it.unimi.dsi.fastutil.ints.IntArrayList;
2929
import org.apache.parquet.it.unimi.dsi.fastutil.ints.IntList;
3030

src/main/java/org/apache/parquet/hadoop/InternalOapRecordReader.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,21 +16,18 @@
1616
*/
1717
package org.apache.parquet.hadoop;
1818

19-
import static org.apache.parquet.Log.DEBUG;
20-
import static org.apache.parquet.hadoop.ParquetInputFormat.STRICT_TYPE_CHECKING;
21-
2219
import java.io.IOException;
2320
import java.util.List;
2421
import java.util.Map;
2522

2623
import org.apache.hadoop.conf.Configuration;
2724
import org.apache.parquet.column.page.PageReadStore;
25+
import org.apache.parquet.hadoop.OapParquetFileReader.RowGroupDataAndRowIds;
2826
import org.apache.parquet.hadoop.api.InitContext;
2927
import org.apache.parquet.hadoop.api.ReadSupport;
3028
import org.apache.parquet.hadoop.metadata.BlockMetaData;
3129
import org.apache.parquet.hadoop.metadata.FileMetaData;
3230
import org.apache.parquet.hadoop.metadata.IndexedBlockMetaData;
33-
import org.apache.parquet.hadoop.OapParquetFileReader.RowGroupDataAndRowIds;
3431
import org.apache.parquet.hadoop.utils.Collections3;
3532
import org.apache.parquet.io.ColumnIOFactory;
3633
import org.apache.parquet.io.MessageColumnIO;
@@ -43,6 +40,9 @@
4340
import org.slf4j.Logger;
4441
import org.slf4j.LoggerFactory;
4542

43+
import static org.apache.parquet.Log.DEBUG;
44+
import static org.apache.parquet.hadoop.ParquetInputFormat.STRICT_TYPE_CHECKING;
45+
4646
public class InternalOapRecordReader<T> {
4747

4848
private static final Logger LOG = LoggerFactory.getLogger(InternalOapRecordReader.class);

src/main/java/org/apache/parquet/hadoop/MrOapRecordReader.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,6 @@
1818
*/
1919
package org.apache.parquet.hadoop;
2020

21-
import static org.apache.parquet.hadoop.ParquetInputFormat.getFilter;
22-
2321
import java.io.IOException;
2422

2523
import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,8 @@
2826
import org.apache.parquet.hadoop.api.RecordReader;
2927
import org.apache.parquet.hadoop.metadata.ParquetFooter;
3028

29+
import static org.apache.parquet.hadoop.ParquetInputFormat.getFilter;
30+
3131
public class MrOapRecordReader<T> implements RecordReader<T> {
3232

3333
private Configuration configuration;

src/main/java/org/apache/parquet/hadoop/OapParquetFileReader.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,6 @@
1616
*/
1717
package org.apache.parquet.hadoop;
1818

19-
import static org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER;
20-
2119
import java.io.Closeable;
2220
import java.io.IOException;
2321
import java.util.List;
@@ -34,6 +32,8 @@
3432
import org.apache.parquet.it.unimi.dsi.fastutil.ints.IntList;
3533
import org.apache.parquet.schema.MessageType;
3634

35+
import static org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER;
36+
3737
public class OapParquetFileReader implements Closeable {
3838

3939
private ParquetFileReader reader;

src/main/java/org/apache/parquet/hadoop/SpecificOapRecordReaderBase.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,6 @@
1616
*/
1717
package org.apache.parquet.hadoop;
1818

19-
import static org.apache.parquet.hadoop.ParquetInputFormat.getFilter;
20-
2119
import java.io.IOException;
2220
import java.util.Map;
2321

@@ -31,6 +29,8 @@
3129
import org.apache.parquet.hadoop.utils.Collections3;
3230
import org.apache.parquet.schema.MessageType;
3331

32+
import static org.apache.parquet.hadoop.ParquetInputFormat.getFilter;
33+
3434
import org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupportWrapper;
3535
import org.apache.spark.sql.types.StructType;
3636
import org.apache.spark.sql.types.StructType$;

src/main/java/org/apache/parquet/hadoop/VectorizedOapRecordReader.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
import org.apache.parquet.column.page.PageReadStore;
2828
import org.apache.parquet.hadoop.metadata.ParquetFooter;
2929
import org.apache.parquet.schema.Type;
30+
3031
import org.apache.spark.memory.MemoryMode;
3132
import org.apache.spark.sql.catalyst.InternalRow;
3233
import org.apache.spark.sql.execution.datasources.parquet.SkippableVectorizedColumnReader;

src/main/java/org/apache/parquet/io/RecordReaderFactory.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,13 @@
1818
*/
1919
package org.apache.parquet.io;
2020

21+
import java.util.List;
22+
2123
import org.apache.parquet.column.impl.ColumnReadStoreImpl;
2224
import org.apache.parquet.column.page.PageReadStore;
2325
import org.apache.parquet.io.api.RecordMaterializer;
2426
import org.apache.parquet.it.unimi.dsi.fastutil.ints.IntList;
2527

26-
import java.util.List;
27-
2828
import static org.apache.parquet.Preconditions.checkNotNull;
2929

3030
public class RecordReaderFactory {

src/main/java/org/apache/spark/sql/execution/datasources/oap/orc/IndexedOrcMapreduceRecordReader.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,12 @@
1717
*/
1818
package org.apache.spark.sql.execution.datasources.oap.orc;
1919

20+
import java.io.IOException;
21+
2022
import org.apache.hadoop.conf.Configuration;
2123
import org.apache.hadoop.fs.Path;
2224
import org.apache.hadoop.io.WritableComparable;
2325

24-
import java.io.IOException;
25-
2626
/**
2727
* This record reader has rowIds in order to seek to specific rows to skip unused data.
2828
* @param <V> the root type of the file

0 commit comments

Comments
 (0)