Spark/Flink: Replace & Ban Hamcrest usage (apache#6030)
Hamcrest Matchers are usually much clunkier to use than AssertJ assertions, so we
should prefer AssertJ: its fluent API is more flexible and more readable.
nastra authored Oct 21, 2022
1 parent 39a2c12 commit c103b93
Showing 8 changed files with 25 additions and 26 deletions.
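
As a concrete illustration of the motivation, here is a minimal, hypothetical test (not part of this commit) showing the same collection assertion in both styles; the class and variable names are invented:

import java.util.Arrays;
import java.util.List;

import org.assertj.core.api.Assertions;
import org.hamcrest.CoreMatchers;
import org.hamcrest.MatcherAssert;
import org.junit.Test;

public class AssertionStyleExample {

  @Test
  public void collectionAssertions() {
    List<String> splits = Arrays.asList("split-0", "split-1");

    // Hamcrest: the expectation is wrapped in a Matcher and reads inside-out.
    MatcherAssert.assertThat(splits, CoreMatchers.hasItem("split-0"));

    // AssertJ: a fluent chain that reads left to right and lets several
    // assertions share the same actual value.
    Assertions.assertThat(splits)
        .hasSize(2)
        .contains("split-0")
        .doesNotContain("split-9");
  }
}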
5 changes: 5 additions & 0 deletions .baseline/checkstyle/checkstyle.xml
@@ -394,6 +394,11 @@
     <property name="illegalClasses" value="org.junit.rules.ExpectedException"/>
     <message key="import.illegal" value="Prefer using Assertions.assertThatThrownBy(...).isInstanceOf(...) instead."/>
   </module>
+  <module name="IllegalImport">
+    <property name="id" value="BanHamcrestUsage"/>
+    <property name="illegalPkgs" value="org.hamcrest"/>
+    <message key="import.illegal" value="Prefer using org.assertj.core.api.Assertions instead."/>
+  </module>
   <module name="RegexpSinglelineJava">
     <property name="ignoreComments" value="true"/>
     <property name="format" value="@Json(S|Des)erialize"/>
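
Checkstyle's IllegalImport module flags any import from the packages listed in illegalPkgs, so with this change every org.hamcrest import fails the build with the configured message. A hypothetical offending file:

// SomeTest.java (hypothetical) - rejected by the new BanHamcrestUsage check:
import org.hamcrest.CoreMatchers;
// Checkstyle error: Prefer using org.assertj.core.api.Assertions instead.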
@@ -32,8 +32,7 @@
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitState;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitStatus;
 import org.apache.iceberg.flink.source.split.SplitRequestEvent;
-import org.hamcrest.CoreMatchers;
-import org.hamcrest.MatcherAssert;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -97,9 +96,8 @@ public void testDiscoverWhenReaderRegistered() throws Exception {
     enumeratorContext.triggerAllActions();

     Assert.assertTrue(enumerator.snapshotState(1).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }

   @Test
@@ -145,9 +143,8 @@ public void testRequestingReaderUnavailableWhenSplitDiscovered() throws Exception {
     enumerator.handleSourceEvent(2, new SplitRequestEvent());

     Assert.assertTrue(enumerator.snapshotState(2).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }

   private static ContinuousIcebergEnumerator createEnumerator(
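
The same two substitutions repeat across the Flink and Spark files in this commit. As a rough cheat sheet, assuming AssertJ 3.x and with illustrative names (this class is not in the commit):

import java.util.List;

import org.assertj.core.api.Assertions;

class HamcrestToAssertJ {

  static void examples(List<String> actual, List<String> expected, String item) {
    // Replaces MatcherAssert.assertThat(actual, CoreMatchers.hasItem(item)):
    Assertions.assertThat(actual).contains(item);

    // Replaces Assert.assertThat(actual, CoreMatchers.is(expected)):
    Assertions.assertThat(actual).isEqualTo(expected);
  }
}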
@@ -32,8 +32,7 @@
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitState;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplitStatus;
 import org.apache.iceberg.flink.source.split.SplitRequestEvent;
-import org.hamcrest.CoreMatchers;
-import org.hamcrest.MatcherAssert;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -97,9 +96,8 @@ public void testDiscoverWhenReaderRegistered() throws Exception {
     enumeratorContext.triggerAllActions();

     Assert.assertTrue(enumerator.snapshotState(1).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }

   @Test
@@ -145,9 +143,8 @@ public void testRequestingReaderUnavailableWhenSplitDiscovered() throws Exception {
     enumerator.handleSourceEvent(2, new SplitRequestEvent());

     Assert.assertTrue(enumerator.snapshotState(2).pendingSplits().isEmpty());
-    MatcherAssert.assertThat(
-        enumeratorContext.getSplitAssignments().get(2).getAssignedSplits(),
-        CoreMatchers.hasItem(splits.get(0)));
+    Assertions.assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
+        .contains(splits.get(0));
   }

   private static ContinuousIcebergEnumerator createEnumerator(
@@ -57,7 +57,7 @@
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public void testInt96TimestampProducedBySparkIsReadCorrectly() throws IOException {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);

     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
@@ -57,7 +57,7 @@
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public void testInt96TimestampProducedBySparkIsReadCorrectly() throws IOException {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);

     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
@@ -57,7 +57,7 @@
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public void testInt96TimestampProducedBySparkIsReadCorrectly() throws IOException {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);

     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
@@ -57,7 +57,7 @@
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -158,7 +158,7 @@ public void testInt96TimestampProducedBySparkIsReadCorrectly() throws IOException {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);

     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
@@ -57,7 +57,7 @@
 import org.apache.spark.sql.types.Metadata;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
-import org.hamcrest.CoreMatchers;
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
@@ -159,7 +159,7 @@ public void testInt96TimestampProducedBySparkIsReadCorrectly() throws IOException {
     InputFile parquetInputFile = Files.localInput(outputFilePath);
     List<InternalRow> readRows = rowsFromFile(parquetInputFile, schema);
     Assert.assertEquals(rows.size(), readRows.size());
-    Assert.assertThat(readRows, CoreMatchers.is(rows));
+    Assertions.assertThat(readRows).isEqualTo(rows);

     // Now we try to import that file as an Iceberg table to make sure Iceberg can read
     // Int96 end to end.
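
A note on the Spark-side change: org.junit.Assert.assertThat delegates to Hamcrest's MatcherAssert and is deprecated as of JUnit 4.13, so the AssertJ form also silences a deprecation warning. The new isEqualTo matches the equals()-based semantics of CoreMatchers.is(); a sketch with hypothetical names, including an AssertJ alternative with element-wise failure reporting:

import java.util.List;

import org.assertj.core.api.Assertions;

class ReadRowsAssertions {

  static void verify(List<Object> readRows, List<Object> rows) {
    // Equivalent to the replaced Assert.assertThat(readRows, CoreMatchers.is(rows)):
    Assertions.assertThat(readRows).isEqualTo(rows);

    // Alternative that reports missing/unexpected elements on failure:
    Assertions.assertThat(readRows).containsExactlyElementsOf(rows);
  }
}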
