Version bumps BT-332 (#6456)
kshakir authored Aug 31, 2021
1 parent 61ac953 commit fdedcbb
Showing 42 changed files with 446 additions and 355 deletions.

.sdkmanrc (2 changes: 1 addition & 1 deletion)
@@ -1,3 +1,3 @@
# Enable auto-env through the sdkman_auto_env config
# Add key=value pairs of SDKs to use below
java=11.0.10.hs-adpt
java=11.0.11.hs-adpt

.travis.yml (10 changes: 3 additions & 7 deletions)
@@ -40,21 +40,18 @@ env:
- >-
BUILD_TYPE=centaurHoricromtalPapiV2beta
BUILD_MYSQL=5.7
- >-
BUILD_TYPE=centaurHoricromtalPapiV2beta
BUILD_MARIADB=10.3
- >-
BUILD_TYPE=centaurHoricromtalEngineUpgradePapiV2alpha1
BUILD_MYSQL=5.7
- >-
BUILD_TYPE=centaurHoricromtalEngineUpgradePapiV2alpha1
BUILD_MARIADB=10.3
- >-
BUILD_TYPE=centaurPapiUpgradePapiV2alpha1
BUILD_MYSQL=5.7
- >-
BUILD_TYPE=centaurPapiUpgradeNewWorkflowsPapiV2alpha1
BUILD_MYSQL=5.7
- >-
BUILD_TYPE=centaurLocal
BUILD_MARIADB=10.3
- >-
BUILD_TYPE=centaurLocal
BUILD_MYSQL=5.7
@@ -78,7 +75,6 @@ env:
BUILD_MYSQL=5.7
- >-
BUILD_TYPE=checkPublish
BUILD_MYSQL=5.7
- >-
BUILD_TYPE=conformanceLocal
BUILD_MYSQL=5.7

CromIAM/src/main/resources/sentry.properties (2 changes: 2 additions & 0 deletions)
@@ -0,0 +1,2 @@
# Quiet warnings about missing sentry DSNs by providing an empty string
dsn=

centaur/test_cromwell.sh (8 changes: 4 additions & 4 deletions)
@@ -124,11 +124,11 @@ cd "${RUN_DIR}"
TEST_STATUS="failed"

if [[ "${CENTAUR_SBT_COVERAGE}" == "true" ]]; then
sbt -Dsbt.supershell=false --warn coverage centaur/it:compile
CP=$(sbt -no-colors --error coverage "export centaur/it:dependencyClasspath")
sbt -Dsbt.supershell=false --warn coverage centaur/IntegrationTest/compile
CP=$(sbt -no-colors --error coverage "export centaur/IntegrationTest/dependencyClasspath")
else
sbt -Dsbt.supershell=false --warn centaur/it:compile
CP=$(sbt -no-colors --error "export centaur/it:dependencyClasspath")
sbt -Dsbt.supershell=false --warn centaur/IntegrationTest/compile
CP=$(sbt -no-colors --error "export centaur/IntegrationTest/dependencyClasspath")
fi

# Add the it-classes folder to the classpath to ensure logback configuration files are picked up.
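The switch from it:compile to IntegrationTest/compile reflects sbt's unified slash syntax: the colon-separated config:task scoping is deprecated in sbt 1.x, and the slash form addresses the same scoped tasks. A minimal sketch of the equivalence, assuming the existing centaur project and its IntegrationTest configuration (illustrative only, not part of the commit):

    // Legacy scoping as typed in the sbt shell:   centaur/it:compile
    // Unified slash syntax, the same task:        centaur/IntegrationTest/compile
    // The same scoped task referenced from a build.sbt task definition:
    val compileCentaurIt = Def.task {
      (centaur / IntegrationTest / compile).value
    }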

codegen_java/build.sbt (2 changes: 1 addition & 1 deletion)
@@ -6,7 +6,7 @@ lazy val root = (project in file(".")).
Seq(organization := "org.broadinstitute.cromwell",
name := "cromwell-client",
version := createVersion("0.1"),
scalaVersion := "2.12.12", // scala-steward:off (CROM-6777) - 2.12.13 blocked by duplicate import of nowarn
scalaVersion := "2.12.14",
scalacOptions ++= Seq("-feature"),
compile / javacOptions ++= Seq("-Xlint:deprecation"),
Compile / packageDoc / publishArtifact := false,

codegen_java/project/build.properties (2 changes: 1 addition & 1 deletion)
@@ -1 +1 @@
sbt.version=1.4.9
sbt.version=1.5.5

@@ -228,7 +228,7 @@ abstract class SlickDatabase(override val originalDatabaseConfig: Config) extend
Instead resorting to reflection.
*/
val message = pSQLException.getServerErrorMessage
val field = classOf[ServerErrorMessage].getDeclaredField("m_mesgParts")
val field = classOf[ServerErrorMessage].getDeclaredField("mesgParts")
field.setAccessible(true)
val parts = field.get(message).asInstanceOf[java.util.Map[Character, String]]
parts.remove('D')
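The rename from m_mesgParts to mesgParts tracks an internal field rename in the PostgreSQL JDBC driver's ServerErrorMessage class, presumably picked up by a driver bump elsewhere in this commit; the surrounding code reaches that private field via reflection so it can drop the detail ('D') part of server error messages. A hedged sketch of a lookup that would tolerate either field name across driver versions (an illustration only; the commit itself simply targets the new name):

    import scala.util.Try
    import org.postgresql.util.ServerErrorMessage

    // Prefer the post-rename field, fall back to the pre-rename one; throws only if neither exists.
    val field = Try(classOf[ServerErrorMessage].getDeclaredField("mesgParts"))
      .getOrElse(classOf[ServerErrorMessage].getDeclaredField("m_mesgParts"))
    field.setAccessible(true)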

@@ -14,11 +14,12 @@ object SqlConverters {
// https://github.com/slick/slick/issues/1026

implicit class TimestampToSystemOffsetDateTime(val timestamp: Timestamp) extends AnyVal {
def toSystemOffsetDateTime = timestamp.toLocalDateTime.atZone(ZoneId.systemDefault).toOffsetDateTime
def toSystemOffsetDateTime: OffsetDateTime = timestamp.toLocalDateTime.atZone(ZoneId.systemDefault).toOffsetDateTime
}

implicit class OffsetDateTimeToSystemTimestamp(val offsetDateTime: OffsetDateTime) extends AnyVal {
def toSystemTimestamp = Timestamp.valueOf(offsetDateTime.atZoneSameInstant(ZoneId.systemDefault).toLocalDateTime)
def toSystemTimestamp: Timestamp =
Timestamp.valueOf(offsetDateTime.atZoneSameInstant(ZoneId.systemDefault).toLocalDateTime)
}

implicit class ClobOptionToRawString(val clobOption: Option[Clob]) extends AnyVal {
@@ -56,10 +57,11 @@ object SqlConverters {
import eu.timepit.refined.api.Refined
import eu.timepit.refined.collection.NonEmpty

def toClobOption: Option[SerialClob] = if (str.isEmpty) None else Option(new SerialClob(str.toCharArray))
def toClobOption: Option[SerialClob] =
if (str == null || str.isEmpty) None else Option(new SerialClob(str.toCharArray))

def toClob(default: String Refined NonEmpty): SerialClob = {
val nonEmpty = if (str.isEmpty) default.value else str
val nonEmpty = if (str == null || str.isEmpty) default.value else str
new SerialClob(nonEmpty.toCharArray)
}
}
@@ -91,7 +93,7 @@ object SqlConverters {
}

implicit class BytesToBlobOption(val bytes: Array[Byte]) extends AnyVal {
def toBlobOption: Option[SerialBlob] = if (bytes.isEmpty) None else Option(new SerialBlob(bytes))
def toBlobOption: Option[SerialBlob] = if (bytes == null || bytes.isEmpty) None else Option(new SerialBlob(bytes))
}

implicit class EnhancedFiniteDuration(val duration: FiniteDuration) extends AnyVal {
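The added str == null and bytes == null guards make the converters treat null input like empty input instead of failing on the isEmpty call with a NullPointerException. A small usage sketch, assuming the SqlConverters implicits are imported:

    import javax.sql.rowset.serial.{SerialBlob, SerialClob}

    val fromNullString: Option[SerialClob]  = (null: String).toClobOption    // now None; previously threw NPE
    val fromEmptyString: Option[SerialClob] = "".toClobOption                // None, as before
    val fromEmptyBytes: Option[SerialBlob]  = Array.empty[Byte].toBlobOption // None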

@@ -36,22 +36,34 @@ class WorkflowDockerLookupActorSpec
with Mockito {

var workflowId: WorkflowId = _
var dockerSendingActor: TestProbe = _
var dockerHashingActor: TestProbe = _
var numReads: Int = _
var numWrites: Int = _

before {
workflowId = WorkflowId.randomId()
dockerHashingActor = TestProbe(s"test-probe-$workflowId")
/*
Instead of TestKit.self, use a custom sender that is reset before each test.
Otherwise, a latent failure/timeout from one test may be sent to the shared TestKit.self during a different test.
In that case a call to expectMsg() will suddenly receive an unexpected result. This especially happens in
slow-running CI where the entire suite takes a few minutes to run.
*/
dockerSendingActor = TestProbe(s"test-sending-probe-$workflowId")
dockerHashingActor = TestProbe(s"test-hashing-probe-$workflowId")
numReads = 0
numWrites = 0
}

it should "wait and resubmit the docker request when it gets a backpressure message" in {
val backoff = SimpleExponentialBackoff(2.seconds, 10.minutes, 2D)

val lookupActor = TestActorRef(Props(new TestWorkflowDockerLookupActor(workflowId, dockerHashingActor.ref, Submitted, backoff)), self)
lookupActor ! LatestRequest
val lookupActor = TestActorRef(
Props(new TestWorkflowDockerLookupActor(workflowId, dockerHashingActor.ref, Submitted, backoff)),
dockerSendingActor.ref,
)
lookupActor.tell(LatestRequest, dockerSendingActor.ref)

dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.reply(BackPressure(LatestRequest))
@@ -66,28 +78,28 @@ class WorkflowDockerLookupActorSpec
}

val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false, db))
lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)

// The WorkflowDockerLookupActor should not have the hash for this tag yet and will need to query the dockerHashingActor.
dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.reply(LatestSuccessResponse)
// The WorkflowDockerLookupActor should forward the success message to this actor.
expectMsg(LatestSuccessResponse)
dockerSendingActor.expectMsg(LatestSuccessResponse)
numWrites should equal(1)

// The WorkflowDockerLookupActor should now have this hash in its mappings and should not query the dockerHashingActor again.
lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
dockerHashingActor.expectNoMessage()
// The WorkflowDockerLookupActor should forward the success message to this actor.
expectMsg(LatestSuccessResponse)
dockerSendingActor.expectMsg(LatestSuccessResponse)
numWrites should equal(1)
}

it should "soldier on after docker hashing actor timeouts" in {
val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false))

lookupActor ! LatestRequest
lookupActor ! OlderRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
lookupActor.tell(OlderRequest, dockerSendingActor.ref)

val timeout = DockerHashActorTimeout(LatestRequest)

@@ -100,20 +112,20 @@ class WorkflowDockerLookupActorSpec
// DockerHashActor for this hash again.
dockerHashingActor.reply(OlderSuccessResponse)

val results = receiveN(2, 2 seconds).toSet
val results = dockerSendingActor.receiveN(2, 2 seconds).toSet
val failedRequests = results collect {
case f: WorkflowDockerLookupFailure if f.request == LatestRequest => f.request
}

failedRequests should equal(Set(LatestRequest))

// Try again. The hashing actor should receive the latest message and this time won't time out.
lookupActor ! LatestRequest
lookupActor ! OlderRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
lookupActor.tell(OlderRequest, dockerSendingActor.ref)
dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.reply(LatestSuccessResponse)

val responses = receiveN(2, 2 seconds).toSet
val responses = dockerSendingActor.receiveN(2, 2 seconds).toSet
val hashResponses = responses collect { case msg: DockerInfoSuccessResponse => msg }
// Success after transient timeout failures:
hashResponses should equal(Set(LatestSuccessResponse, OlderSuccessResponse))
@@ -123,7 +135,7 @@ class WorkflowDockerLookupActorSpec
it should "not fail and enter terminal state when response for certain image id from DockerHashingActor arrived after the self-imposed timeout" in {
val lookupActor = TestFSMRef(new WorkflowDockerLookupActor(workflowId, dockerHashingActor.ref, isRestart = false, EngineServicesStore.engineDatabaseInterface))

lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)

val timeout = DockerHashActorTimeout(LatestRequest)

@@ -132,15 +144,15 @@ class WorkflowDockerLookupActorSpec
// WorkflowDockerLookupActor actually sends DockerHashActorTimeout to itself
lookupActor.tell(timeout, lookupActor)

val failedRequest: WorkflowDockerLookupFailure = receiveOne(2 seconds).asInstanceOf[WorkflowDockerLookupFailure]
val failedRequest: WorkflowDockerLookupFailure = dockerSendingActor.receiveOne(2 seconds).asInstanceOf[WorkflowDockerLookupFailure]
failedRequest.request shouldBe LatestRequest

lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.reply(LatestSuccessResponse) // responding to the previously timed-out request
dockerHashingActor.reply(LatestSuccessResponse) // responding to the current request

val hashResponse = receiveOne(2 seconds)
val hashResponse = dockerSendingActor.receiveOne(2 seconds)
hashResponse shouldBe LatestSuccessResponse

// Give WorkflowDockerLookupActor a chance to finish its unfinished business
@@ -151,8 +163,8 @@

it should "respond appropriately to docker hash lookup failures" in {
val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false))
lookupActor ! LatestRequest
lookupActor ! OlderRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
lookupActor.tell(OlderRequest, dockerSendingActor.ref)

// The WorkflowDockerLookupActor should not have the hash for this tag yet and will need to query the dockerHashingActor.
dockerHashingActor.expectMsg(LatestRequest)
@@ -162,7 +174,7 @@ class WorkflowDockerLookupActorSpec
dockerHashingActor.reply(LatestSuccessResponse)
dockerHashingActor.reply(olderFailedResponse)

val results = receiveN(2, 2 seconds).toSet
val results = dockerSendingActor.receiveN(2, 2 seconds).toSet
val mixedResponses = results collect {
case msg: DockerInfoSuccessResponse => msg
// Scoop out the request here since we can't match the exception on the whole message.
@@ -172,10 +184,10 @@ class WorkflowDockerLookupActorSpec
Set(LatestSuccessResponse, OlderRequest) should equal(mixedResponses)

// Try again, I have a good feeling about this.
lookupActor ! OlderRequest
lookupActor.tell(OlderRequest, dockerSendingActor.ref)
dockerHashingActor.expectMsg(OlderRequest)
dockerHashingActor.reply(OlderSuccessResponse)
expectMsg(OlderSuccessResponse)
dockerSendingActor.expectMsg(OlderSuccessResponse)
}

it should "reuse previously looked up hashes following a restart" in {
@@ -186,12 +198,12 @@ class WorkflowDockerLookupActorSpec

val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = true, db))

lookupActor ! LatestRequest
lookupActor ! OlderRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
lookupActor.tell(OlderRequest, dockerSendingActor.ref)

dockerHashingActor.expectNoMessage()

val results = receiveN(2, 2 seconds).toSet
val results = dockerSendingActor.receiveN(2, 2 seconds).toSet
val successes = results collect { case result: DockerInfoSuccessResponse => result }

successes should equal(Set(LatestSuccessResponse, OlderSuccessResponse))
@@ -201,15 +213,15 @@ class WorkflowDockerLookupActorSpec
val db = dbWithWrite(Future.successful(()))
val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false, db))

lookupActor ! LatestRequest
lookupActor ! OlderRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
lookupActor.tell(OlderRequest, dockerSendingActor.ref)

dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.expectMsg(OlderRequest)
dockerHashingActor.reply(LatestSuccessResponse)
dockerHashingActor.reply(OlderSuccessResponse)

val results = receiveN(2, 2 seconds).toSet
val results = dockerSendingActor.receiveN(2, 2 seconds).toSet
val successes = results collect { case result: DockerInfoSuccessResponse => result }

successes should equal(Set(LatestSuccessResponse, OlderSuccessResponse))
@@ -222,21 +234,21 @@ class WorkflowDockerLookupActorSpec
}

val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false, db))
lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)

// The WorkflowDockerLookupActor should not have the hash for this tag yet and will need to query the dockerHashingActor.
dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.reply(LatestSuccessResponse)
// The WorkflowDockerLookupActor is going to fail when it tries to write to that broken DB.
expectMsgClass(classOf[WorkflowDockerLookupFailure])
dockerSendingActor.expectMsgClass(classOf[WorkflowDockerLookupFailure])
numWrites should equal(1)

lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)
// The WorkflowDockerLookupActor will query the dockerHashingActor again.
dockerHashingActor.expectMsg(LatestRequest)
dockerHashingActor.reply(LatestSuccessResponse)
// The WorkflowDockerLookupActor should forward the success message to this actor.
expectMsg(LatestSuccessResponse)
dockerSendingActor.expectMsg(LatestSuccessResponse)
numWrites should equal(2)
}

@@ -247,10 +259,10 @@ class WorkflowDockerLookupActorSpec
}

val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = true, db))
lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)

dockerHashingActor.expectNoMessage()
expectMsgClass(classOf[WorkflowDockerTerminalFailure])
dockerSendingActor.expectMsgClass(classOf[WorkflowDockerTerminalFailure])
numReads should equal(1)
}

@@ -265,10 +277,10 @@ class WorkflowDockerLookupActorSpec
}

val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = true, db))
lookupActor ! LatestRequest
lookupActor.tell(LatestRequest, dockerSendingActor.ref)

dockerHashingActor.expectNoMessage()
expectMsgClass(classOf[WorkflowDockerTerminalFailure])
dockerSendingActor.expectMsgClass(classOf[WorkflowDockerTerminalFailure])
numReads should equal(1)
}

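The recurring change in this spec replaces sends that used the shared TestKit testActor (lookupActor ! msg followed by expectMsg) with an explicit per-test probe, so a late reply belonging to one test cannot satisfy an expectMsg in another. The pattern in isolation, using the same names as the spec:

    // Created fresh in the before block, one probe per test:
    dockerSendingActor = TestProbe(s"test-sending-probe-$workflowId")

    // Send with an explicit sender instead of the shared testActor...
    lookupActor.tell(LatestRequest, dockerSendingActor.ref)
    // ...and assert on that probe, so only this test's replies are observed.
    dockerSendingActor.expectMsg(LatestSuccessResponse)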

@@ -57,7 +57,10 @@ case class OssBatchDeleteCommand(
override val file: OssPath,
override val swallowIOExceptions: Boolean
) extends IoDeleteCommand(file, swallowIOExceptions) with OssBatchIoCommand[Unit, Void] {
def operation: Unit = file.ossClient.deleteObject(file.bucket, file.key)
def operation: Unit = {
file.ossClient.deleteObject(file.bucket, file.key)
()
}
override protected def mapOssResponse(response: Void): Unit = ()
override def commandDescription: String = s"OssBatchDeleteCommand file '$file' swallowIOExceptions '$swallowIOExceptions'"
}

@@ -98,6 +98,7 @@ trait OssNioUtilSpec extends AnyFlatSpecLike with CromwellTimeoutSpec with Mocki
OssStorageRetry.from(
() => ossClient.deleteObject(path.bucket, path.key)
)
()
}

def writeObject(path: OssStoragePath): Unit = {
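Both OSS changes above add an explicit trailing () so that a method declared to return Unit discards a non-Unit result deliberately rather than implicitly, which stricter compiler and linter settings flag. A small self-contained sketch of the idiom (a hypothetical helper, not code from the repository):

    import java.util.{ArrayList => JArrayList}

    def appendQuietly(items: JArrayList[String], item: String): Unit = {
      items.add(item) // add() returns a Boolean we do not need
      ()              // discard it explicitly so the Unit return is intentional
    }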