From e2d3924056ca69816d59eed296989bc1247db20c Mon Sep 17 00:00:00 2001
From: Alexandre Gattiker
Date: Sun, 13 Oct 2019 08:18:51 +0200
Subject: [PATCH] Fixed tests

---
 components/azure-dataexplorer/create-dataexplorer.sh  |  2 +-
 eventhubs-databricks-azuresql/test_spec.json          |  1 +
 eventhubs-databricks-cosmosdb/test_spec.json          |  1 +
 eventhubs-databricks-delta/test_spec.json             |  1 +
 eventhubs-dataexplorer/test_spec.json                 | 11 +++++++++++
 eventhubs-functions-azuresql/test_spec.json           |  1 +
 eventhubs-functions-cosmosdb/test_spec.json           |  1 +
 eventhubs-streamanalytics-azuresql/test_spec.json     |  1 +
 eventhubs-streamanalytics-cosmosdb/test_spec.json     |  1 +
 eventhubs-streamanalytics-eventhubs/test_spec.json    |  1 +
 eventhubs-timeseriesinsights/test_spec.json           |  1 +
 eventhubskafka-databricks-cosmosdb/test_spec.json     |  1 +
 hdinsightkafka-databricks-sqldw/test_spec.json        |  1 +
 integration-tests/test_solutions.py                   | 10 ++++++++--
 .../databricks/notebooks/eventhubs-to-azuresql.scala  |  2 +-
 .../databricks/notebooks/eventhubs-to-cosmosdb.scala  |  3 ++-
 .../databricks/notebooks/eventhubs-to-delta.scala     |  2 +-
 .../databricks/notebooks/kafka-to-cosmosdb.scala      |  2 +-
 streaming/databricks/notebooks/kafka-to-sqldw.scala   |  2 +-
 streaming/databricks/notebooks/verify-common.scala    |  2 +-
 .../databricks/notebooks/verify-dataexplorer.scala    |  4 +++-
 streaming/databricks/notebooks/verify-delta.scala     |  4 +++-
 streaming/databricks/notebooks/verify-eventhubs.scala |  4 +++-
 streaming/databricks/notebooks/verify-sqldw.scala     |  4 +++-
 .../notebooks/verify-timeseriesinsights-parquet.scala |  4 +++-
 streaming/databricks/runners/verify-dataexplorer.sh   |  2 +-
 26 files changed, 54 insertions(+), 15 deletions(-)
 create mode 100644 eventhubs-dataexplorer/test_spec.json

diff --git a/components/azure-dataexplorer/create-dataexplorer.sh b/components/azure-dataexplorer/create-dataexplorer.sh
index afd56360..e1ac94c5 100755
--- a/components/azure-dataexplorer/create-dataexplorer.sh
+++ b/components/azure-dataexplorer/create-dataexplorer.sh
@@ -76,7 +76,7 @@ echo 'creating Data Explorer table'
 kustoQuery "/v1/rest/mgmt" ".create table EventTable ( eventId: string, complexData: dynamic, value: string, type: string, deviceId: string, deviceSequenceNumber: long, createdAt: datetime)"
 echo 'creating Data Explorer table mapping'
kustoQuery "/v1/rest/mgmt" ".show table EventTable ingestion json mapping \\\"EventMapping\\\"" 2>/dev/null; then - kustoQuery "/v1/rest/mgmt" ".create table EventTable ingestion json mapping 'EventMapping' '[ { \\\"column\\\": \\\"eventId\\\", \\\"path\\\": \\\"$.eventId\\\" }, { \\\"column\\\": \\\"complexData\\\", \\\"path\\\": \\\"$.complexData\\\" }, { \\\"column\\\": \\\"value\\\", \\\"path\\\": \\\"$.value\\\" }, { \\\"column\\\": \\\"type\\\", \\\"path\\\": \\\"$.type\\\" }, { \\\"column\\\": \\\"deviceId\\\", \\\"path\\\": \\\"$.deviceId\\\" }, { \\\"column\\\": \\\deviceSequenceNumber\\\", \\\"path\\\": \\\"$deviceSequenceNumber\\\" }, { \\\"column\\\": \\\"createdAt\\\", \\\"path\\\": \\\"$.createdAt\\\" } ]'" + kustoQuery "/v1/rest/mgmt" ".create table EventTable ingestion json mapping 'EventMapping' '[ { \\\"column\\\": \\\"eventId\\\", \\\"path\\\": \\\"$.eventId\\\" }, { \\\"column\\\": \\\"complexData\\\", \\\"path\\\": \\\"$.complexData\\\" }, { \\\"column\\\": \\\"value\\\", \\\"path\\\": \\\"$.value\\\" }, { \\\"column\\\": \\\"type\\\", \\\"path\\\": \\\"$.type\\\" }, { \\\"column\\\": \\\"deviceId\\\", \\\"path\\\": \\\"$.deviceId\\\" }, { \\\"column\\\": \\\"deviceSequenceNumber\\\", \\\"path\\\": \\\"$.deviceSequenceNumber\\\" }, { \\\"column\\\": \\\"createdAt\\\", \\\"path\\\": \\\"$.createdAt\\\" } ]'" fi echo "getting Service Principal ID" diff --git a/eventhubs-databricks-azuresql/test_spec.json b/eventhubs-databricks-azuresql/test_spec.json index e515c4bb..8f050404 100644 --- a/eventhubs-databricks-azuresql/test_spec.json +++ b/eventhubs-databricks-azuresql/test_spec.json @@ -1,5 +1,6 @@ [ { + "enabled": true, "stage": "2", "short": "eda1", "steps": "CIDPTMV", diff --git a/eventhubs-databricks-cosmosdb/test_spec.json b/eventhubs-databricks-cosmosdb/test_spec.json index 3eebdb8e..be717945 100644 --- a/eventhubs-databricks-cosmosdb/test_spec.json +++ b/eventhubs-databricks-cosmosdb/test_spec.json @@ -1,5 +1,6 @@ [ { + "enabled": true, "stage": "2", "short": "edc1", "steps": "CIDPTMV", diff --git a/eventhubs-databricks-delta/test_spec.json b/eventhubs-databricks-delta/test_spec.json index 28ff2d92..4137c684 100644 --- a/eventhubs-databricks-delta/test_spec.json +++ b/eventhubs-databricks-delta/test_spec.json @@ -1,5 +1,6 @@ [ { + "enabled": true, "stage": "1", "short": "edd1", "steps": "CIPTMV", diff --git a/eventhubs-dataexplorer/test_spec.json b/eventhubs-dataexplorer/test_spec.json new file mode 100644 index 00000000..bfa2c8b2 --- /dev/null +++ b/eventhubs-dataexplorer/test_spec.json @@ -0,0 +1,11 @@ +[ + { + "enabled": false, + "stage": "3", + "short": "ed1", + "steps": "CIDTMV", + "minutes": "45", + "throughput": "1", + "extra_args": [] + } +] diff --git a/eventhubs-functions-azuresql/test_spec.json b/eventhubs-functions-azuresql/test_spec.json index b6f41b59..66fb1071 100644 --- a/eventhubs-functions-azuresql/test_spec.json +++ b/eventhubs-functions-azuresql/test_spec.json @@ -1,5 +1,6 @@ [ { + "enabled": true, "stage": "2", "short": "efa1", "steps": "CIDPTMV", diff --git a/eventhubs-functions-cosmosdb/test_spec.json b/eventhubs-functions-cosmosdb/test_spec.json index 56ef90a7..39cb42b0 100644 --- a/eventhubs-functions-cosmosdb/test_spec.json +++ b/eventhubs-functions-cosmosdb/test_spec.json @@ -1,5 +1,6 @@ [ { + "enabled": true, "stage": "2", "short": "efc1", "steps": "CIDPTMV", diff --git a/eventhubs-streamanalytics-azuresql/test_spec.json b/eventhubs-streamanalytics-azuresql/test_spec.json index 0e3137fc..a5323848 100644 --- 
+++ b/eventhubs-streamanalytics-azuresql/test_spec.json
@@ -1,5 +1,6 @@
 [
   {
+    "enabled": true,
     "stage": "2",
     "short": "esa1",
     "steps": "CIDPTMV",
diff --git a/eventhubs-streamanalytics-cosmosdb/test_spec.json b/eventhubs-streamanalytics-cosmosdb/test_spec.json
index 32f0bacb..76930baf 100644
--- a/eventhubs-streamanalytics-cosmosdb/test_spec.json
+++ b/eventhubs-streamanalytics-cosmosdb/test_spec.json
@@ -1,5 +1,6 @@
 [
   {
+    "enabled": true,
     "stage": "2",
     "short": "esc1",
     "steps": "CIDPTMV",
diff --git a/eventhubs-streamanalytics-eventhubs/test_spec.json b/eventhubs-streamanalytics-eventhubs/test_spec.json
index ea61d557..1ba68418 100644
--- a/eventhubs-streamanalytics-eventhubs/test_spec.json
+++ b/eventhubs-streamanalytics-eventhubs/test_spec.json
@@ -1,5 +1,6 @@
 [
   {
+    "enabled": true,
     "stage": "2",
     "short": "ese1",
     "steps": "CIPTMV",
diff --git a/eventhubs-timeseriesinsights/test_spec.json b/eventhubs-timeseriesinsights/test_spec.json
index 0fb73008..d0918f67 100644
--- a/eventhubs-timeseriesinsights/test_spec.json
+++ b/eventhubs-timeseriesinsights/test_spec.json
@@ -1,5 +1,6 @@
 [
   {
+    "enabled": true,
     "stage": "2",
     "short": "eti1",
     "steps": "CIDTMV",
diff --git a/eventhubskafka-databricks-cosmosdb/test_spec.json b/eventhubskafka-databricks-cosmosdb/test_spec.json
index 110952bc..f71f4c52 100644
--- a/eventhubskafka-databricks-cosmosdb/test_spec.json
+++ b/eventhubskafka-databricks-cosmosdb/test_spec.json
@@ -1,5 +1,6 @@
 [
   {
+    "enabled": false,
     "stage": "2",
     "short": "kdc1",
     "steps": "CIDPTMV",
diff --git a/hdinsightkafka-databricks-sqldw/test_spec.json b/hdinsightkafka-databricks-sqldw/test_spec.json
index 4db2897a..599e44a1 100644
--- a/hdinsightkafka-databricks-sqldw/test_spec.json
+++ b/hdinsightkafka-databricks-sqldw/test_spec.json
@@ -1,5 +1,6 @@
 [
   {
+    "enabled": true,
     "stage": "3",
     "short": "hdw1",
     "steps": "CIDPTMV",
diff --git a/integration-tests/test_solutions.py b/integration-tests/test_solutions.py
index 1d7a54f5..a49c64cb 100644
--- a/integration-tests/test_solutions.py
+++ b/integration-tests/test_solutions.py
@@ -23,7 +23,7 @@ def pytest_generate_tests(metafunc):
         test_id = "{} {} ({})".format(
             spec["short"], spec["folder"], " ".join(spec["extra_args"]))
         test_ids.append(test_id)
-    argnames = ["folder", "short", "steps",
+    argnames = ["enabled", "folder", "short", "steps",
                 "minutes", "throughput", "extra_args"]
     metafunc.parametrize(
         argnames,
@@ -38,7 +38,9 @@ class TestSolutions():
 
     # Flaky is used to rerun tests that may fail because of transient cloud issues.
     #@flaky(max_runs=3)
     def test_solution(self, folder, steps, minutes, throughput, extra_args):
+        print(self, folder, steps, minutes, throughput, extra_args)
+
         cmd = ["./create-solution.sh",
                "-d", self.rg,
                "-s", steps,
@@ -52,7 +54,11 @@ def test_solution(self, folder, steps, minutes, throughput, extra_args):
         assert test_output == ""
 
     @pytest.fixture(autouse=True)
-    def run_around_tests(self, short):
+    def run_around_tests(self, short, enabled):
+
+        if not enabled:
+            pytest.skip("Disabled in test_spec.json")
+
         self.rg = os.environ['RESOURCE_GROUP_PREFIX'] + short
         # Delete solution resource group if already exists
         subprocess.run(["./check-resource-group.sh", self.rg], check=True)
diff --git a/streaming/databricks/notebooks/eventhubs-to-azuresql.scala b/streaming/databricks/notebooks/eventhubs-to-azuresql.scala
index 238e935e..f0607748 100644
--- a/streaming/databricks/notebooks/eventhubs-to-azuresql.scala
+++ b/streaming/databricks/notebooks/eventhubs-to-azuresql.scala
@@ -123,7 +123,7 @@ var writeDataBatch : java.sql.PreparedStatement = null
 
 val WriteToSQLQuery = dataToWrite
   .writeStream
-  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/streaming-azuresql")
+  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/eventhubs-to-azuresql")
   .foreachBatch((batchDF: DataFrame, batchId: Long) => retry(6, 0) {
 
     // Load data into staging table.
diff --git a/streaming/databricks/notebooks/eventhubs-to-cosmosdb.scala b/streaming/databricks/notebooks/eventhubs-to-cosmosdb.scala
index d867d68c..e78e4306 100644
--- a/streaming/databricks/notebooks/eventhubs-to-cosmosdb.scala
+++ b/streaming/databricks/notebooks/eventhubs-to-cosmosdb.scala
@@ -47,6 +47,7 @@ val streamData = eventhubs
 // for the description of the available configurations.
 val cosmosDbConfig = Map(
   "Endpoint" -> dbutils.widgets.get("cosmosdb-endpoint"),
+  "ConnectionMode" -> "DirectHttps",
   "Masterkey" -> dbutils.secrets.get(scope = "MAIN", key = "cosmosdb-write-master-key"),
   "Database" -> dbutils.widgets.get("cosmosdb-database"),
   "Collection" -> dbutils.widgets.get("cosmosdb-collection")
@@ -67,7 +68,7 @@ import com.microsoft.azure.cosmosdb.spark.streaming.CosmosDBSinkProvider
 
 streamDataMutated
   .writeStream
   .format(classOf[CosmosDBSinkProvider].getName)
-  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/streaming-cosmosdb")
+  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/eventhubs-to-cosmosdb")
   .outputMode("append")
   .options(cosmosDbConfig)
   .start()
diff --git a/streaming/databricks/notebooks/eventhubs-to-delta.scala b/streaming/databricks/notebooks/eventhubs-to-delta.scala
index 3ee9fc09..9d2563dd 100644
--- a/streaming/databricks/notebooks/eventhubs-to-delta.scala
+++ b/streaming/databricks/notebooks/eventhubs-to-delta.scala
@@ -60,7 +60,7 @@ streamData
   .withColumn("storedAt", current_timestamp)
   .writeStream
   .outputMode("append")
-  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/streaming-delta/" + dbutils.widgets.get("delta-table"))
+  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/eventhubs-to-delta/" + dbutils.widgets.get("delta-table"))
   .format("delta")
   .option("path", s"abfss://streamingatscale@$gen2account.dfs.core.windows.net/" + dbutils.widgets.get("delta-table"))
   .table(dbutils.widgets.get("delta-table"))
diff --git a/streaming/databricks/notebooks/kafka-to-cosmosdb.scala b/streaming/databricks/notebooks/kafka-to-cosmosdb.scala
index 5d8a4b66..c121efc8 100644
--- a/streaming/databricks/notebooks/kafka-to-cosmosdb.scala
+++ b/streaming/databricks/notebooks/kafka-to-cosmosdb.scala
@@ -67,7 +67,7 @@ import com.microsoft.azure.cosmosdb.spark.streaming.CosmosDBSinkProvider
 
 streamDataMutated
   .writeStream
   .format(classOf[CosmosDBSinkProvider].getName)
-  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/streaming-cosmosdb")
+  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/kafka-to-cosmosdb")
   .outputMode("append")
   .options(cosmosDbConfig)
   .start()
diff --git a/streaming/databricks/notebooks/kafka-to-sqldw.scala b/streaming/databricks/notebooks/kafka-to-sqldw.scala
index 11e47f6c..44dc7ca1 100644
--- a/streaming/databricks/notebooks/kafka-to-sqldw.scala
+++ b/streaming/databricks/notebooks/kafka-to-sqldw.scala
@@ -59,5 +59,5 @@ dataToWrite.writeStream
   .option("forwardSparkAzureStorageCredentials", "true")
   .option("maxStrLength", "4000")
   .option("dbTable", dbutils.widgets.get("sqldw-table"))
-  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/streaming-sqldw")
+  .option("checkpointLocation", "dbfs:/streaming_at_scale/checkpoints/kafka-to-sqldw")
   .start()
diff --git a/streaming/databricks/notebooks/verify-common.scala b/streaming/databricks/notebooks/verify-common.scala
index ffcae117..5ff0a6af 100644
--- a/streaming/databricks/notebooks/verify-common.scala
+++ b/streaming/databricks/notebooks/verify-common.scala
@@ -77,7 +77,7 @@ if (assertLatencyMilliseconds.nonEmpty) {
   val expected = assertLatencyMilliseconds.get
   val actual = stats.minLatencySeconds
   if (actual.isEmpty || ((actual.get * 1000) > expected)) {
-    assertionsFailed += s"max latency in milliseconds: expected max $expected, got $actual"
+    assertionsFailed += s"max latency in milliseconds: expected max $expected milliseconds, got $actual seconds"
   }
 }
 
diff --git a/streaming/databricks/notebooks/verify-dataexplorer.scala b/streaming/databricks/notebooks/verify-dataexplorer.scala
index f334f551..96b328e9 100644
--- a/streaming/databricks/notebooks/verify-dataexplorer.scala
+++ b/streaming/databricks/notebooks/verify-dataexplorer.scala
@@ -9,6 +9,7 @@ dbutils.widgets.text("dataexplorer-storage-container", "dataexplorer")
 dbutils.widgets.text("assert-events-per-second", "900", "Assert min events per second (computed over 1 min windows)")
 dbutils.widgets.text("assert-latency-milliseconds", "15000", "Assert max latency in milliseconds (averaged over 1 min windows)")
 dbutils.widgets.text("assert-duplicate-fraction", "0", "Assert max proportion of duplicate events")
+dbutils.widgets.text("assert-outofsequence-fraction", "0", "Assert max proportion of out-of-sequence events")
 
 // COMMAND ----------
 
@@ -49,5 +50,6 @@ dbutils.notebook.run("verify-common", 0, Map(
   "input-table" -> (spark.conf.get("spark.sql.globalTempDatabase") + "." + tempTable),
   "assert-events-per-second" -> dbutils.widgets.get("assert-events-per-second"),
   "assert-latency-milliseconds" -> dbutils.widgets.get("assert-latency-milliseconds"),
-  "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction")
+  "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction"),
+  "assert-outofsequence-fraction" -> dbutils.widgets.get("assert-outofsequence-fraction")
 ))
diff --git a/streaming/databricks/notebooks/verify-delta.scala b/streaming/databricks/notebooks/verify-delta.scala
index 628afc9b..5d7036ad 100644
--- a/streaming/databricks/notebooks/verify-delta.scala
+++ b/streaming/databricks/notebooks/verify-delta.scala
@@ -4,6 +4,7 @@ dbutils.widgets.text("delta-table", "streaming_events", "Delta table containing
 dbutils.widgets.text("assert-events-per-second", "900", "Assert min events per second (computed over 1 min windows)")
 dbutils.widgets.text("assert-latency-milliseconds", "15000", "Assert max latency in milliseconds (averaged over 1 min windows)")
 dbutils.widgets.text("assert-duplicate-fraction", "0", "Assert max proportion of duplicate events")
+dbutils.widgets.text("assert-outofsequence-fraction", "0", "Assert max proportion of out-of-sequence events")
 
 // COMMAND ----------
 
@@ -12,5 +13,6 @@ dbutils.notebook.run("verify-common", 0, Map(
   "input-table" -> dbutils.widgets.get("delta-table"),
   "assert-events-per-second" -> dbutils.widgets.get("assert-events-per-second"),
   "assert-latency-milliseconds" -> dbutils.widgets.get("assert-latency-milliseconds"),
-  "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction")
+  "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction"),
+  "assert-outofsequence-fraction" -> dbutils.widgets.get("assert-outofsequence-fraction")
 ))
diff --git a/streaming/databricks/notebooks/verify-eventhubs.scala b/streaming/databricks/notebooks/verify-eventhubs.scala
index d5500ab6..b87ef178 100644
--- a/streaming/databricks/notebooks/verify-eventhubs.scala
+++ b/streaming/databricks/notebooks/verify-eventhubs.scala
@@ -5,6 +5,7 @@ dbutils.widgets.text("eventhub-maxEventsPerTrigger", "1000000", "Event Hubs max
 dbutils.widgets.text("assert-events-per-second", "900", "Assert min events per second (computed over 1 min windows)")
 dbutils.widgets.text("assert-latency-milliseconds", "15000", "Assert max latency in milliseconds (averaged over 1 min windows)")
 dbutils.widgets.text("assert-duplicate-fraction", "0", "Assert max proportion of duplicate events")
+dbutils.widgets.text("assert-outofsequence-fraction", "0", "Assert max proportion of out-of-sequence events") // COMMAND ---------- @@ -81,7 +82,8 @@ dbutils.notebook.run("verify-common", 0, Map( "input-table" -> stagingTable, "assert-events-per-second" -> dbutils.widgets.get("assert-events-per-second"), "assert-latency-milliseconds" -> dbutils.widgets.get("assert-latency-milliseconds"), - "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction") + "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction"), + "assert-outofsequence-fraction" -> dbutils.widgets.get("assert-outofsequence-fraction") )) // COMMAND ---------- diff --git a/streaming/databricks/notebooks/verify-sqldw.scala b/streaming/databricks/notebooks/verify-sqldw.scala index 6afcd7a3..52a25242 100644 --- a/streaming/databricks/notebooks/verify-sqldw.scala +++ b/streaming/databricks/notebooks/verify-sqldw.scala @@ -8,6 +8,7 @@ dbutils.widgets.text("sqldw-table", "rawdata_cs") dbutils.widgets.text("assert-events-per-second", "900", "Assert min events per second (computed over 1 min windows)") dbutils.widgets.text("assert-latency-milliseconds", "15000", "Assert max latency in milliseconds (averaged over 1 min windows)") dbutils.widgets.text("assert-duplicate-fraction", "0", "Assert max proportion of duplicate events") +dbutils.widgets.text("assert-outofsequence-fraction", "0", "Assert max proportion of out-of-sequence events") // COMMAND ---------- val tempStorageAccount = dbutils.widgets.get("sqldw-tempstorage-account") @@ -45,5 +46,6 @@ dbutils.notebook.run("verify-common", 0, Map( "input-table" -> (spark.conf.get("spark.sql.globalTempDatabase") + "." + tempTable), "assert-events-per-second" -> dbutils.widgets.get("assert-events-per-second"), "assert-latency-milliseconds" -> dbutils.widgets.get("assert-latency-milliseconds"), - "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction") + "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction"), + "assert-outofsequence-fraction" -> dbutils.widgets.get("assert-outofsequence-fraction") )) diff --git a/streaming/databricks/notebooks/verify-timeseriesinsights-parquet.scala b/streaming/databricks/notebooks/verify-timeseriesinsights-parquet.scala index 802a3c2b..085208d0 100644 --- a/streaming/databricks/notebooks/verify-timeseriesinsights-parquet.scala +++ b/streaming/databricks/notebooks/verify-timeseriesinsights-parquet.scala @@ -3,6 +3,7 @@ dbutils.widgets.text("test-output-path", "dbfs:/test-output/test-output.txt", "D dbutils.widgets.text("storage-path", "", "WASB URL to data storage container") dbutils.widgets.text("assert-events-per-second", "900", "Assert min events per second (computed over 1 min windows)") dbutils.widgets.text("assert-duplicate-fraction", "0", "Assert max proportion of duplicate events") +dbutils.widgets.text("assert-outofsequence-fraction", "0", "Assert max proportion of out-of-sequence events") // COMMAND ---------- @@ -37,7 +38,8 @@ dbutils.notebook.run("verify-common", 0, Map( "input-table" -> (spark.conf.get("spark.sql.globalTempDatabase") + "." 
   "input-table" -> (spark.conf.get("spark.sql.globalTempDatabase") + "." + tempView),
   "assert-events-per-second" -> dbutils.widgets.get("assert-events-per-second"),
   "assert-latency-milliseconds" -> "0", // As we use event timestamp as stored timestamp, measured latency should be 0
-  "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction")
+  "assert-duplicate-fraction" -> dbutils.widgets.get("assert-duplicate-fraction"),
+  "assert-outofsequence-fraction" -> dbutils.widgets.get("assert-outofsequence-fraction")
 ))
 
 // COMMAND ----------
diff --git a/streaming/databricks/runners/verify-dataexplorer.sh b/streaming/databricks/runners/verify-dataexplorer.sh
index a2875c95..afb5637c 100755
--- a/streaming/databricks/runners/verify-dataexplorer.sh
+++ b/streaming/databricks/runners/verify-dataexplorer.sh
@@ -20,7 +20,7 @@ databricks secrets put --scope "MAIN" --key "dataexplorer-client-password" --str
 databricks secrets put --scope "MAIN" --key "dataexplorer-storage-key" --string-value "$AZURE_STORAGE_KEY"
 
 source ../streaming/databricks/job/run-databricks-job.sh verify-dataexplorer true "$(cat <