Commit d9a88b3

all tests pass
1 parent e0ee499 commit d9a88b3

8 files changed: +198 -108 lines changed

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/state/StateDataSource.scala

Lines changed: 18 additions & 19 deletions
@@ -76,10 +76,7 @@ class StateDataSource extends TableProvider with DataSourceRegister with Logging
         sourceOptions.resolvedCpLocation,
         stateConf.providerClass)
     }
-    val (stateStoreReaderInfo, storeMetadata) = getStoreMetadataAndRunChecks(
-      sourceOptions)
-
-    val stateFormatVersion = getStateFormatVersion(storeMetadata)
+    val stateStoreReaderInfo = getStoreMetadataAndRunChecks(sourceOptions)

     // The key state encoder spec should be available for all operators except stream-stream joins
     val keyStateEncoderSpec = if (stateStoreReaderInfo.keyStateEncoderSpecOpt.isDefined) {
@@ -94,18 +91,16 @@ class StateDataSource extends TableProvider with DataSourceRegister with Logging
       stateStoreReaderInfo.stateStoreColFamilySchemaOpt,
       stateStoreReaderInfo.stateSchemaProviderOpt,
       stateStoreReaderInfo.joinColFamilyOpt,
-      Option(stateStoreReaderInfo.allColumnFamiliesReaderInfo),
-      stateFormatVersion)
+      Option(stateStoreReaderInfo.allColumnFamiliesReaderInfo))
   }

   override def inferSchema(options: CaseInsensitiveStringMap): StructType = {
     val sourceOptions = StateSourceOptions.modifySourceOptions(hadoopConf,
       StateSourceOptions.apply(session, hadoopConf, options))

-    val (stateStoreReaderInfo, storeMetadata) = getStoreMetadataAndRunChecks(sourceOptions)
+    val stateStoreReaderInfo = getStoreMetadataAndRunChecks(sourceOptions)
     val oldSchemaFilePaths = StateDataSource.getOldSchemaFilePaths(sourceOptions, hadoopConf)
-
-    val stateFormatVersion = getStateFormatVersion(storeMetadata)
+    val allCFReaderInfo = stateStoreReaderInfo.allColumnFamiliesReaderInfo

     val stateCheckpointLocation = sourceOptions.stateCheckpointLocation
     try {
@@ -127,16 +122,16 @@ class StateDataSource extends TableProvider with DataSourceRegister with Logging

       val stateVarInfo: Option[TransformWithStateVariableInfo] = if (
         sourceOptions.internalOnlyReadAllColumnFamilies) {
-        stateStoreReaderInfo.allColumnFamiliesReaderInfo.stateVariableInfos.headOption
+        allCFReaderInfo.stateVariableInfos.headOption
       } else {
         stateStoreReaderInfo.transformWithStateVariableInfoOpt
       }
       SchemaUtil.getSourceSchema(sourceOptions, keySchema,
         valueSchema,
         stateVarInfo,
         stateStoreReaderInfo.stateStoreColFamilySchemaOpt,
-        storeMetadata,
-        stateFormatVersion)
+        allCFReaderInfo.operatorName,
+        allCFReaderInfo.stateFormatVersion)
     } catch {
       case NonFatal(e) =>
         throw StateDataSourceErrors.failedToReadStateSchema(sourceOptions, e)
@@ -146,8 +141,10 @@ class StateDataSource extends TableProvider with DataSourceRegister with Logging
   override def supportsExternalMetadata(): Boolean = false

   /**
-   * Returns the state format version for SYMMETRIC_HASH_JOIN operators.
-   * For join operators, returns the configured version; for other operators returns None.
+   * Returns the state format version for SYMMETRIC_HASH_JOIN operators.
+   * This currently only supports join operators because this function is only used by
+   * PartitionKeyExtractor, and PartitionKeyExtractor only needs the state format version
+   * for join operators.
    */
   private def getStateFormatVersion(
       storeMetadata: Array[StateMetadataTableEntry]): Option[Int] = {
@@ -283,8 +280,8 @@ class StateDataSource extends TableProvider with DataSourceRegister with Logging
     }
   }

-  private def getStoreMetadataAndRunChecks(sourceOptions: StateSourceOptions):
-    (StateStoreReaderInfo, Array[StateMetadataTableEntry]) = {
+  private def getStoreMetadataAndRunChecks(
+      sourceOptions: StateSourceOptions): StateStoreReaderInfo = {
     val storeMetadata = StateDataSource.getStateStoreMetadata(sourceOptions, hadoopConf)
     if (!sourceOptions.internalOnlyReadAllColumnFamilies) {
       // skipping runStateVarChecks for StatePartitionAllColumnFamiliesReader because
@@ -390,14 +387,16 @@ class StateDataSource extends TableProvider with DataSourceRegister with Logging
     }

     val operatorName = if (storeMetadata.nonEmpty) storeMetadata.head.operatorName else ""
-    (StateStoreReaderInfo(
+    val stateFormatVersion = getStateFormatVersion(storeMetadata)
+    StateStoreReaderInfo(
       keyStateEncoderSpecOpt,
       stateStoreColFamilySchemaOpt,
       transformWithStateVariableInfoOpt,
       stateSchemaProvider,
       joinColFamilyOpt,
-      AllColumnFamiliesReaderInfo(stateStoreColFamilySchemas, stateVariableInfos, operatorName)
-    ), storeMetadata)
+      AllColumnFamiliesReaderInfo(
+        stateStoreColFamilySchemas, stateVariableInfos, operatorName, stateFormatVersion)
+    )
   }

   private def getKeyStateEncoderSpec(
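
Illustrative sketch (not part of this commit): a minimal, self-contained Scala example of the refactoring shape above, where getStoreMetadataAndRunChecks derives the state format version once and embeds it in the returned reader info instead of returning a tuple that call sites unpack and post-process. The types AllCFInfo and ReaderInfo and the versioning rule are hypothetical stand-ins, not the real Spark classes.

// Simplified stand-ins for the reader-info classes; names mirror the diff only loosely.
case class AllCFInfo(operatorName: String = "", stateFormatVersion: Option[Int] = None)
case class ReaderInfo(joinColFamily: Option[String] = None, allCFInfo: AllCFInfo = AllCFInfo())

object ReaderInfoSketch {
  // Assumed versioning rule, for illustration: only join operators carry a format version.
  private def formatVersionFor(operatorName: String): Option[Int] =
    if (operatorName == "symmetricHashJoin") Some(2) else None

  // The version is computed once here and carried inside the result.
  def build(operatorName: String): ReaderInfo =
    ReaderInfo(allCFInfo = AllCFInfo(operatorName, formatVersionFor(operatorName)))

  def main(args: Array[String]): Unit = {
    val info = build("symmetricHashJoin")
    // Call sites read the version off the info object instead of recomputing it.
    println(info.allCFInfo.stateFormatVersion)   // Some(2)
  }
}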

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/state/StatePartitionReader.scala

Lines changed: 74 additions & 29 deletions
@@ -21,8 +21,10 @@ import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeRow}
 import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
 import org.apache.spark.sql.execution.datasources.v2.state.utils.SchemaUtil
+import org.apache.spark.sql.execution.streaming.operators.stateful.StatefulOperatorsUtils
 import org.apache.spark.sql.execution.streaming.operators.stateful.join.SymmetricHashJoinStateManager
-import org.apache.spark.sql.execution.streaming.operators.stateful.transformwithstate.{StateStoreColumnFamilySchemaUtils, StateVariableType, TransformWithStateVariableInfo}
+import org.apache.spark.sql.execution.streaming.operators.stateful.transformwithstate.{StateStoreColumnFamilySchemaUtils, StateVariableType, TransformWithStateVariableInfo, TransformWithStateVariableUtils}
+import org.apache.spark.sql.execution.streaming.operators.stateful.transformwithstate.timers.TimerStateUtils
 import org.apache.spark.sql.execution.streaming.state._
 import org.apache.spark.sql.execution.streaming.state.RecordType.{getRecordTypeAsString, RecordType}
 import org.apache.spark.sql.types.{NullType, StructField, StructType}
@@ -32,7 +34,8 @@ import org.apache.spark.util.{NextIterator, SerializableConfiguration}
 case class AllColumnFamiliesReaderInfo(
     colFamilySchemas: List[StateStoreColFamilySchema] = List.empty,
     stateVariableInfos: List[TransformWithStateVariableInfo] = List.empty,
-    operatorName: String = "")
+    operatorName: String = "",
+    stateFormatVersion: Option[Int] = None)

 /**
  * An implementation of [[PartitionReaderFactory]] for State data source. This is used to support
@@ -50,8 +53,7 @@ class StatePartitionReaderFactory(
     stateStoreColFamilySchemaOpt: Option[StateStoreColFamilySchema],
     stateSchemaProviderOpt: Option[StateSchemaProvider],
     joinColFamilyOpt: Option[String],
-    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo],
-    stateFormatVersion: Option[Int])
+    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo])
   extends PartitionReaderFactory {

   override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
@@ -60,7 +62,7 @@ class StatePartitionReaderFactory(
       require(allColumnFamiliesReaderInfo.isDefined)
       new StatePartitionAllColumnFamiliesReader(storeConf, hadoopConf,
         stateStoreInputPartition, schema, keyStateEncoderSpec, stateStoreColFamilySchemaOpt,
-        stateSchemaProviderOpt, allColumnFamiliesReaderInfo.get, stateFormatVersion)
+        stateSchemaProviderOpt, allColumnFamiliesReaderInfo.get)
     } else if (stateStoreInputPartition.sourceOptions.readChangeFeed) {
       new StateStoreChangeDataPartitionReader(storeConf, hadoopConf,
         stateStoreInputPartition, schema, keyStateEncoderSpec, stateVariableInfoOpt,
@@ -270,8 +272,7 @@ class StatePartitionAllColumnFamiliesReader(
     keyStateEncoderSpec: KeyStateEncoderSpec,
     defaultStateStoreColFamilySchemaOpt: Option[StateStoreColFamilySchema],
     stateSchemaProviderOpt: Option[StateSchemaProvider],
-    allColumnFamiliesReaderInfo: AllColumnFamiliesReaderInfo,
-    stateFormatVersion: Option[Int])
+    allColumnFamiliesReaderInfo: AllColumnFamiliesReaderInfo)
   extends StatePartitionReaderBase(
     storeConf,
     hadoopConf, partition, schema,
@@ -282,14 +283,55 @@ class StatePartitionAllColumnFamiliesReader(
   private val stateStoreColFamilySchemas = allColumnFamiliesReaderInfo.colFamilySchemas
   private val stateVariableInfos = allColumnFamiliesReaderInfo.stateVariableInfos
   private val operatorName = allColumnFamiliesReaderInfo.operatorName
+  private val stateFormatVersion = allColumnFamiliesReaderInfo.stateFormatVersion

-  // Create the extractor for partition key extraction
-  private lazy val partitionKeyExtractor = SchemaUtil.getExtractor(
-    operatorName,
-    keySchema,
-    partition.sourceOptions.storeName,
-    stateVariableInfos.headOption,
-    stateFormatVersion)
+  private def isDefaultColFamilyInTWS(operatorName: String, colFamilyName: String): Boolean = {
+    StatefulOperatorsUtils.TRANSFORM_WITH_STATE_OP_NAMES.contains(operatorName) &&
+      colFamilyName == StateStore.DEFAULT_COL_FAMILY_NAME
+  }
+
+  /**
+   * Extracts the base state variable name from internal column family names.
+   */
+  private def getBaseStateName(colFamilyName: String): String = {
+    if (StateStoreColumnFamilySchemaUtils.isTtlColFamilyName(colFamilyName)) {
+      StateStoreColumnFamilySchemaUtils.getStateNameFromTtlColFamily(colFamilyName)
+    } else if (StateStoreColumnFamilySchemaUtils.isMinExpiryIndexCFName(colFamilyName)) {
+      StateStoreColumnFamilySchemaUtils.getStateNameFromMinExpiryIndexCFName(colFamilyName)
+    } else if (StateStoreColumnFamilySchemaUtils.isCountIndexCFName(colFamilyName)) {
+      StateStoreColumnFamilySchemaUtils.getStateNameFromCountIndexCFName(colFamilyName)
+    } else if (TransformWithStateVariableUtils.isRowCounterCFName(colFamilyName)) {
+      TransformWithStateVariableUtils.getStateNameFromRowCounterCFName(colFamilyName)
+    } else {
+      colFamilyName
+    }
+  }
+
+  private def getStateVarInfo(
+      colFamilyName: String): Option[TransformWithStateVariableInfo] = {
+    if (TimerStateUtils.isTimerSecondaryIndexCF(colFamilyName)) {
+      Some(TransformWithStateVariableUtils.getTimerState(colFamilyName))
+    } else {
+      stateVariableInfos.find(_.stateName == getBaseStateName(colFamilyName))
+    }
+  }
+
+  // Create extractors for each column family - each column family may have different key schema
+  private lazy val partitionKeyExtractors: Map[String, StatePartitionKeyExtractor] = {
+    stateStoreColFamilySchemas
+      .filter(schema => !isDefaultColFamilyInTWS(operatorName, schema.colFamilyName))
+      .map { cfSchema =>
+        val extractor = SchemaUtil.getPartitionKeyExtractor(
+          operatorName,
+          cfSchema.keySchema,
+          partition.sourceOptions.storeName,
+          cfSchema.colFamilyName,
+          getStateVarInfo(cfSchema.colFamilyName),
+          stateFormatVersion)
+        cfSchema.colFamilyName -> extractor
+      }.toMap
+  }

   private def isListType(colFamilyName: String): Boolean = {
     SchemaUtil.checkVariableType(
@@ -368,22 +410,25 @@ class StatePartitionAllColumnFamiliesReader(

   override lazy val iter: Iterator[InternalRow] = {
     // Iterate all column families and concatenate results
-    stateStoreColFamilySchemas.iterator.flatMap { cfSchema =>
-      if (isListType(cfSchema.colFamilyName)) {
-        store.iterator(cfSchema.colFamilyName).flatMap(
-          pair =>
-            store.valuesIterator(pair.key, cfSchema.colFamilyName).map {
-              value =>
-                SchemaUtil.unifyStateRowPairAsRawBytes(
-                  (pair.key, value), cfSchema.colFamilyName, partitionKeyExtractor)
-            }
-        )
-      } else {
-        store.iterator(cfSchema.colFamilyName).map { pair =>
-          SchemaUtil.unifyStateRowPairAsRawBytes(
-            (pair.key, pair.value), cfSchema.colFamilyName, partitionKeyExtractor)
+    stateStoreColFamilySchemas.iterator
+      .filter(schema => !isDefaultColFamilyInTWS(operatorName, schema.colFamilyName))
+      .flatMap { cfSchema =>
+        val extractor = partitionKeyExtractors(cfSchema.colFamilyName)
+        if (isListType(cfSchema.colFamilyName)) {
+          store.iterator(cfSchema.colFamilyName).flatMap(
+            pair =>
+              store.valuesIterator(pair.key, cfSchema.colFamilyName).map {
+                value =>
+                  SchemaUtil.unifyStateRowPairAsRawBytes(
+                    (pair.key, value), cfSchema.colFamilyName, extractor)
+              }
+          )
+        } else {
+          store.iterator(cfSchema.colFamilyName).map { pair =>
+            SchemaUtil.unifyStateRowPairAsRawBytes(
+              (pair.key, pair.value), cfSchema.colFamilyName, extractor)
+          }
         }
-      }
     }
   }
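
Illustrative sketch (not part of this commit): the per-column-family extractor pattern introduced above, reduced to plain Scala. Each column family can carry its own key schema, so an extractor is built per family and looked up by name, and the default column family of TransformWithState operators is skipped. CFSchema, Extractor, and the "default"/"transformWithStateExec" names below are assumptions for illustration, not the real Spark types or constants.

object PerColumnFamilyExtractorSketch {
  final case class CFSchema(colFamilyName: String, keyFields: Seq[String])
  final case class Extractor(colFamilyName: String, keyFields: Seq[String])  // stand-in for StatePartitionKeyExtractor

  private val DefaultCF = "default"                          // assumed default column family name
  private val TwsOperators = Set("transformWithStateExec")   // assumed operator name(s)

  private def isDefaultColFamilyInTws(operatorName: String, cf: String): Boolean =
    TwsOperators.contains(operatorName) && cf == DefaultCF

  // One extractor per column family, keyed by column family name.
  def buildExtractors(operatorName: String, schemas: List[CFSchema]): Map[String, Extractor] =
    schemas
      .filterNot(s => isDefaultColFamilyInTws(operatorName, s.colFamilyName))
      .map(s => s.colFamilyName -> Extractor(s.colFamilyName, s.keyFields))
      .toMap

  def main(args: Array[String]): Unit = {
    val schemas = List(CFSchema("default", Seq("key")), CFSchema("countState", Seq("key", "userId")))
    val extractors = buildExtractors("transformWithStateExec", schemas)
    println(extractors.keySet)   // Set(countState): the TWS default column family is skipped
  }
}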

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/state/StateScanBuilder.scala

Lines changed: 4 additions & 6 deletions
@@ -47,11 +47,10 @@ class StateScanBuilder(
     stateStoreColFamilySchemaOpt: Option[StateStoreColFamilySchema],
     stateSchemaProviderOpt: Option[StateSchemaProvider],
     joinColFamilyOpt: Option[String],
-    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo],
-    stateFormatVersion: Option[Int]) extends ScanBuilder {
+    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo]) extends ScanBuilder {
   override def build(): Scan = new StateScan(session, schema, sourceOptions, stateStoreConf,
     keyStateEncoderSpec, stateVariableInfoOpt, stateStoreColFamilySchemaOpt, stateSchemaProviderOpt,
-    joinColFamilyOpt, allColumnFamiliesReaderInfo, stateFormatVersion)
+    joinColFamilyOpt, allColumnFamiliesReaderInfo)
 }

 /** An implementation of [[InputPartition]] for State Store data source. */
@@ -71,8 +70,7 @@ class StateScan(
     stateStoreColFamilySchemaOpt: Option[StateStoreColFamilySchema],
     stateSchemaProviderOpt: Option[StateSchemaProvider],
     joinColFamilyOpt: Option[String],
-    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo],
-    stateFormatVersion: Option[Int])
+    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo])
   extends Scan with Batch {

   // A Hadoop Configuration can be about 10 KB, which is pretty big, so broadcast it
@@ -148,7 +146,7 @@ class StateScan(
       case JoinSideValues.none =>
         new StatePartitionReaderFactory(stateStoreConf, hadoopConfBroadcast.value, schema,
           keyStateEncoderSpec, stateVariableInfoOpt, stateStoreColFamilySchemaOpt,
-          stateSchemaProviderOpt, joinColFamilyOpt, allColumnFamiliesReaderInfo, stateFormatVersion)
+          stateSchemaProviderOpt, joinColFamilyOpt, allColumnFamiliesReaderInfo)
     }

   override def toBatch: Batch = this

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/state/StateTable.scala

Lines changed: 2 additions & 3 deletions
@@ -46,8 +46,7 @@ class StateTable(
     stateStoreColFamilySchemaOpt: Option[StateStoreColFamilySchema],
     stateSchemaProviderOpt: Option[StateSchemaProvider],
     joinColFamilyOpt: Option[String],
-    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo] = None,
-    stateFormatVersion: Option[Int] = None)
+    allColumnFamiliesReaderInfo: Option[AllColumnFamiliesReaderInfo] = None)
   extends Table with SupportsRead with SupportsMetadataColumns {

   import StateTable._
@@ -89,7 +88,7 @@ class StateTable(
   override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder =
     new StateScanBuilder(session, schema, sourceOptions, stateConf, keyStateEncoderSpec,
       stateVariableInfoOpt, stateStoreColFamilySchemaOpt, stateSchemaProviderOpt,
-      joinColFamilyOpt, allColumnFamiliesReaderInfo, stateFormatVersion)
+      joinColFamilyOpt, allColumnFamiliesReaderInfo)

   override def properties(): util.Map[String, String] = Map.empty[String, String].asJava

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/state/utils/SchemaUtil.scala

Lines changed: 22 additions & 15 deletions
@@ -25,7 +25,6 @@ import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeRow}
 import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData}
 import org.apache.spark.sql.execution.datasources.v2.state.{StateDataSourceErrors, StateSourceOptions}
-import org.apache.spark.sql.execution.datasources.v2.state.metadata.StateMetadataTableEntry
 import org.apache.spark.sql.execution.streaming.operators.stateful.{StatefulOperatorsUtils, StatePartitionKeyExtractorFactory}
 import org.apache.spark.sql.execution.streaming.operators.stateful.join.StreamingSymmetricHashJoinHelper.LeftSide
 import org.apache.spark.sql.execution.streaming.operators.stateful.join.SymmetricHashJoinStateManager
@@ -54,14 +53,28 @@ object SchemaUtil {
       valueSchema: StructType,
       transformWithStateVariableInfoOpt: Option[TransformWithStateVariableInfo],
       stateStoreColFamilySchemaOpt: Option[StateStoreColFamilySchema],
-      storeMetadata: Array[StateMetadataTableEntry],
+      operatorName: String,
       stateFormatVersion: Option[Int] = None): StructType = {
     if (sourceOptions.internalOnlyReadAllColumnFamilies) {
-      // Extract partition key schema using StatePartitionKeyExtractor
-      require(storeMetadata.nonEmpty)
-      val extractor = getExtractor(
-        storeMetadata.head.operatorName, keySchema, sourceOptions.storeName,
-        transformWithStateVariableInfoOpt, stateFormatVersion)
+      val colFamilyName: String =
+        if (
+          operatorName == StatefulOperatorsUtils.SYMMETRIC_HASH_JOIN_EXEC_OP_NAME
+        ) {
+          SymmetricHashJoinStateManager.allStateStoreNames(LeftSide).head
+        } else if (
+          StatefulOperatorsUtils.TRANSFORM_WITH_STATE_OP_NAMES.contains(operatorName)
+        ) {
+          require(
+            transformWithStateVariableInfoOpt.isDefined,
+            "transformWithStateVariableInfo is required for TransformWithState"
+          )
+          transformWithStateVariableInfoOpt.get.stateName
+        } else {
+          StateStore.DEFAULT_COL_FAMILY_NAME
+        }
+      val extractor = getPartitionKeyExtractor(
+        operatorName, keySchema, sourceOptions.storeName,
+        colFamilyName, transformWithStateVariableInfoOpt, stateFormatVersion)
       new StructType()
         .add("partition_key", extractor.partitionKeySchema)
         .add("key_bytes", BinaryType)
@@ -90,19 +103,13 @@ object SchemaUtil {
    * Creates a StatePartitionKeyExtractor for the given operator.
    * This is used to extract partition keys from state store keys for state repartitioning.
    */
-  def getExtractor(
+  def getPartitionKeyExtractor(
       operatorName: String,
       keySchema: StructType,
       storeName: String,
+      colFamilyName: String,
       transformWithStateVariableInfoOpt: Option[TransformWithStateVariableInfo],
       stateFormatVersion: Option[Int]): StatePartitionKeyExtractor = {
-    val colFamilyName: String =
-      if (operatorName == StatefulOperatorsUtils.SYMMETRIC_HASH_JOIN_EXEC_OP_NAME) {
-        SymmetricHashJoinStateManager.allStateStoreNames(LeftSide).head
-      } else {
-        transformWithStateVariableInfoOpt.map(_.stateName)
-          .getOrElse(StateStore.DEFAULT_COL_FAMILY_NAME)
-      }
     StatePartitionKeyExtractorFactory.create(
       operatorName,
       keySchema,
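
Illustrative sketch (not part of this commit): the operator-driven column family selection that getSourceSchema now performs itself, written as a standalone function. The operator-name strings and store names below are assumptions standing in for StatefulOperatorsUtils constants and SymmetricHashJoinStateManager store names.

object ColumnFamilySelectionSketch {
  def selectColFamily(operatorName: String, stateVariableName: Option[String]): String =
    operatorName match {
      case "symmetricHashJoin" =>
        "left-keyToNumValues"                       // assumed name of a join-side state store
      case "transformWithStateExec" =>
        stateVariableName.getOrElse(
          throw new IllegalArgumentException(
            "state variable info is required for TransformWithState"))
      case _ =>
        "default"                                   // assumed default column family name
    }

  def main(args: Array[String]): Unit = {
    println(selectColFamily("transformWithStateExec", Some("countState")))  // countState
    println(selectColFamily("aggregate", None))                             // default
  }
}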

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/operators/stateful/transformwithstate/TransformWithStateVariableUtils.scala

Lines changed: 4 additions & 0 deletions
@@ -63,6 +63,10 @@ object TransformWithStateVariableUtils {
   def isRowCounterCFName(colFamilyName: String): Boolean = {
     colFamilyName.startsWith(ROW_COUNTER_CF_PREFIX)
   }
+
+  def getStateNameFromRowCounterCFName(colFamilyName: String): String = {
+    colFamilyName.substring(ROW_COUNTER_CF_PREFIX.length)
+  }
 }

 // Enum of possible State Variable types
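
Illustrative sketch (not part of this commit): a round trip for the new helper, showing that decoding is just stripping the row-counter prefix that encoding prepends. The prefix value "$rowCounter_" is an assumption standing in for ROW_COUNTER_CF_PREFIX.

object RowCounterNameSketch {
  private val RowCounterCFPrefix = "$rowCounter_"   // assumed prefix constant

  def rowCounterCFName(stateName: String): String = RowCounterCFPrefix + stateName
  def isRowCounterCFName(cf: String): Boolean = cf.startsWith(RowCounterCFPrefix)
  def stateNameFromRowCounterCFName(cf: String): String = cf.substring(RowCounterCFPrefix.length)

  def main(args: Array[String]): Unit = {
    val cf = rowCounterCFName("listState")
    assert(isRowCounterCFName(cf) && stateNameFromRowCounterCFName(cf) == "listState")
    println(cf)   // $rowCounter_listState
  }
}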

sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/operators/stateful/transformwithstate/timers/TimerStateImpl.scala

Lines changed: 6 additions & 1 deletion
@@ -61,9 +61,14 @@ object TimerStateUtils {
   }

   def isTimerSecondaryIndexCF(colFamilyName: String): Boolean = {
-    assert(isTimerCFName(colFamilyName), s"Column family name must be for a timer: $colFamilyName")
     colFamilyName.endsWith(TIMESTAMP_TO_KEY_CF)
   }
+
+  def getPrimaryIndexFromSecondaryIndexCF(colFamilyName: String): String = {
+    assert(isTimerSecondaryIndexCF(colFamilyName),
+      s"Column family name must be for a timer secondary index: $colFamilyName")
+    colFamilyName.replace(TIMESTAMP_TO_KEY_CF, KEY_TO_TIMESTAMP_CF)
+  }
 }

 /**
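
Illustrative sketch (not part of this commit): how a timer secondary-index column family name can be mapped back to its primary index by swapping suffixes, mirroring the new getPrimaryIndexFromSecondaryIndexCF helper. The suffix strings below are assumptions standing in for TIMESTAMP_TO_KEY_CF and KEY_TO_TIMESTAMP_CF.

object TimerIndexNameSketch {
  private val TimestampToKeyCF = "-timestampToKey"   // assumed secondary-index suffix
  private val KeyToTimestampCF = "-keyToTimestamp"   // assumed primary-index suffix

  def isTimerSecondaryIndexCF(cf: String): Boolean = cf.endsWith(TimestampToKeyCF)

  def primaryIndexFromSecondaryIndexCF(cf: String): String = {
    assert(isTimerSecondaryIndexCF(cf), s"not a timer secondary index: $cf")
    cf.replace(TimestampToKeyCF, KeyToTimestampCF)
  }

  def main(args: Array[String]): Unit = {
    println(primaryIndexFromSecondaryIndexCF("procTimers-timestampToKey"))   // procTimers-keyToTimestamp
  }
}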
