diff --git a/Makefile b/Makefile index f82cd84c1..858046b0f 100755 --- a/Makefile +++ b/Makefile @@ -64,6 +64,31 @@ run-light: clean $(call setup_env, envs/arkd.light.env) @go run ./cmd/arkd +## run-local: run arkd locally with explicit env vars (no container hostnames) +run-local: clean + @echo "Running arkd locally with explicit environment variables..." + @ARKD_LOG_LEVEL=6 \ + ARKD_NO_MACAROONS=true \ + ARKD_VTXO_TREE_EXPIRY=511 \ + ARKD_SCHEDULER_TYPE=block \ + ARKD_UNILATERAL_EXIT_DELAY=512 \ + ARKD_BOARDING_EXIT_DELAY=1024 \ + ARKD_CHECKPOINT_EXIT_DELAY=10 \ + ARKD_DATADIR=./data/regtest \ + ARKD_WALLET_ADDR=127.0.0.1:6060 \ + ARKD_ESPLORA_URL=http://localhost:3000 \ + ARKD_ROUND_MIN_PARTICIPANTS_COUNT=$${ARKD_ROUND_MIN_PARTICIPANTS_COUNT:-1} \ + ARKD_ROUND_MAX_PARTICIPANTS_COUNT=$${ARKD_ROUND_MAX_PARTICIPANTS_COUNT:-128} \ + ARKD_VTXO_MIN_AMOUNT=1 \ + ARKD_LIVE_STORE_TYPE=inmemory \ + ARKD_EVENT_DB_TYPE=badger \ + ARKD_DB_TYPE=sqlite \ + ARKD_SESSION_DURATION=$${ARKD_SESSION_DURATION:-10} \ + ARKD_ROUND_REPORT_ENABLED=$${ARKD_ROUND_REPORT_ENABLED:-true} \ + ARKD_BAN_THRESHOLD=1 \ + ARKD_ONCHAIN_OUTPUT_FEE=100 \ + go run ./cmd/arkd + ## test: runs unit and component tests test: pgtest redis-test-up @sleep 2 @@ -234,4 +259,4 @@ run-simulation: ## pprof: run pprof tool (e.g. make pprof PROFILE=heap) pprof: @echo "Running pprof..." 
- @go tool pprof -http=:8080 http://localhost:7071/debug/pprof/$(PROFILE) \ No newline at end of file + @go tool pprof -http=:8080 http://localhost:7071/debug/pprof/$(PROFILE) diff --git a/api-spec/openapi/swagger/ark/v1/indexer.openapi.json b/api-spec/openapi/swagger/ark/v1/indexer.openapi.json index 45c691717..02bf5bd67 100644 --- a/api-spec/openapi/swagger/ark/v1/indexer.openapi.json +++ b/api-spec/openapi/swagger/ark/v1/indexer.openapi.json @@ -5,6 +5,47 @@ "version": "version not set" }, "paths": { + "/v1/indexer/assetGroup/{asset_id}": { + "get": { + "tags": [ + "IndexerService" + ], + "description": "GetAsset returns the asset information and metadata for the specified asset ID.", + "operationId": "IndexerService_GetAssetGroup", + "parameters": [ + { + "name": "assetId", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "a successful response.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetAssetGroupResponse" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Status" + } + } + } + } + } + } + }, "/v1/indexer/batch/{batch_outpoint.txid}/{batch_outpoint.vout}/sweepTxs": { "get": { "tags": [ @@ -731,6 +772,61 @@ } } }, + "AssetGroup": { + "title": "AssetGroup", + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "immutable": { + "type": "boolean" + }, + "metadata": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AssetMetadata" + } + }, + "quantity": { + "type": "integer", + "format": "uint64" + } + } + }, + "AssetMetadata": { + "title": "AssetMetadata", + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "GetAssetGroupRequest": { + "title": "GetAssetGroupRequest", + "type": "object", + "properties": { + "assetId": { + 
"type": "string" + } + } + }, + "GetAssetGroupResponse": { + "title": "GetAssetGroupResponse", + "type": "object", + "properties": { + "assetGroup": { + "$ref": "#/components/schemas/AssetGroup" + }, + "assetId": { + "type": "string" + } + } + }, "GetBatchSweepTransactionsRequest": { "title": "GetBatchSweepTransactionsRequest", "type": "object", @@ -1047,6 +1143,19 @@ } } }, + "IndexerAsset": { + "title": "IndexerAsset", + "type": "object", + "properties": { + "amount": { + "type": "integer", + "format": "uint64" + }, + "assetId": { + "type": "string" + } + } + }, "IndexerBatch": { "title": "IndexerBatch", "type": "object", @@ -1202,6 +1311,19 @@ "$ref": "#/components/schemas/IndexerVtxo" } }, + "teleportEvents": { + "type": "array", + "description": "List of teleport events involved in the transaction", + "items": { + "$ref": "#/components/schemas/TeleportEvent" + } + }, + "teleportHashes": { + "type": "array", + "items": { + "type": "string" + } + }, "tx": { "type": "string" }, @@ -1272,6 +1394,12 @@ "arkTxid": { "type": "string" }, + "assets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IndexerAsset" + } + }, "commitmentTxids": { "type": "array", "items": { @@ -1356,6 +1484,37 @@ } } }, + "TeleportEvent": { + "title": "TeleportEvent", + "type": "object", + "properties": { + "amount": { + "type": "integer", + "format": "uint64" + }, + "anchorOutpoint": { + "type": "string" + }, + "assetId": { + "type": "string" + }, + "createdAt": { + "type": "integer", + "format": "int64" + }, + "expiresAt": { + "type": "integer", + "format": "int64" + }, + "outputVout": { + "type": "integer", + "format": "uint32" + }, + "teleportHash": { + "type": "string" + } + } + }, "UnsubscribeForScriptsRequest": { "title": "UnsubscribeForScriptsRequest", "type": "object", diff --git a/api-spec/openapi/swagger/ark/v1/service.openapi.json b/api-spec/openapi/swagger/ark/v1/service.openapi.json index 67fc9545c..043ed1537 100644 --- 
a/api-spec/openapi/swagger/ark/v1/service.openapi.json +++ b/api-spec/openapi/swagger/ark/v1/service.openapi.json @@ -615,6 +615,19 @@ } } }, + "Asset": { + "title": "Asset", + "type": "object", + "properties": { + "amount": { + "type": "integer", + "format": "uint64" + }, + "assetId": { + "type": "string" + } + } + }, "BatchFailedEvent": { "title": "BatchFailedEvent", "type": "object", @@ -1426,6 +1439,12 @@ "arkTxid": { "type": "string" }, + "assets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Asset" + } + }, "commitmentTxids": { "type": "array", "items": { diff --git a/api-spec/openapi/swagger/ark/v1/types.openapi.json b/api-spec/openapi/swagger/ark/v1/types.openapi.json index 668ec230a..2e3be362b 100644 --- a/api-spec/openapi/swagger/ark/v1/types.openapi.json +++ b/api-spec/openapi/swagger/ark/v1/types.openapi.json @@ -6,6 +6,19 @@ }, "components": { "schemas": { + "Asset": { + "title": "Asset", + "type": "object", + "properties": { + "amount": { + "type": "integer", + "format": "uint64" + }, + "assetId": { + "type": "string" + } + } + }, "BatchFailedEvent": { "title": "BatchFailedEvent", "type": "object", @@ -400,6 +413,12 @@ "arkTxid": { "type": "string" }, + "assets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Asset" + } + }, "commitmentTxids": { "type": "array", "items": { diff --git a/api-spec/protobuf/ark/v1/indexer.proto b/api-spec/protobuf/ark/v1/indexer.proto index a74be965b..99c594699 100644 --- a/api-spec/protobuf/ark/v1/indexer.proto +++ b/api-spec/protobuf/ark/v1/indexer.proto @@ -75,6 +75,13 @@ service IndexerService { }; } + // GetAsset returns the asset information and metadata for the specified asset ID. + rpc GetAssetGroup(GetAssetGroupRequest) returns (GetAssetGroupResponse) { + option (meshapi.gateway.http) = { + get: "/v1/indexer/assetGroup/{asset_id}" + }; + } + // GetBatchSweepTransactions returns the list of transaction (txid) that swept a given batch // output. 
// In most cases the list contains only one txid, meaning that all the amount locked for a @@ -213,11 +220,33 @@ message GetVirtualTxsRequest { repeated string txids = 1; IndexerPageRequest page = 2; } + message GetVirtualTxsResponse { repeated string txs = 1; IndexerPageResponse page = 2; } +message GetAssetGroupRequest { + string asset_id = 1; +} + +message GetAssetGroupResponse { + string asset_id = 1; + AssetGroup asset_group = 2; +} + +message AssetGroup { + string id = 1; + uint64 quantity = 2; + bool immutable = 3; + repeated AssetMetadata metadata = 4; +} + +message AssetMetadata { + string key = 1; + string value = 2; +} + message GetBatchSweepTransactionsRequest { IndexerOutpoint batch_outpoint = 1; @@ -257,6 +286,12 @@ message IndexerVtxo { repeated string commitment_txids = 11; string settled_by = 12; string ark_txid = 13; + repeated IndexerAsset assets = 14; +} + +message IndexerAsset { + string asset_id = 1; + uint64 amount = 2; } message IndexerChain { @@ -322,6 +357,16 @@ message UnsubscribeForScriptsRequest { message UnsubscribeForScriptsResponse {} +message TeleportEvent { + string teleport_hash = 1; + string anchor_outpoint = 2; + string asset_id = 3; + uint64 amount = 4; + uint32 output_vout = 5; + int64 created_at = 6; + int64 expires_at = 7; +} + message GetSubscriptionRequest { string subscription_id = 1; } @@ -349,4 +394,7 @@ message IndexerSubscriptionEvent { string tx = 5; map checkpoint_txs = 6; repeated IndexerVtxo swept_vtxos = 7; + // List of teleport events involved in the transaction + repeated TeleportEvent teleport_events = 8; + repeated string teleport_hashes = 9; } \ No newline at end of file diff --git a/api-spec/protobuf/ark/v1/types.proto b/api-spec/protobuf/ark/v1/types.proto index 58a178fdd..f3981e813 100644 --- a/api-spec/protobuf/ark/v1/types.proto +++ b/api-spec/protobuf/ark/v1/types.proto @@ -28,6 +28,12 @@ message Vtxo { string spent_by = 11; string settled_by = 12; string ark_txid = 13; + repeated Asset assets = 14; +} 
+ +message Asset { + string asset_id = 1; + uint64 amount = 2; } message TxData { diff --git a/api-spec/protobuf/gen/ark/v1/indexer.pb.go b/api-spec/protobuf/gen/ark/v1/indexer.pb.go index 9dfca2ec1..f9120584f 100644 --- a/api-spec/protobuf/gen/ark/v1/indexer.pb.go +++ b/api-spec/protobuf/gen/ark/v1/indexer.pb.go @@ -1057,6 +1057,222 @@ func (x *GetVirtualTxsResponse) GetPage() *IndexerPageResponse { return nil } +type GetAssetGroupRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAssetGroupRequest) Reset() { + *x = GetAssetGroupRequest{} + mi := &file_ark_v1_indexer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAssetGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAssetGroupRequest) ProtoMessage() {} + +func (x *GetAssetGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_indexer_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAssetGroupRequest.ProtoReflect.Descriptor instead. 
+func (*GetAssetGroupRequest) Descriptor() ([]byte, []int) { + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{16} +} + +func (x *GetAssetGroupRequest) GetAssetId() string { + if x != nil { + return x.AssetId + } + return "" +} + +type GetAssetGroupResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` + AssetGroup *AssetGroup `protobuf:"bytes,2,opt,name=asset_group,json=assetGroup,proto3" json:"asset_group,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAssetGroupResponse) Reset() { + *x = GetAssetGroupResponse{} + mi := &file_ark_v1_indexer_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAssetGroupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAssetGroupResponse) ProtoMessage() {} + +func (x *GetAssetGroupResponse) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_indexer_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAssetGroupResponse.ProtoReflect.Descriptor instead. 
+func (*GetAssetGroupResponse) Descriptor() ([]byte, []int) { + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{17} +} + +func (x *GetAssetGroupResponse) GetAssetId() string { + if x != nil { + return x.AssetId + } + return "" +} + +func (x *GetAssetGroupResponse) GetAssetGroup() *AssetGroup { + if x != nil { + return x.AssetGroup + } + return nil +} + +type AssetGroup struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Quantity uint64 `protobuf:"varint,2,opt,name=quantity,proto3" json:"quantity,omitempty"` + Immutable bool `protobuf:"varint,3,opt,name=immutable,proto3" json:"immutable,omitempty"` + Metadata []*AssetMetadata `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetGroup) Reset() { + *x = AssetGroup{} + mi := &file_ark_v1_indexer_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetGroup) ProtoMessage() {} + +func (x *AssetGroup) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_indexer_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssetGroup.ProtoReflect.Descriptor instead. 
+func (*AssetGroup) Descriptor() ([]byte, []int) { + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{18} +} + +func (x *AssetGroup) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *AssetGroup) GetQuantity() uint64 { + if x != nil { + return x.Quantity + } + return 0 +} + +func (x *AssetGroup) GetImmutable() bool { + if x != nil { + return x.Immutable + } + return false +} + +func (x *AssetGroup) GetMetadata() []*AssetMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +type AssetMetadata struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetMetadata) Reset() { + *x = AssetMetadata{} + mi := &file_ark_v1_indexer_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetMetadata) ProtoMessage() {} + +func (x *AssetMetadata) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_indexer_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssetMetadata.ProtoReflect.Descriptor instead. 
+func (*AssetMetadata) Descriptor() ([]byte, []int) { + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{19} +} + +func (x *AssetMetadata) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *AssetMetadata) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + type GetBatchSweepTransactionsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` BatchOutpoint *IndexerOutpoint `protobuf:"bytes,1,opt,name=batch_outpoint,json=batchOutpoint,proto3" json:"batch_outpoint,omitempty"` @@ -1066,7 +1282,7 @@ type GetBatchSweepTransactionsRequest struct { func (x *GetBatchSweepTransactionsRequest) Reset() { *x = GetBatchSweepTransactionsRequest{} - mi := &file_ark_v1_indexer_proto_msgTypes[16] + mi := &file_ark_v1_indexer_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1078,7 +1294,7 @@ func (x *GetBatchSweepTransactionsRequest) String() string { func (*GetBatchSweepTransactionsRequest) ProtoMessage() {} func (x *GetBatchSweepTransactionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[16] + mi := &file_ark_v1_indexer_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1091,7 +1307,7 @@ func (x *GetBatchSweepTransactionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBatchSweepTransactionsRequest.ProtoReflect.Descriptor instead. 
func (*GetBatchSweepTransactionsRequest) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{16} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{20} } func (x *GetBatchSweepTransactionsRequest) GetBatchOutpoint() *IndexerOutpoint { @@ -1110,7 +1326,7 @@ type GetBatchSweepTransactionsResponse struct { func (x *GetBatchSweepTransactionsResponse) Reset() { *x = GetBatchSweepTransactionsResponse{} - mi := &file_ark_v1_indexer_proto_msgTypes[17] + mi := &file_ark_v1_indexer_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1122,7 +1338,7 @@ func (x *GetBatchSweepTransactionsResponse) String() string { func (*GetBatchSweepTransactionsResponse) ProtoMessage() {} func (x *GetBatchSweepTransactionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[17] + mi := &file_ark_v1_indexer_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1135,7 +1351,7 @@ func (x *GetBatchSweepTransactionsResponse) ProtoReflect() protoreflect.Message // Deprecated: Use GetBatchSweepTransactionsResponse.ProtoReflect.Descriptor instead. 
func (*GetBatchSweepTransactionsResponse) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{17} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{21} } func (x *GetBatchSweepTransactionsResponse) GetSweptBy() []string { @@ -1157,7 +1373,7 @@ type IndexerBatch struct { func (x *IndexerBatch) Reset() { *x = IndexerBatch{} - mi := &file_ark_v1_indexer_proto_msgTypes[18] + mi := &file_ark_v1_indexer_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1169,7 +1385,7 @@ func (x *IndexerBatch) String() string { func (*IndexerBatch) ProtoMessage() {} func (x *IndexerBatch) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[18] + mi := &file_ark_v1_indexer_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1182,7 +1398,7 @@ func (x *IndexerBatch) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerBatch.ProtoReflect.Descriptor instead. 
func (*IndexerBatch) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{18} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{22} } func (x *IndexerBatch) GetTotalOutputAmount() uint64 { @@ -1223,7 +1439,7 @@ type IndexerOutpoint struct { func (x *IndexerOutpoint) Reset() { *x = IndexerOutpoint{} - mi := &file_ark_v1_indexer_proto_msgTypes[19] + mi := &file_ark_v1_indexer_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1235,7 +1451,7 @@ func (x *IndexerOutpoint) String() string { func (*IndexerOutpoint) ProtoMessage() {} func (x *IndexerOutpoint) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[19] + mi := &file_ark_v1_indexer_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1248,7 +1464,7 @@ func (x *IndexerOutpoint) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerOutpoint.ProtoReflect.Descriptor instead. 
func (*IndexerOutpoint) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{19} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{23} } func (x *IndexerOutpoint) GetTxid() string { @@ -1275,7 +1491,7 @@ type IndexerNode struct { func (x *IndexerNode) Reset() { *x = IndexerNode{} - mi := &file_ark_v1_indexer_proto_msgTypes[20] + mi := &file_ark_v1_indexer_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1287,7 +1503,7 @@ func (x *IndexerNode) String() string { func (*IndexerNode) ProtoMessage() {} func (x *IndexerNode) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[20] + mi := &file_ark_v1_indexer_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1300,7 +1516,7 @@ func (x *IndexerNode) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerNode.ProtoReflect.Descriptor instead. 
func (*IndexerNode) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{20} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{24} } func (x *IndexerNode) GetTxid() string { @@ -1332,13 +1548,14 @@ type IndexerVtxo struct { CommitmentTxids []string `protobuf:"bytes,11,rep,name=commitment_txids,json=commitmentTxids,proto3" json:"commitment_txids,omitempty"` SettledBy string `protobuf:"bytes,12,opt,name=settled_by,json=settledBy,proto3" json:"settled_by,omitempty"` ArkTxid string `protobuf:"bytes,13,opt,name=ark_txid,json=arkTxid,proto3" json:"ark_txid,omitempty"` + Assets []*IndexerAsset `protobuf:"bytes,14,rep,name=assets,proto3" json:"assets,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *IndexerVtxo) Reset() { *x = IndexerVtxo{} - mi := &file_ark_v1_indexer_proto_msgTypes[21] + mi := &file_ark_v1_indexer_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1350,7 +1567,7 @@ func (x *IndexerVtxo) String() string { func (*IndexerVtxo) ProtoMessage() {} func (x *IndexerVtxo) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[21] + mi := &file_ark_v1_indexer_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1363,7 +1580,7 @@ func (x *IndexerVtxo) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerVtxo.ProtoReflect.Descriptor instead. 
func (*IndexerVtxo) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{21} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{25} } func (x *IndexerVtxo) GetOutpoint() *IndexerOutpoint { @@ -1457,6 +1674,65 @@ func (x *IndexerVtxo) GetArkTxid() string { return "" } +func (x *IndexerVtxo) GetAssets() []*IndexerAsset { + if x != nil { + return x.Assets + } + return nil +} + +type IndexerAsset struct { + state protoimpl.MessageState `protogen:"open.v1"` + AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IndexerAsset) Reset() { + *x = IndexerAsset{} + mi := &file_ark_v1_indexer_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IndexerAsset) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IndexerAsset) ProtoMessage() {} + +func (x *IndexerAsset) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_indexer_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IndexerAsset.ProtoReflect.Descriptor instead. 
+func (*IndexerAsset) Descriptor() ([]byte, []int) { + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{26} +} + +func (x *IndexerAsset) GetAssetId() string { + if x != nil { + return x.AssetId + } + return "" +} + +func (x *IndexerAsset) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + type IndexerChain struct { state protoimpl.MessageState `protogen:"open.v1"` Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` @@ -1470,7 +1746,7 @@ type IndexerChain struct { func (x *IndexerChain) Reset() { *x = IndexerChain{} - mi := &file_ark_v1_indexer_proto_msgTypes[22] + mi := &file_ark_v1_indexer_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1482,7 +1758,7 @@ func (x *IndexerChain) String() string { func (*IndexerChain) ProtoMessage() {} func (x *IndexerChain) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[22] + mi := &file_ark_v1_indexer_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1495,7 +1771,7 @@ func (x *IndexerChain) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerChain.ProtoReflect.Descriptor instead. 
func (*IndexerChain) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{22} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{27} } func (x *IndexerChain) GetTxid() string { @@ -1544,7 +1820,7 @@ type IndexerTxHistoryRecord struct { func (x *IndexerTxHistoryRecord) Reset() { *x = IndexerTxHistoryRecord{} - mi := &file_ark_v1_indexer_proto_msgTypes[23] + mi := &file_ark_v1_indexer_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1556,7 +1832,7 @@ func (x *IndexerTxHistoryRecord) String() string { func (*IndexerTxHistoryRecord) ProtoMessage() {} func (x *IndexerTxHistoryRecord) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[23] + mi := &file_ark_v1_indexer_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1569,7 +1845,7 @@ func (x *IndexerTxHistoryRecord) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerTxHistoryRecord.ProtoReflect.Descriptor instead. 
func (*IndexerTxHistoryRecord) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{23} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{28} } func (x *IndexerTxHistoryRecord) GetKey() isIndexerTxHistoryRecord_Key { @@ -1658,7 +1934,7 @@ type IndexerPageRequest struct { func (x *IndexerPageRequest) Reset() { *x = IndexerPageRequest{} - mi := &file_ark_v1_indexer_proto_msgTypes[24] + mi := &file_ark_v1_indexer_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1670,7 +1946,7 @@ func (x *IndexerPageRequest) String() string { func (*IndexerPageRequest) ProtoMessage() {} func (x *IndexerPageRequest) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[24] + mi := &file_ark_v1_indexer_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1683,7 +1959,7 @@ func (x *IndexerPageRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerPageRequest.ProtoReflect.Descriptor instead. 
func (*IndexerPageRequest) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{24} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{29} } func (x *IndexerPageRequest) GetSize() int32 { @@ -1711,7 +1987,7 @@ type IndexerPageResponse struct { func (x *IndexerPageResponse) Reset() { *x = IndexerPageResponse{} - mi := &file_ark_v1_indexer_proto_msgTypes[25] + mi := &file_ark_v1_indexer_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1723,7 +1999,7 @@ func (x *IndexerPageResponse) String() string { func (*IndexerPageResponse) ProtoMessage() {} func (x *IndexerPageResponse) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[25] + mi := &file_ark_v1_indexer_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1736,7 +2012,7 @@ func (x *IndexerPageResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerPageResponse.ProtoReflect.Descriptor instead. 
func (*IndexerPageResponse) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{25} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{30} } func (x *IndexerPageResponse) GetCurrent() int32 { @@ -1771,7 +2047,7 @@ type SubscribeForScriptsRequest struct { func (x *SubscribeForScriptsRequest) Reset() { *x = SubscribeForScriptsRequest{} - mi := &file_ark_v1_indexer_proto_msgTypes[26] + mi := &file_ark_v1_indexer_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1783,7 +2059,7 @@ func (x *SubscribeForScriptsRequest) String() string { func (*SubscribeForScriptsRequest) ProtoMessage() {} func (x *SubscribeForScriptsRequest) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[26] + mi := &file_ark_v1_indexer_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1796,7 +2072,7 @@ func (x *SubscribeForScriptsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SubscribeForScriptsRequest.ProtoReflect.Descriptor instead. 
func (*SubscribeForScriptsRequest) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{26} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{31} } func (x *SubscribeForScriptsRequest) GetScripts() []string { @@ -1822,7 +2098,7 @@ type SubscribeForScriptsResponse struct { func (x *SubscribeForScriptsResponse) Reset() { *x = SubscribeForScriptsResponse{} - mi := &file_ark_v1_indexer_proto_msgTypes[27] + mi := &file_ark_v1_indexer_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1834,7 +2110,7 @@ func (x *SubscribeForScriptsResponse) String() string { func (*SubscribeForScriptsResponse) ProtoMessage() {} func (x *SubscribeForScriptsResponse) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[27] + mi := &file_ark_v1_indexer_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1847,7 +2123,7 @@ func (x *SubscribeForScriptsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SubscribeForScriptsResponse.ProtoReflect.Descriptor instead. 
func (*SubscribeForScriptsResponse) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{27} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{32} } func (x *SubscribeForScriptsResponse) GetSubscriptionId() string { @@ -1868,7 +2144,7 @@ type UnsubscribeForScriptsRequest struct { func (x *UnsubscribeForScriptsRequest) Reset() { *x = UnsubscribeForScriptsRequest{} - mi := &file_ark_v1_indexer_proto_msgTypes[28] + mi := &file_ark_v1_indexer_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1880,7 +2156,7 @@ func (x *UnsubscribeForScriptsRequest) String() string { func (*UnsubscribeForScriptsRequest) ProtoMessage() {} func (x *UnsubscribeForScriptsRequest) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[28] + mi := &file_ark_v1_indexer_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1893,7 +2169,7 @@ func (x *UnsubscribeForScriptsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UnsubscribeForScriptsRequest.ProtoReflect.Descriptor instead. 
func (*UnsubscribeForScriptsRequest) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{28} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{33} } func (x *UnsubscribeForScriptsRequest) GetSubscriptionId() string { @@ -1918,7 +2194,7 @@ type UnsubscribeForScriptsResponse struct { func (x *UnsubscribeForScriptsResponse) Reset() { *x = UnsubscribeForScriptsResponse{} - mi := &file_ark_v1_indexer_proto_msgTypes[29] + mi := &file_ark_v1_indexer_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1930,7 +2206,7 @@ func (x *UnsubscribeForScriptsResponse) String() string { func (*UnsubscribeForScriptsResponse) ProtoMessage() {} func (x *UnsubscribeForScriptsResponse) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[29] + mi := &file_ark_v1_indexer_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1943,7 +2219,99 @@ func (x *UnsubscribeForScriptsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UnsubscribeForScriptsResponse.ProtoReflect.Descriptor instead. 
func (*UnsubscribeForScriptsResponse) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{29} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{34} +} + +type TeleportEvent struct { + state protoimpl.MessageState `protogen:"open.v1"` + TeleportHash string `protobuf:"bytes,1,opt,name=teleport_hash,json=teleportHash,proto3" json:"teleport_hash,omitempty"` + AnchorOutpoint string `protobuf:"bytes,2,opt,name=anchor_outpoint,json=anchorOutpoint,proto3" json:"anchor_outpoint,omitempty"` + AssetId string `protobuf:"bytes,3,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` + Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` + OutputVout uint32 `protobuf:"varint,5,opt,name=output_vout,json=outputVout,proto3" json:"output_vout,omitempty"` + CreatedAt int64 `protobuf:"varint,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ExpiresAt int64 `protobuf:"varint,7,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TeleportEvent) Reset() { + *x = TeleportEvent{} + mi := &file_ark_v1_indexer_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TeleportEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TeleportEvent) ProtoMessage() {} + +func (x *TeleportEvent) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_indexer_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TeleportEvent.ProtoReflect.Descriptor instead. 
+func (*TeleportEvent) Descriptor() ([]byte, []int) { + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{35} +} + +func (x *TeleportEvent) GetTeleportHash() string { + if x != nil { + return x.TeleportHash + } + return "" +} + +func (x *TeleportEvent) GetAnchorOutpoint() string { + if x != nil { + return x.AnchorOutpoint + } + return "" +} + +func (x *TeleportEvent) GetAssetId() string { + if x != nil { + return x.AssetId + } + return "" +} + +func (x *TeleportEvent) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *TeleportEvent) GetOutputVout() uint32 { + if x != nil { + return x.OutputVout + } + return 0 +} + +func (x *TeleportEvent) GetCreatedAt() int64 { + if x != nil { + return x.CreatedAt + } + return 0 +} + +func (x *TeleportEvent) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 } type GetSubscriptionRequest struct { @@ -1955,7 +2323,7 @@ type GetSubscriptionRequest struct { func (x *GetSubscriptionRequest) Reset() { *x = GetSubscriptionRequest{} - mi := &file_ark_v1_indexer_proto_msgTypes[30] + mi := &file_ark_v1_indexer_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1967,7 +2335,7 @@ func (x *GetSubscriptionRequest) String() string { func (*GetSubscriptionRequest) ProtoMessage() {} func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[30] + mi := &file_ark_v1_indexer_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1980,7 +2348,7 @@ func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead. 
func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{30} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{36} } func (x *GetSubscriptionRequest) GetSubscriptionId() string { @@ -2003,7 +2371,7 @@ type GetSubscriptionResponse struct { func (x *GetSubscriptionResponse) Reset() { *x = GetSubscriptionResponse{} - mi := &file_ark_v1_indexer_proto_msgTypes[31] + mi := &file_ark_v1_indexer_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2015,7 +2383,7 @@ func (x *GetSubscriptionResponse) String() string { func (*GetSubscriptionResponse) ProtoMessage() {} func (x *GetSubscriptionResponse) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[31] + mi := &file_ark_v1_indexer_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2028,7 +2396,7 @@ func (x *GetSubscriptionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSubscriptionResponse.ProtoReflect.Descriptor instead. 
func (*GetSubscriptionResponse) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{31} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{37} } func (x *GetSubscriptionResponse) GetData() isGetSubscriptionResponse_Data { @@ -2082,7 +2450,7 @@ type IndexerTxData struct { func (x *IndexerTxData) Reset() { *x = IndexerTxData{} - mi := &file_ark_v1_indexer_proto_msgTypes[32] + mi := &file_ark_v1_indexer_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2094,7 +2462,7 @@ func (x *IndexerTxData) String() string { func (*IndexerTxData) ProtoMessage() {} func (x *IndexerTxData) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[32] + mi := &file_ark_v1_indexer_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2107,7 +2475,7 @@ func (x *IndexerTxData) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerTxData.ProtoReflect.Descriptor instead. 
func (*IndexerTxData) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{32} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{38} } func (x *IndexerTxData) GetTxid() string { @@ -2132,7 +2500,7 @@ type IndexerHeartbeat struct { func (x *IndexerHeartbeat) Reset() { *x = IndexerHeartbeat{} - mi := &file_ark_v1_indexer_proto_msgTypes[33] + mi := &file_ark_v1_indexer_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2144,7 +2512,7 @@ func (x *IndexerHeartbeat) String() string { func (*IndexerHeartbeat) ProtoMessage() {} func (x *IndexerHeartbeat) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[33] + mi := &file_ark_v1_indexer_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2157,7 +2525,7 @@ func (x *IndexerHeartbeat) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerHeartbeat.ProtoReflect.Descriptor instead. 
func (*IndexerHeartbeat) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{33} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{39} } type IndexerSubscriptionEvent struct { @@ -2169,13 +2537,16 @@ type IndexerSubscriptionEvent struct { Tx string `protobuf:"bytes,5,opt,name=tx,proto3" json:"tx,omitempty"` CheckpointTxs map[string]*IndexerTxData `protobuf:"bytes,6,rep,name=checkpoint_txs,json=checkpointTxs,proto3" json:"checkpoint_txs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` SweptVtxos []*IndexerVtxo `protobuf:"bytes,7,rep,name=swept_vtxos,json=sweptVtxos,proto3" json:"swept_vtxos,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // List of teleport events involved in the transaction + TeleportEvents []*TeleportEvent `protobuf:"bytes,8,rep,name=teleport_events,json=teleportEvents,proto3" json:"teleport_events,omitempty"` + TeleportHashes []string `protobuf:"bytes,9,rep,name=teleport_hashes,json=teleportHashes,proto3" json:"teleport_hashes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *IndexerSubscriptionEvent) Reset() { *x = IndexerSubscriptionEvent{} - mi := &file_ark_v1_indexer_proto_msgTypes[34] + mi := &file_ark_v1_indexer_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2187,7 +2558,7 @@ func (x *IndexerSubscriptionEvent) String() string { func (*IndexerSubscriptionEvent) ProtoMessage() {} func (x *IndexerSubscriptionEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_indexer_proto_msgTypes[34] + mi := &file_ark_v1_indexer_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2200,7 +2571,7 @@ func (x *IndexerSubscriptionEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use IndexerSubscriptionEvent.ProtoReflect.Descriptor instead. 
func (*IndexerSubscriptionEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_indexer_proto_rawDescGZIP(), []int{34} + return file_ark_v1_indexer_proto_rawDescGZIP(), []int{40} } func (x *IndexerSubscriptionEvent) GetTxid() string { @@ -2252,6 +2623,20 @@ func (x *IndexerSubscriptionEvent) GetSweptVtxos() []*IndexerVtxo { return nil } +func (x *IndexerSubscriptionEvent) GetTeleportEvents() []*TeleportEvent { + if x != nil { + return x.TeleportEvents + } + return nil +} + +func (x *IndexerSubscriptionEvent) GetTeleportHashes() []string { + if x != nil { + return x.TeleportHashes + } + return nil +} + var File_ark_v1_indexer_proto protoreflect.FileDescriptor const file_ark_v1_indexer_proto_rawDesc = "" + @@ -2322,7 +2707,22 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\x04page\x18\x02 \x01(\v2\x1a.ark.v1.IndexerPageRequestR\x04page\"Z\n" + "\x15GetVirtualTxsResponse\x12\x10\n" + "\x03txs\x18\x01 \x03(\tR\x03txs\x12/\n" + - "\x04page\x18\x02 \x01(\v2\x1b.ark.v1.IndexerPageResponseR\x04page\"b\n" + + "\x04page\x18\x02 \x01(\v2\x1b.ark.v1.IndexerPageResponseR\x04page\"1\n" + + "\x14GetAssetGroupRequest\x12\x19\n" + + "\basset_id\x18\x01 \x01(\tR\aassetId\"g\n" + + "\x15GetAssetGroupResponse\x12\x19\n" + + "\basset_id\x18\x01 \x01(\tR\aassetId\x123\n" + + "\vasset_group\x18\x02 \x01(\v2\x12.ark.v1.AssetGroupR\n" + + "assetGroup\"\x89\x01\n" + + "\n" + + "AssetGroup\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x1a\n" + + "\bquantity\x18\x02 \x01(\x04R\bquantity\x12\x1c\n" + + "\timmutable\x18\x03 \x01(\bR\timmutable\x121\n" + + "\bmetadata\x18\x04 \x03(\v2\x15.ark.v1.AssetMetadataR\bmetadata\"7\n" + + "\rAssetMetadata\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\"b\n" + " GetBatchSweepTransactionsRequest\x12>\n" + "\x0ebatch_outpoint\x18\x01 \x01(\v2\x17.ark.v1.IndexerOutpointR\rbatchOutpoint\">\n" + "!GetBatchSweepTransactionsResponse\x12\x19\n" + @@ -2341,7 +2741,7 @@ const 
file_ark_v1_indexer_proto_rawDesc = "" + "\bchildren\x18\x02 \x03(\v2!.ark.v1.IndexerNode.ChildrenEntryR\bchildren\x1a;\n" + "\rChildrenEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\rR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb0\x03\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xde\x03\n" + "\vIndexerVtxo\x123\n" + "\boutpoint\x18\x01 \x01(\v2\x17.ark.v1.IndexerOutpointR\boutpoint\x12\x1d\n" + "\n" + @@ -2360,7 +2760,11 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\x10commitment_txids\x18\v \x03(\tR\x0fcommitmentTxids\x12\x1d\n" + "\n" + "settled_by\x18\f \x01(\tR\tsettledBy\x12\x19\n" + - "\bark_txid\x18\r \x01(\tR\aarkTxid\"\x8b\x01\n" + + "\bark_txid\x18\r \x01(\tR\aarkTxid\x12,\n" + + "\x06assets\x18\x0e \x03(\v2\x14.ark.v1.IndexerAssetR\x06assets\"A\n" + + "\fIndexerAsset\x12\x19\n" + + "\basset_id\x18\x01 \x01(\tR\aassetId\x12\x16\n" + + "\x06amount\x18\x02 \x01(\x04R\x06amount\"\x8b\x01\n" + "\fIndexerChain\x12\x12\n" + "\x04txid\x18\x01 \x01(\tR\x04txid\x12\x1d\n" + "\n" + @@ -2394,7 +2798,18 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\x1cUnsubscribeForScriptsRequest\x12'\n" + "\x0fsubscription_id\x18\x01 \x01(\tR\x0esubscriptionId\x12\x18\n" + "\ascripts\x18\x02 \x03(\tR\ascripts\"\x1f\n" + - "\x1dUnsubscribeForScriptsResponse\"A\n" + + "\x1dUnsubscribeForScriptsResponse\"\xef\x01\n" + + "\rTeleportEvent\x12#\n" + + "\rteleport_hash\x18\x01 \x01(\tR\fteleportHash\x12'\n" + + "\x0fanchor_outpoint\x18\x02 \x01(\tR\x0eanchorOutpoint\x12\x19\n" + + "\basset_id\x18\x03 \x01(\tR\aassetId\x12\x16\n" + + "\x06amount\x18\x04 \x01(\x04R\x06amount\x12\x1f\n" + + "\voutput_vout\x18\x05 \x01(\rR\n" + + "outputVout\x12\x1d\n" + + "\n" + + "created_at\x18\x06 \x01(\x03R\tcreatedAt\x12\x1d\n" + + "\n" + + "expires_at\x18\a \x01(\x03R\texpiresAt\"A\n" + "\x16GetSubscriptionRequest\x12'\n" + "\x0fsubscription_id\x18\x01 \x01(\tR\x0esubscriptionId\"\x95\x01\n" + "\x17GetSubscriptionResponse\x128\n" + @@ -2404,7 +2819,7 @@ 
const file_ark_v1_indexer_proto_rawDesc = "" + "\rIndexerTxData\x12\x12\n" + "\x04txid\x18\x01 \x01(\tR\x04txid\x12\x0e\n" + "\x02tx\x18\x02 \x01(\tR\x02tx\"\x12\n" + - "\x10IndexerHeartbeat\"\xab\x03\n" + + "\x10IndexerHeartbeat\"\x94\x04\n" + "\x18IndexerSubscriptionEvent\x12\x12\n" + "\x04txid\x18\x01 \x01(\tR\x04txid\x12\x18\n" + "\ascripts\x18\x02 \x03(\tR\ascripts\x120\n" + @@ -2414,7 +2829,9 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\x02tx\x18\x05 \x01(\tR\x02tx\x12Z\n" + "\x0echeckpoint_txs\x18\x06 \x03(\v23.ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntryR\rcheckpointTxs\x124\n" + "\vswept_vtxos\x18\a \x03(\v2\x13.ark.v1.IndexerVtxoR\n" + - "sweptVtxos\x1aW\n" + + "sweptVtxos\x12>\n" + + "\x0fteleport_events\x18\b \x03(\v2\x15.ark.v1.TeleportEventR\x0eteleportEvents\x12'\n" + + "\x0fteleport_hashes\x18\t \x03(\tR\x0eteleportHashes\x1aW\n" + "\x12CheckpointTxsEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12+\n" + "\x05value\x18\x02 \x01(\v2\x15.ark.v1.IndexerTxDataR\x05value:\x028\x01*h\n" + @@ -2427,7 +2844,7 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\"INDEXER_CHAINED_TX_TYPE_COMMITMENT\x10\x01\x12\x1f\n" + "\x1bINDEXER_CHAINED_TX_TYPE_ARK\x10\x02\x12 \n" + "\x1cINDEXER_CHAINED_TX_TYPE_TREE\x10\x03\x12&\n" + - "\"INDEXER_CHAINED_TX_TYPE_CHECKPOINT\x10\x042\x82\r\n" + + "\"INDEXER_CHAINED_TX_TYPE_CHECKPOINT\x10\x042\xf8\r\n" + "\x0eIndexerService\x12x\n" + "\x0fGetCommitmentTx\x12\x1e.ark.v1.GetCommitmentTxRequest\x1a\x1f.ark.v1.GetCommitmentTxResponse\"$\xb2J!\x12\x1f/v1/indexer/commitmentTx/{txid}\x12}\n" + "\rGetForfeitTxs\x12\x1c.ark.v1.GetForfeitTxsRequest\x1a\x1d.ark.v1.GetForfeitTxsResponse\"/\xb2J,\x12*/v1/indexer/commitmentTx/{txid}/forfeitTxs\x12}\n" + @@ -2436,7 +2853,8 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\x11GetVtxoTreeLeaves\x12 .ark.v1.GetVtxoTreeLeavesRequest\x1a!.ark.v1.GetVtxoTreeLeavesResponse\"N\xb2JK\x12I/v1/indexer/batch/{batch_outpoint.txid}/{batch_outpoint.vout}/tree/leaves\x12U\n" + 
"\bGetVtxos\x12\x17.ark.v1.GetVtxosRequest\x1a\x18.ark.v1.GetVtxosResponse\"\x16\xb2J\x13\x12\x11/v1/indexer/vtxos\x12\x86\x01\n" + "\fGetVtxoChain\x12\x1b.ark.v1.GetVtxoChainRequest\x1a\x1c.ark.v1.GetVtxoChainResponse\";\xb2J8\x126/v1/indexer/vtxo/{outpoint.txid}/{outpoint.vout}/chain\x12p\n" + - "\rGetVirtualTxs\x12\x1c.ark.v1.GetVirtualTxsRequest\x1a\x1d.ark.v1.GetVirtualTxsResponse\"\"\xb2J\x1f\x12\x1d/v1/indexer/virtualTx/{txids}\x12\xbd\x01\n" + + "\rGetVirtualTxs\x12\x1c.ark.v1.GetVirtualTxsRequest\x1a\x1d.ark.v1.GetVirtualTxsResponse\"\"\xb2J\x1f\x12\x1d/v1/indexer/virtualTx/{txids}\x12t\n" + + "\rGetAssetGroup\x12\x1c.ark.v1.GetAssetGroupRequest\x1a\x1d.ark.v1.GetAssetGroupResponse\"&\xb2J#\x12!/v1/indexer/assetGroup/{asset_id}\x12\xbd\x01\n" + "\x19GetBatchSweepTransactions\x12(.ark.v1.GetBatchSweepTransactionsRequest\x1a).ark.v1.GetBatchSweepTransactionsResponse\"K\xb2JH\x12F/v1/indexer/batch/{batch_outpoint.txid}/{batch_outpoint.vout}/sweepTxs\x12\x84\x01\n" + "\x13SubscribeForScripts\x12\".ark.v1.SubscribeForScriptsRequest\x1a#.ark.v1.SubscribeForScriptsResponse\"$\xb2J!B\x01*\"\x1c/v1/indexer/script/subscribe\x12\x8c\x01\n" + "\x15UnsubscribeForScripts\x12$.ark.v1.UnsubscribeForScriptsRequest\x1a%.ark.v1.UnsubscribeForScriptsResponse\"&\xb2J#B\x01*\"\x1e/v1/indexer/script/unsubscribe\x12\x92\x01\n" + @@ -2457,7 +2875,7 @@ func file_ark_v1_indexer_proto_rawDescGZIP() []byte { } var file_ark_v1_indexer_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_ark_v1_indexer_proto_msgTypes = make([]protoimpl.MessageInfo, 38) +var file_ark_v1_indexer_proto_msgTypes = make([]protoimpl.MessageInfo, 44) var file_ark_v1_indexer_proto_goTypes = []any{ (IndexerTxType)(0), // 0: ark.v1.IndexerTxType (IndexerChainedTxType)(0), // 1: ark.v1.IndexerChainedTxType @@ -2477,95 +2895,107 @@ var file_ark_v1_indexer_proto_goTypes = []any{ (*GetVtxoChainResponse)(nil), // 15: ark.v1.GetVtxoChainResponse (*GetVirtualTxsRequest)(nil), // 16: ark.v1.GetVirtualTxsRequest 
(*GetVirtualTxsResponse)(nil), // 17: ark.v1.GetVirtualTxsResponse - (*GetBatchSweepTransactionsRequest)(nil), // 18: ark.v1.GetBatchSweepTransactionsRequest - (*GetBatchSweepTransactionsResponse)(nil), // 19: ark.v1.GetBatchSweepTransactionsResponse - (*IndexerBatch)(nil), // 20: ark.v1.IndexerBatch - (*IndexerOutpoint)(nil), // 21: ark.v1.IndexerOutpoint - (*IndexerNode)(nil), // 22: ark.v1.IndexerNode - (*IndexerVtxo)(nil), // 23: ark.v1.IndexerVtxo - (*IndexerChain)(nil), // 24: ark.v1.IndexerChain - (*IndexerTxHistoryRecord)(nil), // 25: ark.v1.IndexerTxHistoryRecord - (*IndexerPageRequest)(nil), // 26: ark.v1.IndexerPageRequest - (*IndexerPageResponse)(nil), // 27: ark.v1.IndexerPageResponse - (*SubscribeForScriptsRequest)(nil), // 28: ark.v1.SubscribeForScriptsRequest - (*SubscribeForScriptsResponse)(nil), // 29: ark.v1.SubscribeForScriptsResponse - (*UnsubscribeForScriptsRequest)(nil), // 30: ark.v1.UnsubscribeForScriptsRequest - (*UnsubscribeForScriptsResponse)(nil), // 31: ark.v1.UnsubscribeForScriptsResponse - (*GetSubscriptionRequest)(nil), // 32: ark.v1.GetSubscriptionRequest - (*GetSubscriptionResponse)(nil), // 33: ark.v1.GetSubscriptionResponse - (*IndexerTxData)(nil), // 34: ark.v1.IndexerTxData - (*IndexerHeartbeat)(nil), // 35: ark.v1.IndexerHeartbeat - (*IndexerSubscriptionEvent)(nil), // 36: ark.v1.IndexerSubscriptionEvent - nil, // 37: ark.v1.GetCommitmentTxResponse.BatchesEntry - nil, // 38: ark.v1.IndexerNode.ChildrenEntry - nil, // 39: ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntry + (*GetAssetGroupRequest)(nil), // 18: ark.v1.GetAssetGroupRequest + (*GetAssetGroupResponse)(nil), // 19: ark.v1.GetAssetGroupResponse + (*AssetGroup)(nil), // 20: ark.v1.AssetGroup + (*AssetMetadata)(nil), // 21: ark.v1.AssetMetadata + (*GetBatchSweepTransactionsRequest)(nil), // 22: ark.v1.GetBatchSweepTransactionsRequest + (*GetBatchSweepTransactionsResponse)(nil), // 23: ark.v1.GetBatchSweepTransactionsResponse + (*IndexerBatch)(nil), // 24: 
ark.v1.IndexerBatch + (*IndexerOutpoint)(nil), // 25: ark.v1.IndexerOutpoint + (*IndexerNode)(nil), // 26: ark.v1.IndexerNode + (*IndexerVtxo)(nil), // 27: ark.v1.IndexerVtxo + (*IndexerAsset)(nil), // 28: ark.v1.IndexerAsset + (*IndexerChain)(nil), // 29: ark.v1.IndexerChain + (*IndexerTxHistoryRecord)(nil), // 30: ark.v1.IndexerTxHistoryRecord + (*IndexerPageRequest)(nil), // 31: ark.v1.IndexerPageRequest + (*IndexerPageResponse)(nil), // 32: ark.v1.IndexerPageResponse + (*SubscribeForScriptsRequest)(nil), // 33: ark.v1.SubscribeForScriptsRequest + (*SubscribeForScriptsResponse)(nil), // 34: ark.v1.SubscribeForScriptsResponse + (*UnsubscribeForScriptsRequest)(nil), // 35: ark.v1.UnsubscribeForScriptsRequest + (*UnsubscribeForScriptsResponse)(nil), // 36: ark.v1.UnsubscribeForScriptsResponse + (*TeleportEvent)(nil), // 37: ark.v1.TeleportEvent + (*GetSubscriptionRequest)(nil), // 38: ark.v1.GetSubscriptionRequest + (*GetSubscriptionResponse)(nil), // 39: ark.v1.GetSubscriptionResponse + (*IndexerTxData)(nil), // 40: ark.v1.IndexerTxData + (*IndexerHeartbeat)(nil), // 41: ark.v1.IndexerHeartbeat + (*IndexerSubscriptionEvent)(nil), // 42: ark.v1.IndexerSubscriptionEvent + nil, // 43: ark.v1.GetCommitmentTxResponse.BatchesEntry + nil, // 44: ark.v1.IndexerNode.ChildrenEntry + nil, // 45: ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntry } var file_ark_v1_indexer_proto_depIdxs = []int32{ - 37, // 0: ark.v1.GetCommitmentTxResponse.batches:type_name -> ark.v1.GetCommitmentTxResponse.BatchesEntry - 26, // 1: ark.v1.GetForfeitTxsRequest.page:type_name -> ark.v1.IndexerPageRequest - 27, // 2: ark.v1.GetForfeitTxsResponse.page:type_name -> ark.v1.IndexerPageResponse - 26, // 3: ark.v1.GetConnectorsRequest.page:type_name -> ark.v1.IndexerPageRequest - 22, // 4: ark.v1.GetConnectorsResponse.connectors:type_name -> ark.v1.IndexerNode - 27, // 5: ark.v1.GetConnectorsResponse.page:type_name -> ark.v1.IndexerPageResponse - 21, // 6: 
ark.v1.GetVtxoTreeRequest.batch_outpoint:type_name -> ark.v1.IndexerOutpoint - 26, // 7: ark.v1.GetVtxoTreeRequest.page:type_name -> ark.v1.IndexerPageRequest - 22, // 8: ark.v1.GetVtxoTreeResponse.vtxo_tree:type_name -> ark.v1.IndexerNode - 27, // 9: ark.v1.GetVtxoTreeResponse.page:type_name -> ark.v1.IndexerPageResponse - 21, // 10: ark.v1.GetVtxoTreeLeavesRequest.batch_outpoint:type_name -> ark.v1.IndexerOutpoint - 26, // 11: ark.v1.GetVtxoTreeLeavesRequest.page:type_name -> ark.v1.IndexerPageRequest - 21, // 12: ark.v1.GetVtxoTreeLeavesResponse.leaves:type_name -> ark.v1.IndexerOutpoint - 27, // 13: ark.v1.GetVtxoTreeLeavesResponse.page:type_name -> ark.v1.IndexerPageResponse - 26, // 14: ark.v1.GetVtxosRequest.page:type_name -> ark.v1.IndexerPageRequest - 23, // 15: ark.v1.GetVtxosResponse.vtxos:type_name -> ark.v1.IndexerVtxo - 27, // 16: ark.v1.GetVtxosResponse.page:type_name -> ark.v1.IndexerPageResponse - 21, // 17: ark.v1.GetVtxoChainRequest.outpoint:type_name -> ark.v1.IndexerOutpoint - 26, // 18: ark.v1.GetVtxoChainRequest.page:type_name -> ark.v1.IndexerPageRequest - 24, // 19: ark.v1.GetVtxoChainResponse.chain:type_name -> ark.v1.IndexerChain - 27, // 20: ark.v1.GetVtxoChainResponse.page:type_name -> ark.v1.IndexerPageResponse - 26, // 21: ark.v1.GetVirtualTxsRequest.page:type_name -> ark.v1.IndexerPageRequest - 27, // 22: ark.v1.GetVirtualTxsResponse.page:type_name -> ark.v1.IndexerPageResponse - 21, // 23: ark.v1.GetBatchSweepTransactionsRequest.batch_outpoint:type_name -> ark.v1.IndexerOutpoint - 38, // 24: ark.v1.IndexerNode.children:type_name -> ark.v1.IndexerNode.ChildrenEntry - 21, // 25: ark.v1.IndexerVtxo.outpoint:type_name -> ark.v1.IndexerOutpoint - 1, // 26: ark.v1.IndexerChain.type:type_name -> ark.v1.IndexerChainedTxType - 0, // 27: ark.v1.IndexerTxHistoryRecord.type:type_name -> ark.v1.IndexerTxType - 35, // 28: ark.v1.GetSubscriptionResponse.heartbeat:type_name -> ark.v1.IndexerHeartbeat - 36, // 29: 
ark.v1.GetSubscriptionResponse.event:type_name -> ark.v1.IndexerSubscriptionEvent - 23, // 30: ark.v1.IndexerSubscriptionEvent.new_vtxos:type_name -> ark.v1.IndexerVtxo - 23, // 31: ark.v1.IndexerSubscriptionEvent.spent_vtxos:type_name -> ark.v1.IndexerVtxo - 39, // 32: ark.v1.IndexerSubscriptionEvent.checkpoint_txs:type_name -> ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntry - 23, // 33: ark.v1.IndexerSubscriptionEvent.swept_vtxos:type_name -> ark.v1.IndexerVtxo - 20, // 34: ark.v1.GetCommitmentTxResponse.BatchesEntry.value:type_name -> ark.v1.IndexerBatch - 34, // 35: ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntry.value:type_name -> ark.v1.IndexerTxData - 2, // 36: ark.v1.IndexerService.GetCommitmentTx:input_type -> ark.v1.GetCommitmentTxRequest - 4, // 37: ark.v1.IndexerService.GetForfeitTxs:input_type -> ark.v1.GetForfeitTxsRequest - 6, // 38: ark.v1.IndexerService.GetConnectors:input_type -> ark.v1.GetConnectorsRequest - 8, // 39: ark.v1.IndexerService.GetVtxoTree:input_type -> ark.v1.GetVtxoTreeRequest - 10, // 40: ark.v1.IndexerService.GetVtxoTreeLeaves:input_type -> ark.v1.GetVtxoTreeLeavesRequest - 12, // 41: ark.v1.IndexerService.GetVtxos:input_type -> ark.v1.GetVtxosRequest - 14, // 42: ark.v1.IndexerService.GetVtxoChain:input_type -> ark.v1.GetVtxoChainRequest - 16, // 43: ark.v1.IndexerService.GetVirtualTxs:input_type -> ark.v1.GetVirtualTxsRequest - 18, // 44: ark.v1.IndexerService.GetBatchSweepTransactions:input_type -> ark.v1.GetBatchSweepTransactionsRequest - 28, // 45: ark.v1.IndexerService.SubscribeForScripts:input_type -> ark.v1.SubscribeForScriptsRequest - 30, // 46: ark.v1.IndexerService.UnsubscribeForScripts:input_type -> ark.v1.UnsubscribeForScriptsRequest - 32, // 47: ark.v1.IndexerService.GetSubscription:input_type -> ark.v1.GetSubscriptionRequest - 3, // 48: ark.v1.IndexerService.GetCommitmentTx:output_type -> ark.v1.GetCommitmentTxResponse - 5, // 49: ark.v1.IndexerService.GetForfeitTxs:output_type -> ark.v1.GetForfeitTxsResponse 
- 7, // 50: ark.v1.IndexerService.GetConnectors:output_type -> ark.v1.GetConnectorsResponse - 9, // 51: ark.v1.IndexerService.GetVtxoTree:output_type -> ark.v1.GetVtxoTreeResponse - 11, // 52: ark.v1.IndexerService.GetVtxoTreeLeaves:output_type -> ark.v1.GetVtxoTreeLeavesResponse - 13, // 53: ark.v1.IndexerService.GetVtxos:output_type -> ark.v1.GetVtxosResponse - 15, // 54: ark.v1.IndexerService.GetVtxoChain:output_type -> ark.v1.GetVtxoChainResponse - 17, // 55: ark.v1.IndexerService.GetVirtualTxs:output_type -> ark.v1.GetVirtualTxsResponse - 19, // 56: ark.v1.IndexerService.GetBatchSweepTransactions:output_type -> ark.v1.GetBatchSweepTransactionsResponse - 29, // 57: ark.v1.IndexerService.SubscribeForScripts:output_type -> ark.v1.SubscribeForScriptsResponse - 31, // 58: ark.v1.IndexerService.UnsubscribeForScripts:output_type -> ark.v1.UnsubscribeForScriptsResponse - 33, // 59: ark.v1.IndexerService.GetSubscription:output_type -> ark.v1.GetSubscriptionResponse - 48, // [48:60] is the sub-list for method output_type - 36, // [36:48] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // [36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + 43, // 0: ark.v1.GetCommitmentTxResponse.batches:type_name -> ark.v1.GetCommitmentTxResponse.BatchesEntry + 31, // 1: ark.v1.GetForfeitTxsRequest.page:type_name -> ark.v1.IndexerPageRequest + 32, // 2: ark.v1.GetForfeitTxsResponse.page:type_name -> ark.v1.IndexerPageResponse + 31, // 3: ark.v1.GetConnectorsRequest.page:type_name -> ark.v1.IndexerPageRequest + 26, // 4: ark.v1.GetConnectorsResponse.connectors:type_name -> ark.v1.IndexerNode + 32, // 5: ark.v1.GetConnectorsResponse.page:type_name -> ark.v1.IndexerPageResponse + 25, // 6: ark.v1.GetVtxoTreeRequest.batch_outpoint:type_name -> ark.v1.IndexerOutpoint + 31, // 7: ark.v1.GetVtxoTreeRequest.page:type_name -> ark.v1.IndexerPageRequest + 26, // 8: 
ark.v1.GetVtxoTreeResponse.vtxo_tree:type_name -> ark.v1.IndexerNode + 32, // 9: ark.v1.GetVtxoTreeResponse.page:type_name -> ark.v1.IndexerPageResponse + 25, // 10: ark.v1.GetVtxoTreeLeavesRequest.batch_outpoint:type_name -> ark.v1.IndexerOutpoint + 31, // 11: ark.v1.GetVtxoTreeLeavesRequest.page:type_name -> ark.v1.IndexerPageRequest + 25, // 12: ark.v1.GetVtxoTreeLeavesResponse.leaves:type_name -> ark.v1.IndexerOutpoint + 32, // 13: ark.v1.GetVtxoTreeLeavesResponse.page:type_name -> ark.v1.IndexerPageResponse + 31, // 14: ark.v1.GetVtxosRequest.page:type_name -> ark.v1.IndexerPageRequest + 27, // 15: ark.v1.GetVtxosResponse.vtxos:type_name -> ark.v1.IndexerVtxo + 32, // 16: ark.v1.GetVtxosResponse.page:type_name -> ark.v1.IndexerPageResponse + 25, // 17: ark.v1.GetVtxoChainRequest.outpoint:type_name -> ark.v1.IndexerOutpoint + 31, // 18: ark.v1.GetVtxoChainRequest.page:type_name -> ark.v1.IndexerPageRequest + 29, // 19: ark.v1.GetVtxoChainResponse.chain:type_name -> ark.v1.IndexerChain + 32, // 20: ark.v1.GetVtxoChainResponse.page:type_name -> ark.v1.IndexerPageResponse + 31, // 21: ark.v1.GetVirtualTxsRequest.page:type_name -> ark.v1.IndexerPageRequest + 32, // 22: ark.v1.GetVirtualTxsResponse.page:type_name -> ark.v1.IndexerPageResponse + 20, // 23: ark.v1.GetAssetGroupResponse.asset_group:type_name -> ark.v1.AssetGroup + 21, // 24: ark.v1.AssetGroup.metadata:type_name -> ark.v1.AssetMetadata + 25, // 25: ark.v1.GetBatchSweepTransactionsRequest.batch_outpoint:type_name -> ark.v1.IndexerOutpoint + 44, // 26: ark.v1.IndexerNode.children:type_name -> ark.v1.IndexerNode.ChildrenEntry + 25, // 27: ark.v1.IndexerVtxo.outpoint:type_name -> ark.v1.IndexerOutpoint + 28, // 28: ark.v1.IndexerVtxo.assets:type_name -> ark.v1.IndexerAsset + 1, // 29: ark.v1.IndexerChain.type:type_name -> ark.v1.IndexerChainedTxType + 0, // 30: ark.v1.IndexerTxHistoryRecord.type:type_name -> ark.v1.IndexerTxType + 41, // 31: ark.v1.GetSubscriptionResponse.heartbeat:type_name -> 
ark.v1.IndexerHeartbeat + 42, // 32: ark.v1.GetSubscriptionResponse.event:type_name -> ark.v1.IndexerSubscriptionEvent + 27, // 33: ark.v1.IndexerSubscriptionEvent.new_vtxos:type_name -> ark.v1.IndexerVtxo + 27, // 34: ark.v1.IndexerSubscriptionEvent.spent_vtxos:type_name -> ark.v1.IndexerVtxo + 45, // 35: ark.v1.IndexerSubscriptionEvent.checkpoint_txs:type_name -> ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntry + 27, // 36: ark.v1.IndexerSubscriptionEvent.swept_vtxos:type_name -> ark.v1.IndexerVtxo + 37, // 37: ark.v1.IndexerSubscriptionEvent.teleport_events:type_name -> ark.v1.TeleportEvent + 24, // 38: ark.v1.GetCommitmentTxResponse.BatchesEntry.value:type_name -> ark.v1.IndexerBatch + 40, // 39: ark.v1.IndexerSubscriptionEvent.CheckpointTxsEntry.value:type_name -> ark.v1.IndexerTxData + 2, // 40: ark.v1.IndexerService.GetCommitmentTx:input_type -> ark.v1.GetCommitmentTxRequest + 4, // 41: ark.v1.IndexerService.GetForfeitTxs:input_type -> ark.v1.GetForfeitTxsRequest + 6, // 42: ark.v1.IndexerService.GetConnectors:input_type -> ark.v1.GetConnectorsRequest + 8, // 43: ark.v1.IndexerService.GetVtxoTree:input_type -> ark.v1.GetVtxoTreeRequest + 10, // 44: ark.v1.IndexerService.GetVtxoTreeLeaves:input_type -> ark.v1.GetVtxoTreeLeavesRequest + 12, // 45: ark.v1.IndexerService.GetVtxos:input_type -> ark.v1.GetVtxosRequest + 14, // 46: ark.v1.IndexerService.GetVtxoChain:input_type -> ark.v1.GetVtxoChainRequest + 16, // 47: ark.v1.IndexerService.GetVirtualTxs:input_type -> ark.v1.GetVirtualTxsRequest + 18, // 48: ark.v1.IndexerService.GetAssetGroup:input_type -> ark.v1.GetAssetGroupRequest + 22, // 49: ark.v1.IndexerService.GetBatchSweepTransactions:input_type -> ark.v1.GetBatchSweepTransactionsRequest + 33, // 50: ark.v1.IndexerService.SubscribeForScripts:input_type -> ark.v1.SubscribeForScriptsRequest + 35, // 51: ark.v1.IndexerService.UnsubscribeForScripts:input_type -> ark.v1.UnsubscribeForScriptsRequest + 38, // 52: 
ark.v1.IndexerService.GetSubscription:input_type -> ark.v1.GetSubscriptionRequest + 3, // 53: ark.v1.IndexerService.GetCommitmentTx:output_type -> ark.v1.GetCommitmentTxResponse + 5, // 54: ark.v1.IndexerService.GetForfeitTxs:output_type -> ark.v1.GetForfeitTxsResponse + 7, // 55: ark.v1.IndexerService.GetConnectors:output_type -> ark.v1.GetConnectorsResponse + 9, // 56: ark.v1.IndexerService.GetVtxoTree:output_type -> ark.v1.GetVtxoTreeResponse + 11, // 57: ark.v1.IndexerService.GetVtxoTreeLeaves:output_type -> ark.v1.GetVtxoTreeLeavesResponse + 13, // 58: ark.v1.IndexerService.GetVtxos:output_type -> ark.v1.GetVtxosResponse + 15, // 59: ark.v1.IndexerService.GetVtxoChain:output_type -> ark.v1.GetVtxoChainResponse + 17, // 60: ark.v1.IndexerService.GetVirtualTxs:output_type -> ark.v1.GetVirtualTxsResponse + 19, // 61: ark.v1.IndexerService.GetAssetGroup:output_type -> ark.v1.GetAssetGroupResponse + 23, // 62: ark.v1.IndexerService.GetBatchSweepTransactions:output_type -> ark.v1.GetBatchSweepTransactionsResponse + 34, // 63: ark.v1.IndexerService.SubscribeForScripts:output_type -> ark.v1.SubscribeForScriptsResponse + 36, // 64: ark.v1.IndexerService.UnsubscribeForScripts:output_type -> ark.v1.UnsubscribeForScriptsResponse + 39, // 65: ark.v1.IndexerService.GetSubscription:output_type -> ark.v1.GetSubscriptionResponse + 53, // [53:66] is the sub-list for method output_type + 40, // [40:53] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 40, // [40:40] is the sub-list for extension extendee + 0, // [0:40] is the sub-list for field type_name } func init() { file_ark_v1_indexer_proto_init() } @@ -2573,11 +3003,11 @@ func file_ark_v1_indexer_proto_init() { if File_ark_v1_indexer_proto != nil { return } - file_ark_v1_indexer_proto_msgTypes[23].OneofWrappers = []any{ + file_ark_v1_indexer_proto_msgTypes[28].OneofWrappers = []any{ (*IndexerTxHistoryRecord_CommitmentTxid)(nil), (*IndexerTxHistoryRecord_VirtualTxid)(nil), } - 
file_ark_v1_indexer_proto_msgTypes[31].OneofWrappers = []any{ + file_ark_v1_indexer_proto_msgTypes[37].OneofWrappers = []any{ (*GetSubscriptionResponse_Heartbeat)(nil), (*GetSubscriptionResponse_Event)(nil), } @@ -2587,7 +3017,7 @@ func file_ark_v1_indexer_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_ark_v1_indexer_proto_rawDesc), len(file_ark_v1_indexer_proto_rawDesc)), NumEnums: 2, - NumMessages: 38, + NumMessages: 44, NumExtensions: 0, NumServices: 1, }, diff --git a/api-spec/protobuf/gen/ark/v1/indexer.pb.rgw.go b/api-spec/protobuf/gen/ark/v1/indexer.pb.rgw.go index 0e7bfee9d..1c540e93b 100644 --- a/api-spec/protobuf/gen/ark/v1/indexer.pb.rgw.go +++ b/api-spec/protobuf/gen/ark/v1/indexer.pb.rgw.go @@ -125,7 +125,7 @@ func request_IndexerService_GetConnectors_0(ctx context.Context, marshaler gatew var ( query_params_IndexerService_GetVtxoTree_0 = gateway.QueryParameterParseOptions{ - Filter: trie.New("txid", "vout", "batch_outpoint.txid", "batch_outpoint.vout"), + Filter: trie.New("vout", "batch_outpoint.vout", "batch_outpoint.txid", "txid"), } ) @@ -173,7 +173,7 @@ func request_IndexerService_GetVtxoTree_0(ctx context.Context, marshaler gateway var ( query_params_IndexerService_GetVtxoTreeLeaves_0 = gateway.QueryParameterParseOptions{ - Filter: trie.New("batch_outpoint.txid", "batch_outpoint.vout", "txid", "vout"), + Filter: trie.New("batch_outpoint.vout", "batch_outpoint.txid", "txid", "vout"), } ) @@ -243,7 +243,7 @@ func request_IndexerService_GetVtxos_0(ctx context.Context, marshaler gateway.Ma var ( query_params_IndexerService_GetVtxoChain_0 = gateway.QueryParameterParseOptions{ - Filter: trie.New("outpoint.txid", "outpoint.vout", "txid", "vout"), + Filter: trie.New("vout", "outpoint.txid", "outpoint.vout", "txid"), } ) @@ -327,6 +327,31 @@ func request_IndexerService_GetVirtualTxs_0(ctx context.Context, marshaler gatew } +func request_IndexerService_GetAssetGroup_0(ctx context.Context, 
marshaler gateway.Marshaler, mux *gateway.ServeMux, client IndexerServiceClient, req *http.Request, pathParams gateway.Params) (proto.Message, gateway.ServerMetadata, error) { + var protoReq GetAssetGroupRequest + var metadata gateway.ServerMetadata + + var ( + val string + err error + _ = err + ) + + val = pathParams.ByName("asset_id") + if val == "" { + return nil, metadata, gateway.ErrPathParameterMissing{Name: "asset_id"} + } + + protoReq.AssetId, err = protoconvert.String(val) + if err != nil { + return nil, metadata, gateway.ErrPathParameterTypeMismatch{Err: err, Name: "asset_id"} + } + + msg, err := client.GetAssetGroup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + func request_IndexerService_GetBatchSweepTransactions_0(ctx context.Context, marshaler gateway.Marshaler, mux *gateway.ServeMux, client IndexerServiceClient, req *http.Request, pathParams gateway.Params) (proto.Message, gateway.ServerMetadata, error) { var protoReq GetBatchSweepTransactionsRequest var metadata gateway.ServerMetadata @@ -636,6 +661,28 @@ func RegisterIndexerServiceHandlerClient(ctx context.Context, mux *gateway.Serve mux.ForwardResponseMessage(annotatedContext, outboundMarshaler, w, req, resp) }) + mux.HandleWithParams("GET", "/v1/indexer/assetGroup/:asset_id", func(w http.ResponseWriter, req *http.Request, pathParams gateway.Params) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := mux.MarshalerForRequest(req) + var err error + var annotatedContext context.Context + annotatedContext, err = gateway.AnnotateContext(ctx, mux, req, "/ark.v1.IndexerService/GetAssetGroup", gateway.WithHTTPPathPattern("/v1/indexer/assetGroup/{asset_id}")) + if err != nil { + mux.HTTPError(ctx, outboundMarshaler, w, req, err) + return + } + + resp, md, err := request_IndexerService_GetAssetGroup_0(annotatedContext, inboundMarshaler, mux, client, req, pathParams) + 
annotatedContext = gateway.NewServerMetadataContext(annotatedContext, md) + if err != nil { + mux.HTTPError(annotatedContext, outboundMarshaler, w, req, err) + return + } + + mux.ForwardResponseMessage(annotatedContext, outboundMarshaler, w, req, resp) + }) + mux.HandleWithParams("GET", "/v1/indexer/batch/:batch_outpoint.txid/:batch_outpoint.vout/sweepTxs", func(w http.ResponseWriter, req *http.Request, pathParams gateway.Params) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() diff --git a/api-spec/protobuf/gen/ark/v1/indexer_grpc.pb.go b/api-spec/protobuf/gen/ark/v1/indexer_grpc.pb.go index daef8a40f..c0fa5f031 100644 --- a/api-spec/protobuf/gen/ark/v1/indexer_grpc.pb.go +++ b/api-spec/protobuf/gen/ark/v1/indexer_grpc.pb.go @@ -27,6 +27,7 @@ const ( IndexerService_GetVtxos_FullMethodName = "/ark.v1.IndexerService/GetVtxos" IndexerService_GetVtxoChain_FullMethodName = "/ark.v1.IndexerService/GetVtxoChain" IndexerService_GetVirtualTxs_FullMethodName = "/ark.v1.IndexerService/GetVirtualTxs" + IndexerService_GetAssetGroup_FullMethodName = "/ark.v1.IndexerService/GetAssetGroup" IndexerService_GetBatchSweepTransactions_FullMethodName = "/ark.v1.IndexerService/GetBatchSweepTransactions" IndexerService_SubscribeForScripts_FullMethodName = "/ark.v1.IndexerService/SubscribeForScripts" IndexerService_UnsubscribeForScripts_FullMethodName = "/ark.v1.IndexerService/UnsubscribeForScripts" @@ -67,6 +68,8 @@ type IndexerServiceClient interface { // GetVirtualTxs returns the virtual transactions in hex format for the specified txids. // The response may be paginated if the results span multiple pages. GetVirtualTxs(ctx context.Context, in *GetVirtualTxsRequest, opts ...grpc.CallOption) (*GetVirtualTxsResponse, error) + // GetAsset returns the asset information and metadata for the specified asset ID. 
+ GetAssetGroup(ctx context.Context, in *GetAssetGroupRequest, opts ...grpc.CallOption) (*GetAssetGroupResponse, error) // GetBatchSweepTransactions returns the list of transaction (txid) that swept a given batch // output. // In most cases the list contains only one txid, meaning that all the amount locked for a @@ -178,6 +181,16 @@ func (c *indexerServiceClient) GetVirtualTxs(ctx context.Context, in *GetVirtual return out, nil } +func (c *indexerServiceClient) GetAssetGroup(ctx context.Context, in *GetAssetGroupRequest, opts ...grpc.CallOption) (*GetAssetGroupResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetAssetGroupResponse) + err := c.cc.Invoke(ctx, IndexerService_GetAssetGroup_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *indexerServiceClient) GetBatchSweepTransactions(ctx context.Context, in *GetBatchSweepTransactionsRequest, opts ...grpc.CallOption) (*GetBatchSweepTransactionsResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetBatchSweepTransactionsResponse) @@ -261,6 +274,8 @@ type IndexerServiceServer interface { // GetVirtualTxs returns the virtual transactions in hex format for the specified txids. // The response may be paginated if the results span multiple pages. GetVirtualTxs(context.Context, *GetVirtualTxsRequest) (*GetVirtualTxsResponse, error) + // GetAsset returns the asset information and metadata for the specified asset ID. + GetAssetGroup(context.Context, *GetAssetGroupRequest) (*GetAssetGroupResponse, error) // GetBatchSweepTransactions returns the list of transaction (txid) that swept a given batch // output. 
// In most cases the list contains only one txid, meaning that all the amount locked for a @@ -315,6 +330,9 @@ func (UnimplementedIndexerServiceServer) GetVtxoChain(context.Context, *GetVtxoC func (UnimplementedIndexerServiceServer) GetVirtualTxs(context.Context, *GetVirtualTxsRequest) (*GetVirtualTxsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetVirtualTxs not implemented") } +func (UnimplementedIndexerServiceServer) GetAssetGroup(context.Context, *GetAssetGroupRequest) (*GetAssetGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAssetGroup not implemented") +} func (UnimplementedIndexerServiceServer) GetBatchSweepTransactions(context.Context, *GetBatchSweepTransactionsRequest) (*GetBatchSweepTransactionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetBatchSweepTransactions not implemented") } @@ -491,6 +509,24 @@ func _IndexerService_GetVirtualTxs_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _IndexerService_GetAssetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAssetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexerServiceServer).GetAssetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: IndexerService_GetAssetGroup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexerServiceServer).GetAssetGroup(ctx, req.(*GetAssetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _IndexerService_GetBatchSweepTransactions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetBatchSweepTransactionsRequest) if err := dec(in); err != nil { @@ -595,6 
+631,10 @@ var IndexerService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetVirtualTxs", Handler: _IndexerService_GetVirtualTxs_Handler, }, + { + MethodName: "GetAssetGroup", + Handler: _IndexerService_GetAssetGroup_Handler, + }, { MethodName: "GetBatchSweepTransactions", Handler: _IndexerService_GetBatchSweepTransactions_Handler, diff --git a/api-spec/protobuf/gen/ark/v1/types.pb.go b/api-spec/protobuf/gen/ark/v1/types.pb.go index 5cba0bc1b..1132f6c7c 100644 --- a/api-spec/protobuf/gen/ark/v1/types.pb.go +++ b/api-spec/protobuf/gen/ark/v1/types.pb.go @@ -140,6 +140,7 @@ type Vtxo struct { SpentBy string `protobuf:"bytes,11,opt,name=spent_by,json=spentBy,proto3" json:"spent_by,omitempty"` SettledBy string `protobuf:"bytes,12,opt,name=settled_by,json=settledBy,proto3" json:"settled_by,omitempty"` ArkTxid string `protobuf:"bytes,13,opt,name=ark_txid,json=arkTxid,proto3" json:"ark_txid,omitempty"` + Assets []*Asset `protobuf:"bytes,14,rep,name=assets,proto3" json:"assets,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -265,6 +266,65 @@ func (x *Vtxo) GetArkTxid() string { return "" } +func (x *Vtxo) GetAssets() []*Asset { + if x != nil { + return x.Assets + } + return nil +} + +type Asset struct { + state protoimpl.MessageState `protogen:"open.v1"` + AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Asset) Reset() { + *x = Asset{} + mi := &file_ark_v1_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Asset) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Asset) ProtoMessage() {} + +func (x *Asset) ProtoReflect() protoreflect.Message { + mi := &file_ark_v1_types_proto_msgTypes[3] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Asset.ProtoReflect.Descriptor instead. +func (*Asset) Descriptor() ([]byte, []int) { + return file_ark_v1_types_proto_rawDescGZIP(), []int{3} +} + +func (x *Asset) GetAssetId() string { + if x != nil { + return x.AssetId + } + return "" +} + +func (x *Asset) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + type TxData struct { state protoimpl.MessageState `protogen:"open.v1"` Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` @@ -275,7 +335,7 @@ type TxData struct { func (x *TxData) Reset() { *x = TxData{} - mi := &file_ark_v1_types_proto_msgTypes[3] + mi := &file_ark_v1_types_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -287,7 +347,7 @@ func (x *TxData) String() string { func (*TxData) ProtoMessage() {} func (x *TxData) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[3] + mi := &file_ark_v1_types_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -300,7 +360,7 @@ func (x *TxData) ProtoReflect() protoreflect.Message { // Deprecated: Use TxData.ProtoReflect.Descriptor instead. 
func (*TxData) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{3} + return file_ark_v1_types_proto_rawDescGZIP(), []int{4} } func (x *TxData) GetTxid() string { @@ -331,7 +391,7 @@ type TxNotification struct { func (x *TxNotification) Reset() { *x = TxNotification{} - mi := &file_ark_v1_types_proto_msgTypes[4] + mi := &file_ark_v1_types_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -343,7 +403,7 @@ func (x *TxNotification) String() string { func (*TxNotification) ProtoMessage() {} func (x *TxNotification) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[4] + mi := &file_ark_v1_types_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -356,7 +416,7 @@ func (x *TxNotification) ProtoReflect() protoreflect.Message { // Deprecated: Use TxNotification.ProtoReflect.Descriptor instead. func (*TxNotification) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{4} + return file_ark_v1_types_proto_rawDescGZIP(), []int{5} } func (x *TxNotification) GetTxid() string { @@ -403,7 +463,7 @@ type Tapscripts struct { func (x *Tapscripts) Reset() { *x = Tapscripts{} - mi := &file_ark_v1_types_proto_msgTypes[5] + mi := &file_ark_v1_types_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -415,7 +475,7 @@ func (x *Tapscripts) String() string { func (*Tapscripts) ProtoMessage() {} func (x *Tapscripts) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[5] + mi := &file_ark_v1_types_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -428,7 +488,7 @@ func (x *Tapscripts) ProtoReflect() protoreflect.Message { // Deprecated: Use Tapscripts.ProtoReflect.Descriptor instead. 
func (*Tapscripts) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{5} + return file_ark_v1_types_proto_rawDescGZIP(), []int{6} } func (x *Tapscripts) GetScripts() []string { @@ -448,7 +508,7 @@ type Intent struct { func (x *Intent) Reset() { *x = Intent{} - mi := &file_ark_v1_types_proto_msgTypes[6] + mi := &file_ark_v1_types_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -460,7 +520,7 @@ func (x *Intent) String() string { func (*Intent) ProtoMessage() {} func (x *Intent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[6] + mi := &file_ark_v1_types_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -473,7 +533,7 @@ func (x *Intent) ProtoReflect() protoreflect.Message { // Deprecated: Use Intent.ProtoReflect.Descriptor instead. func (*Intent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{6} + return file_ark_v1_types_proto_rawDescGZIP(), []int{7} } func (x *Intent) GetProof() string { @@ -503,7 +563,7 @@ type ScheduledSession struct { func (x *ScheduledSession) Reset() { *x = ScheduledSession{} - mi := &file_ark_v1_types_proto_msgTypes[7] + mi := &file_ark_v1_types_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -515,7 +575,7 @@ func (x *ScheduledSession) String() string { func (*ScheduledSession) ProtoMessage() {} func (x *ScheduledSession) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[7] + mi := &file_ark_v1_types_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -528,7 +588,7 @@ func (x *ScheduledSession) ProtoReflect() protoreflect.Message { // Deprecated: Use ScheduledSession.ProtoReflect.Descriptor instead. 
func (*ScheduledSession) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{7} + return file_ark_v1_types_proto_rawDescGZIP(), []int{8} } func (x *ScheduledSession) GetNextStartTime() int64 { @@ -576,7 +636,7 @@ type FeeInfo struct { func (x *FeeInfo) Reset() { *x = FeeInfo{} - mi := &file_ark_v1_types_proto_msgTypes[8] + mi := &file_ark_v1_types_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -588,7 +648,7 @@ func (x *FeeInfo) String() string { func (*FeeInfo) ProtoMessage() {} func (x *FeeInfo) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[8] + mi := &file_ark_v1_types_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -601,7 +661,7 @@ func (x *FeeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use FeeInfo.ProtoReflect.Descriptor instead. func (*FeeInfo) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{8} + return file_ark_v1_types_proto_rawDescGZIP(), []int{9} } func (x *FeeInfo) GetIntentFee() *IntentFeeInfo { @@ -630,7 +690,7 @@ type IntentFeeInfo struct { func (x *IntentFeeInfo) Reset() { *x = IntentFeeInfo{} - mi := &file_ark_v1_types_proto_msgTypes[9] + mi := &file_ark_v1_types_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -642,7 +702,7 @@ func (x *IntentFeeInfo) String() string { func (*IntentFeeInfo) ProtoMessage() {} func (x *IntentFeeInfo) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[9] + mi := &file_ark_v1_types_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -655,7 +715,7 @@ func (x *IntentFeeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use IntentFeeInfo.ProtoReflect.Descriptor instead. 
func (*IntentFeeInfo) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{9} + return file_ark_v1_types_proto_rawDescGZIP(), []int{10} } func (x *IntentFeeInfo) GetOffchainInput() string { @@ -697,7 +757,7 @@ type PendingTx struct { func (x *PendingTx) Reset() { *x = PendingTx{} - mi := &file_ark_v1_types_proto_msgTypes[10] + mi := &file_ark_v1_types_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -709,7 +769,7 @@ func (x *PendingTx) String() string { func (*PendingTx) ProtoMessage() {} func (x *PendingTx) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[10] + mi := &file_ark_v1_types_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -722,7 +782,7 @@ func (x *PendingTx) ProtoReflect() protoreflect.Message { // Deprecated: Use PendingTx.ProtoReflect.Descriptor instead. func (*PendingTx) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{10} + return file_ark_v1_types_proto_rawDescGZIP(), []int{11} } func (x *PendingTx) GetArkTxid() string { @@ -756,7 +816,7 @@ type DeprecatedSigner struct { func (x *DeprecatedSigner) Reset() { *x = DeprecatedSigner{} - mi := &file_ark_v1_types_proto_msgTypes[11] + mi := &file_ark_v1_types_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -768,7 +828,7 @@ func (x *DeprecatedSigner) String() string { func (*DeprecatedSigner) ProtoMessage() {} func (x *DeprecatedSigner) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[11] + mi := &file_ark_v1_types_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -781,7 +841,7 @@ func (x *DeprecatedSigner) ProtoReflect() protoreflect.Message { // Deprecated: Use DeprecatedSigner.ProtoReflect.Descriptor instead. 
func (*DeprecatedSigner) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{11} + return file_ark_v1_types_proto_rawDescGZIP(), []int{12} } func (x *DeprecatedSigner) GetPubkey() string { @@ -809,7 +869,7 @@ type BatchStartedEvent struct { func (x *BatchStartedEvent) Reset() { *x = BatchStartedEvent{} - mi := &file_ark_v1_types_proto_msgTypes[12] + mi := &file_ark_v1_types_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -821,7 +881,7 @@ func (x *BatchStartedEvent) String() string { func (*BatchStartedEvent) ProtoMessage() {} func (x *BatchStartedEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[12] + mi := &file_ark_v1_types_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -834,7 +894,7 @@ func (x *BatchStartedEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchStartedEvent.ProtoReflect.Descriptor instead. 
func (*BatchStartedEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{12} + return file_ark_v1_types_proto_rawDescGZIP(), []int{13} } func (x *BatchStartedEvent) GetId() string { @@ -868,7 +928,7 @@ type BatchFinalizationEvent struct { func (x *BatchFinalizationEvent) Reset() { *x = BatchFinalizationEvent{} - mi := &file_ark_v1_types_proto_msgTypes[13] + mi := &file_ark_v1_types_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -880,7 +940,7 @@ func (x *BatchFinalizationEvent) String() string { func (*BatchFinalizationEvent) ProtoMessage() {} func (x *BatchFinalizationEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[13] + mi := &file_ark_v1_types_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -893,7 +953,7 @@ func (x *BatchFinalizationEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchFinalizationEvent.ProtoReflect.Descriptor instead. 
func (*BatchFinalizationEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{13} + return file_ark_v1_types_proto_rawDescGZIP(), []int{14} } func (x *BatchFinalizationEvent) GetId() string { @@ -920,7 +980,7 @@ type BatchFinalizedEvent struct { func (x *BatchFinalizedEvent) Reset() { *x = BatchFinalizedEvent{} - mi := &file_ark_v1_types_proto_msgTypes[14] + mi := &file_ark_v1_types_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -932,7 +992,7 @@ func (x *BatchFinalizedEvent) String() string { func (*BatchFinalizedEvent) ProtoMessage() {} func (x *BatchFinalizedEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[14] + mi := &file_ark_v1_types_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -945,7 +1005,7 @@ func (x *BatchFinalizedEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchFinalizedEvent.ProtoReflect.Descriptor instead. 
func (*BatchFinalizedEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{14} + return file_ark_v1_types_proto_rawDescGZIP(), []int{15} } func (x *BatchFinalizedEvent) GetId() string { @@ -972,7 +1032,7 @@ type BatchFailedEvent struct { func (x *BatchFailedEvent) Reset() { *x = BatchFailedEvent{} - mi := &file_ark_v1_types_proto_msgTypes[15] + mi := &file_ark_v1_types_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -984,7 +1044,7 @@ func (x *BatchFailedEvent) String() string { func (*BatchFailedEvent) ProtoMessage() {} func (x *BatchFailedEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[15] + mi := &file_ark_v1_types_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -997,7 +1057,7 @@ func (x *BatchFailedEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchFailedEvent.ProtoReflect.Descriptor instead. 
func (*BatchFailedEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{15} + return file_ark_v1_types_proto_rawDescGZIP(), []int{16} } func (x *BatchFailedEvent) GetId() string { @@ -1025,7 +1085,7 @@ type TreeSigningStartedEvent struct { func (x *TreeSigningStartedEvent) Reset() { *x = TreeSigningStartedEvent{} - mi := &file_ark_v1_types_proto_msgTypes[16] + mi := &file_ark_v1_types_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1037,7 +1097,7 @@ func (x *TreeSigningStartedEvent) String() string { func (*TreeSigningStartedEvent) ProtoMessage() {} func (x *TreeSigningStartedEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[16] + mi := &file_ark_v1_types_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1050,7 +1110,7 @@ func (x *TreeSigningStartedEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use TreeSigningStartedEvent.ProtoReflect.Descriptor instead. 
func (*TreeSigningStartedEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{16} + return file_ark_v1_types_proto_rawDescGZIP(), []int{17} } func (x *TreeSigningStartedEvent) GetId() string { @@ -1084,7 +1144,7 @@ type TreeNoncesAggregatedEvent struct { func (x *TreeNoncesAggregatedEvent) Reset() { *x = TreeNoncesAggregatedEvent{} - mi := &file_ark_v1_types_proto_msgTypes[17] + mi := &file_ark_v1_types_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1096,7 +1156,7 @@ func (x *TreeNoncesAggregatedEvent) String() string { func (*TreeNoncesAggregatedEvent) ProtoMessage() {} func (x *TreeNoncesAggregatedEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[17] + mi := &file_ark_v1_types_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1109,7 +1169,7 @@ func (x *TreeNoncesAggregatedEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use TreeNoncesAggregatedEvent.ProtoReflect.Descriptor instead. 
func (*TreeNoncesAggregatedEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{17} + return file_ark_v1_types_proto_rawDescGZIP(), []int{18} } func (x *TreeNoncesAggregatedEvent) GetId() string { @@ -1138,7 +1198,7 @@ type TreeNoncesEvent struct { func (x *TreeNoncesEvent) Reset() { *x = TreeNoncesEvent{} - mi := &file_ark_v1_types_proto_msgTypes[18] + mi := &file_ark_v1_types_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1150,7 +1210,7 @@ func (x *TreeNoncesEvent) String() string { func (*TreeNoncesEvent) ProtoMessage() {} func (x *TreeNoncesEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[18] + mi := &file_ark_v1_types_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1163,7 +1223,7 @@ func (x *TreeNoncesEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use TreeNoncesEvent.ProtoReflect.Descriptor instead. 
func (*TreeNoncesEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{18} + return file_ark_v1_types_proto_rawDescGZIP(), []int{19} } func (x *TreeNoncesEvent) GetId() string { @@ -1208,7 +1268,7 @@ type TreeTxEvent struct { func (x *TreeTxEvent) Reset() { *x = TreeTxEvent{} - mi := &file_ark_v1_types_proto_msgTypes[19] + mi := &file_ark_v1_types_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1220,7 +1280,7 @@ func (x *TreeTxEvent) String() string { func (*TreeTxEvent) ProtoMessage() {} func (x *TreeTxEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[19] + mi := &file_ark_v1_types_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1233,7 +1293,7 @@ func (x *TreeTxEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use TreeTxEvent.ProtoReflect.Descriptor instead. func (*TreeTxEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{19} + return file_ark_v1_types_proto_rawDescGZIP(), []int{20} } func (x *TreeTxEvent) GetId() string { @@ -1291,7 +1351,7 @@ type TreeSignatureEvent struct { func (x *TreeSignatureEvent) Reset() { *x = TreeSignatureEvent{} - mi := &file_ark_v1_types_proto_msgTypes[20] + mi := &file_ark_v1_types_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1303,7 +1363,7 @@ func (x *TreeSignatureEvent) String() string { func (*TreeSignatureEvent) ProtoMessage() {} func (x *TreeSignatureEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[20] + mi := &file_ark_v1_types_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1316,7 +1376,7 @@ func (x *TreeSignatureEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use 
TreeSignatureEvent.ProtoReflect.Descriptor instead. func (*TreeSignatureEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{20} + return file_ark_v1_types_proto_rawDescGZIP(), []int{21} } func (x *TreeSignatureEvent) GetId() string { @@ -1362,7 +1422,7 @@ type Heartbeat struct { func (x *Heartbeat) Reset() { *x = Heartbeat{} - mi := &file_ark_v1_types_proto_msgTypes[21] + mi := &file_ark_v1_types_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1374,7 +1434,7 @@ func (x *Heartbeat) String() string { func (*Heartbeat) ProtoMessage() {} func (x *Heartbeat) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[21] + mi := &file_ark_v1_types_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1387,7 +1447,7 @@ func (x *Heartbeat) ProtoReflect() protoreflect.Message { // Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead. 
func (*Heartbeat) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{21} + return file_ark_v1_types_proto_rawDescGZIP(), []int{22} } type StreamStartedEvent struct { @@ -1399,7 +1459,7 @@ type StreamStartedEvent struct { func (x *StreamStartedEvent) Reset() { *x = StreamStartedEvent{} - mi := &file_ark_v1_types_proto_msgTypes[22] + mi := &file_ark_v1_types_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1411,7 +1471,7 @@ func (x *StreamStartedEvent) String() string { func (*StreamStartedEvent) ProtoMessage() {} func (x *StreamStartedEvent) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[22] + mi := &file_ark_v1_types_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1424,7 +1484,7 @@ func (x *StreamStartedEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamStartedEvent.ProtoReflect.Descriptor instead. 
func (*StreamStartedEvent) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{22} + return file_ark_v1_types_proto_rawDescGZIP(), []int{23} } func (x *StreamStartedEvent) GetId() string { @@ -1446,7 +1506,7 @@ type ErrorDetails struct { func (x *ErrorDetails) Reset() { *x = ErrorDetails{} - mi := &file_ark_v1_types_proto_msgTypes[23] + mi := &file_ark_v1_types_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1458,7 +1518,7 @@ func (x *ErrorDetails) String() string { func (*ErrorDetails) ProtoMessage() {} func (x *ErrorDetails) ProtoReflect() protoreflect.Message { - mi := &file_ark_v1_types_proto_msgTypes[23] + mi := &file_ark_v1_types_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1471,7 +1531,7 @@ func (x *ErrorDetails) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorDetails.ProtoReflect.Descriptor instead. 
func (*ErrorDetails) Descriptor() ([]byte, []int) { - return file_ark_v1_types_proto_rawDescGZIP(), []int{23} + return file_ark_v1_types_proto_rawDescGZIP(), []int{24} } func (x *ErrorDetails) GetCode() int32 { @@ -1512,7 +1572,7 @@ const file_ark_v1_types_proto_rawDesc = "" + "\x04vout\x18\x02 \x01(\rR\x04vout\"l\n" + "\x05Input\x12,\n" + "\boutpoint\x18\x01 \x01(\v2\x10.ark.v1.OutpointR\boutpoint\x125\n" + - "\ftaproot_tree\x18\x02 \x01(\v2\x12.ark.v1.TapscriptsR\vtaprootTree\"\xa2\x03\n" + + "\ftaproot_tree\x18\x02 \x01(\v2\x12.ark.v1.TapscriptsR\vtaprootTree\"\xc9\x03\n" + "\x04Vtxo\x12,\n" + "\boutpoint\x18\x01 \x01(\v2\x10.ark.v1.OutpointR\boutpoint\x12\x16\n" + "\x06amount\x18\x02 \x01(\x04R\x06amount\x12\x16\n" + @@ -1531,7 +1591,11 @@ const file_ark_v1_types_proto_rawDesc = "" + "\bspent_by\x18\v \x01(\tR\aspentBy\x12\x1d\n" + "\n" + "settled_by\x18\f \x01(\tR\tsettledBy\x12\x19\n" + - "\bark_txid\x18\r \x01(\tR\aarkTxid\",\n" + + "\bark_txid\x18\r \x01(\tR\aarkTxid\x12%\n" + + "\x06assets\x18\x0e \x03(\v2\r.ark.v1.AssetR\x06assets\":\n" + + "\x05Asset\x12\x19\n" + + "\basset_id\x18\x01 \x01(\tR\aassetId\x12\x16\n" + + "\x06amount\x18\x02 \x01(\x04R\x06amount\",\n" + "\x06TxData\x12\x12\n" + "\x04txid\x18\x01 \x01(\tR\x04txid\x12\x0e\n" + "\x02tx\x18\x02 \x01(\tR\x02tx\"\xbe\x02\n" + @@ -1652,57 +1716,59 @@ func file_ark_v1_types_proto_rawDescGZIP() []byte { return file_ark_v1_types_proto_rawDescData } -var file_ark_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_ark_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 30) var file_ark_v1_types_proto_goTypes = []any{ (*Outpoint)(nil), // 0: ark.v1.Outpoint (*Input)(nil), // 1: ark.v1.Input (*Vtxo)(nil), // 2: ark.v1.Vtxo - (*TxData)(nil), // 3: ark.v1.TxData - (*TxNotification)(nil), // 4: ark.v1.TxNotification - (*Tapscripts)(nil), // 5: ark.v1.Tapscripts - (*Intent)(nil), // 6: ark.v1.Intent - (*ScheduledSession)(nil), // 7: ark.v1.ScheduledSession - (*FeeInfo)(nil), // 8: 
ark.v1.FeeInfo - (*IntentFeeInfo)(nil), // 9: ark.v1.IntentFeeInfo - (*PendingTx)(nil), // 10: ark.v1.PendingTx - (*DeprecatedSigner)(nil), // 11: ark.v1.DeprecatedSigner - (*BatchStartedEvent)(nil), // 12: ark.v1.BatchStartedEvent - (*BatchFinalizationEvent)(nil), // 13: ark.v1.BatchFinalizationEvent - (*BatchFinalizedEvent)(nil), // 14: ark.v1.BatchFinalizedEvent - (*BatchFailedEvent)(nil), // 15: ark.v1.BatchFailedEvent - (*TreeSigningStartedEvent)(nil), // 16: ark.v1.TreeSigningStartedEvent - (*TreeNoncesAggregatedEvent)(nil), // 17: ark.v1.TreeNoncesAggregatedEvent - (*TreeNoncesEvent)(nil), // 18: ark.v1.TreeNoncesEvent - (*TreeTxEvent)(nil), // 19: ark.v1.TreeTxEvent - (*TreeSignatureEvent)(nil), // 20: ark.v1.TreeSignatureEvent - (*Heartbeat)(nil), // 21: ark.v1.Heartbeat - (*StreamStartedEvent)(nil), // 22: ark.v1.StreamStartedEvent - (*ErrorDetails)(nil), // 23: ark.v1.ErrorDetails - nil, // 24: ark.v1.TxNotification.CheckpointTxsEntry - nil, // 25: ark.v1.TreeNoncesAggregatedEvent.TreeNoncesEntry - nil, // 26: ark.v1.TreeNoncesEvent.NoncesEntry - nil, // 27: ark.v1.TreeTxEvent.ChildrenEntry - nil, // 28: ark.v1.ErrorDetails.MetadataEntry + (*Asset)(nil), // 3: ark.v1.Asset + (*TxData)(nil), // 4: ark.v1.TxData + (*TxNotification)(nil), // 5: ark.v1.TxNotification + (*Tapscripts)(nil), // 6: ark.v1.Tapscripts + (*Intent)(nil), // 7: ark.v1.Intent + (*ScheduledSession)(nil), // 8: ark.v1.ScheduledSession + (*FeeInfo)(nil), // 9: ark.v1.FeeInfo + (*IntentFeeInfo)(nil), // 10: ark.v1.IntentFeeInfo + (*PendingTx)(nil), // 11: ark.v1.PendingTx + (*DeprecatedSigner)(nil), // 12: ark.v1.DeprecatedSigner + (*BatchStartedEvent)(nil), // 13: ark.v1.BatchStartedEvent + (*BatchFinalizationEvent)(nil), // 14: ark.v1.BatchFinalizationEvent + (*BatchFinalizedEvent)(nil), // 15: ark.v1.BatchFinalizedEvent + (*BatchFailedEvent)(nil), // 16: ark.v1.BatchFailedEvent + (*TreeSigningStartedEvent)(nil), // 17: ark.v1.TreeSigningStartedEvent + (*TreeNoncesAggregatedEvent)(nil), 
// 18: ark.v1.TreeNoncesAggregatedEvent + (*TreeNoncesEvent)(nil), // 19: ark.v1.TreeNoncesEvent + (*TreeTxEvent)(nil), // 20: ark.v1.TreeTxEvent + (*TreeSignatureEvent)(nil), // 21: ark.v1.TreeSignatureEvent + (*Heartbeat)(nil), // 22: ark.v1.Heartbeat + (*StreamStartedEvent)(nil), // 23: ark.v1.StreamStartedEvent + (*ErrorDetails)(nil), // 24: ark.v1.ErrorDetails + nil, // 25: ark.v1.TxNotification.CheckpointTxsEntry + nil, // 26: ark.v1.TreeNoncesAggregatedEvent.TreeNoncesEntry + nil, // 27: ark.v1.TreeNoncesEvent.NoncesEntry + nil, // 28: ark.v1.TreeTxEvent.ChildrenEntry + nil, // 29: ark.v1.ErrorDetails.MetadataEntry } var file_ark_v1_types_proto_depIdxs = []int32{ 0, // 0: ark.v1.Input.outpoint:type_name -> ark.v1.Outpoint - 5, // 1: ark.v1.Input.taproot_tree:type_name -> ark.v1.Tapscripts + 6, // 1: ark.v1.Input.taproot_tree:type_name -> ark.v1.Tapscripts 0, // 2: ark.v1.Vtxo.outpoint:type_name -> ark.v1.Outpoint - 2, // 3: ark.v1.TxNotification.spent_vtxos:type_name -> ark.v1.Vtxo - 2, // 4: ark.v1.TxNotification.spendable_vtxos:type_name -> ark.v1.Vtxo - 24, // 5: ark.v1.TxNotification.checkpoint_txs:type_name -> ark.v1.TxNotification.CheckpointTxsEntry - 8, // 6: ark.v1.ScheduledSession.fees:type_name -> ark.v1.FeeInfo - 9, // 7: ark.v1.FeeInfo.intent_fee:type_name -> ark.v1.IntentFeeInfo - 25, // 8: ark.v1.TreeNoncesAggregatedEvent.tree_nonces:type_name -> ark.v1.TreeNoncesAggregatedEvent.TreeNoncesEntry - 26, // 9: ark.v1.TreeNoncesEvent.nonces:type_name -> ark.v1.TreeNoncesEvent.NoncesEntry - 27, // 10: ark.v1.TreeTxEvent.children:type_name -> ark.v1.TreeTxEvent.ChildrenEntry - 28, // 11: ark.v1.ErrorDetails.metadata:type_name -> ark.v1.ErrorDetails.MetadataEntry - 3, // 12: ark.v1.TxNotification.CheckpointTxsEntry.value:type_name -> ark.v1.TxData - 13, // [13:13] is the sub-list for method output_type - 13, // [13:13] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for 
extension extendee - 0, // [0:13] is the sub-list for field type_name + 3, // 3: ark.v1.Vtxo.assets:type_name -> ark.v1.Asset + 2, // 4: ark.v1.TxNotification.spent_vtxos:type_name -> ark.v1.Vtxo + 2, // 5: ark.v1.TxNotification.spendable_vtxos:type_name -> ark.v1.Vtxo + 25, // 6: ark.v1.TxNotification.checkpoint_txs:type_name -> ark.v1.TxNotification.CheckpointTxsEntry + 9, // 7: ark.v1.ScheduledSession.fees:type_name -> ark.v1.FeeInfo + 10, // 8: ark.v1.FeeInfo.intent_fee:type_name -> ark.v1.IntentFeeInfo + 26, // 9: ark.v1.TreeNoncesAggregatedEvent.tree_nonces:type_name -> ark.v1.TreeNoncesAggregatedEvent.TreeNoncesEntry + 27, // 10: ark.v1.TreeNoncesEvent.nonces:type_name -> ark.v1.TreeNoncesEvent.NoncesEntry + 28, // 11: ark.v1.TreeTxEvent.children:type_name -> ark.v1.TreeTxEvent.ChildrenEntry + 29, // 12: ark.v1.ErrorDetails.metadata:type_name -> ark.v1.ErrorDetails.MetadataEntry + 4, // 13: ark.v1.TxNotification.CheckpointTxsEntry.value:type_name -> ark.v1.TxData + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name } func init() { file_ark_v1_types_proto_init() } @@ -1716,7 +1782,7 @@ func file_ark_v1_types_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_ark_v1_types_proto_rawDesc), len(file_ark_v1_types_proto_rawDesc)), NumEnums: 0, - NumMessages: 29, + NumMessages: 30, NumExtensions: 0, NumServices: 0, }, diff --git a/buf.Dockerfile b/buf.Dockerfile index d47335245..fa395636c 100644 --- a/buf.Dockerfile +++ b/buf.Dockerfile @@ -1,7 +1,7 @@ -FROM golang:1.24-alpine3.20 as builder +FROM golang:1.24-alpine3.20 AS builder RUN apk add --no-cache git -RUN wget -qO /usr/local/bin/buf https://github.com/bufbuild/buf/releases/download/v1.55.1/buf-Linux-armv7 +RUN wget -qO 
/usr/local/bin/buf https://github.com/bufbuild/buf/releases/download/v1.55.1/buf-Linux-x86_64 RUN chmod u+x /usr/local/bin/buf RUN go install github.com/meshapi/grpc-api-gateway/codegen/cmd/protoc-gen-grpc-api-gateway@latest diff --git a/internal/core/application/asset_validation.go b/internal/core/application/asset_validation.go new file mode 100644 index 000000000..50e2398c5 --- /dev/null +++ b/internal/core/application/asset_validation.go @@ -0,0 +1,694 @@ +package application + +import ( + "bytes" + "context" + "encoding/hex" + "strings" + + "github.com/arkade-os/arkd/pkg/ark-lib/extension" + "github.com/arkade-os/arkd/pkg/errors" + "github.com/btcsuite/btcd/btcutil/psbt" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" +) + +type assetValidationState int + +const ( + assetValidationDecode assetValidationState = iota + assetValidationControl + assetValidationGroup + assetValidationDone +) + +type assetValidationMachine struct { + ctx context.Context + arkTx wire.MsgTx + checkpointTxMap map[string]string + opReturnOutput wire.TxOut + assets []extension.AssetGroup + groupIndex int +} + +type controlAssetState int + +const ( + controlAssetStart controlAssetState = iota + controlAssetIssuance + controlAssetReissuance + controlAssetDone +) + +type controlAssetMachine struct { + ctx context.Context + assets []extension.AssetGroup + asset extension.AssetGroup + state controlAssetState +} + +func (s *service) validateAssetTransition( + ctx context.Context, + arkTx wire.MsgTx, + checkpointTxMap map[string]string, + opReturnOutput wire.TxOut, +) error { + machine := assetValidationMachine{ + ctx: ctx, + arkTx: arkTx, + checkpointTxMap: checkpointTxMap, + opReturnOutput: opReturnOutput, + } + return machine.run(s) +} + +func (m *assetValidationMachine) run(s *service) error { + state := assetValidationDecode + + for { + switch state { + case assetValidationDecode: + decodedAssetPacket, err := extension.DecodeAssetPacket(m.opReturnOutput) + 
if err != nil { + return errors.ASSET_PACKET_INVALID.New("error decoding asset from opreturn: %s", err). + WithMetadata(errors.AssetValidationMetadata{Message: err.Error()}) + } + + m.assets = decodedAssetPacket.Assets + m.groupIndex = 0 + state = assetValidationControl + case assetValidationControl: + if err := s.validateControlAssets(m.ctx, m.assets); err != nil { + return err + } + state = assetValidationGroup + case assetValidationGroup: + if m.groupIndex >= len(m.assets) { + state = assetValidationDone + continue + } + + if err := s.validateAssetGroup(m.ctx, m.arkTx, m.checkpointTxMap, m.assets, m.groupIndex); err != nil { + return err + } + + m.groupIndex++ + case assetValidationDone: + return nil + default: + return errors.ASSET_VALIDATION_FAILED.New("invalid asset validation state %d", state). + WithMetadata(errors.AssetValidationMetadata{}) + } + } +} + +func (s *service) validateControlAssets(ctx context.Context, assets []extension.AssetGroup) error { + for _, asst := range assets { + machine := controlAssetMachine{ + ctx: ctx, + assets: assets, + asset: asst, + state: controlAssetStart, + } + if err := machine.run(s); err != nil { + return err + } + } + + return nil +} + +func (m *controlAssetMachine) run(s *service) error { + for { + switch m.state { + case controlAssetStart: + m.state = m.classify() + case controlAssetIssuance: + if err := m.validateIssuance(s); err != nil { + return err + } + m.state = controlAssetDone + case controlAssetReissuance: + if err := m.validateReissuance(s); err != nil { + return err + } + m.state = controlAssetDone + case controlAssetDone: + return nil + default: + return errors.CONTROL_ASSET_INVALID.New("invalid control asset validation state %d", m.state). 
+ WithMetadata(errors.ControlAssetMetadata{}) + } + } +} + +func (m *controlAssetMachine) classify() controlAssetState { + if m.asset.AssetId == nil { + return controlAssetIssuance + } + return controlAssetReissuance +} + +func (m *controlAssetMachine) validateIssuance(s *service) error { + if m.asset.ControlAsset == nil { + return nil + } + + switch m.asset.ControlAsset.Type { + case extension.AssetRefByGroup: + if int(m.asset.ControlAsset.GroupIndex) >= len(m.assets) { + return errors.CONTROL_ASSET_INVALID.New( + "control asset group index %d out of range for issuance", + m.asset.ControlAsset.GroupIndex, + ).WithMetadata(errors.ControlAssetMetadata{ + GroupIndex: int(m.asset.ControlAsset.GroupIndex), + }) + } + case extension.AssetRefByID: + controlAssetIDStr := m.asset.ControlAsset.AssetId.ToString() + assetGroup, err := s.repoManager.Assets().GetAssetGroupByID(m.ctx, controlAssetIDStr) + if err != nil { + return errors.CONTROL_ASSET_INVALID.New( + "error retrieving control asset %s for issuance: %w", + controlAssetIDStr, + err, + ).WithMetadata(errors.ControlAssetMetadata{ + ControlAssetID: controlAssetIDStr, + }) + } + if assetGroup == nil { + return errors.CONTROL_ASSET_NOT_FOUND.New( + "control asset %s does not exist for issuance", + controlAssetIDStr, + ).WithMetadata(errors.ControlAssetMetadata{ + ControlAssetID: controlAssetIDStr, + }) + } + default: + return errors.CONTROL_ASSET_INVALID.New("invalid control asset reference for issuance"). + WithMetadata(errors.ControlAssetMetadata{}) + } + + return nil +} + +func (m *controlAssetMachine) validateReissuance(s *service) error { + totalInputAmount := sumAssetInputs(m.asset.Inputs) + totalOutputAmount := sumAssetOutputs(m.asset.Outputs) + if totalOutputAmount <= totalInputAmount { + return nil + } + + if m.asset.AssetId == nil { + return errors.ASSET_VALIDATION_FAILED.New("missing asset ID"). 
+ WithMetadata(errors.AssetValidationMetadata{Message: "asset ID required for reissuance"}) + } + + assetID := m.asset.AssetId.ToString() + controlAssetDetails, err := s.repoManager.Assets().GetAssetGroupByID(m.ctx, assetID) + if err != nil { + return errors.ASSET_VALIDATION_FAILED.New("error retrieving asset %s: %w", assetID, err). + WithMetadata(errors.AssetValidationMetadata{AssetID: assetID}) + } + if controlAssetDetails == nil { + return errors.ASSET_NOT_FOUND.New("asset %s does not exist", assetID). + WithMetadata(errors.AssetValidationMetadata{AssetID: assetID}) + } + + controlAssetId := controlAssetDetails.ControlAssetID + if controlAssetId == "" { + return errors.CONTROL_ASSET_INVALID.New("asset %s does not have a control asset", assetID). + WithMetadata(errors.ControlAssetMetadata{AssetID: assetID}) + } + + decodedControlAssetId, err := extension.AssetIdFromString(controlAssetId) + if err != nil { + return errors.CONTROL_ASSET_INVALID.New("error decoding control asset ID %s: %w", controlAssetId, err). + WithMetadata(errors.ControlAssetMetadata{AssetID: assetID, ControlAssetID: controlAssetId}) + } + if decodedControlAssetId == nil { + return errors.CONTROL_ASSET_INVALID.New("invalid control asset ID %s", controlAssetId). 
+ WithMetadata(errors.ControlAssetMetadata{AssetID: assetID, ControlAssetID: controlAssetId}) + } + + if err := s.ensureAssetPresence(m.ctx, m.assets, *decodedControlAssetId); err != nil { + return err + } + + return nil +} + +type assetGroupValidationState int + +const ( + assetGroupValidateExists assetGroupValidationState = iota + assetGroupValidateOutputs + assetGroupValidateInputs + assetGroupDone +) + +type assetGroupValidationMachine struct { + ctx context.Context + arkTx wire.MsgTx + checkpointTxMap map[string]string + assets []extension.AssetGroup + groupIndex int + state assetGroupValidationState + inputIndex int +} + +func (s *service) validateAssetGroup( + ctx context.Context, + arkTx wire.MsgTx, + checkpointTxMap map[string]string, + assetPacketList []extension.AssetGroup, + groupIndex int, +) error { + machine := assetGroupValidationMachine{ + ctx: ctx, + arkTx: arkTx, + checkpointTxMap: checkpointTxMap, + assets: assetPacketList, + groupIndex: groupIndex, + state: assetGroupValidateExists, + } + return machine.run(s) +} + +func (m *assetGroupValidationMachine) run(s *service) error { + grpAsset := m.group() + for { + switch m.state { + case assetGroupValidateExists: + if grpAsset.AssetId != nil { + assetID := grpAsset.AssetId.ToString() + gp, err := s.repoManager.Assets().GetAssetGroupByID(m.ctx, assetID) + if err != nil { + return errors.ASSET_VALIDATION_FAILED.New( + "error retrieving asset group %s: %w", + assetID, + err, + ).WithMetadata(errors.AssetValidationMetadata{AssetID: assetID}) + } + if gp == nil { + return errors.ASSET_NOT_FOUND.New("asset group %s does not exist", assetID). 
+ WithMetadata(errors.AssetValidationMetadata{AssetID: assetID}) + } + } + m.state = assetGroupValidateOutputs + case assetGroupValidateOutputs: + if err := m.validateOutputs(s); err != nil { + return err + } + m.state = assetGroupValidateInputs + case assetGroupValidateInputs: + if m.inputIndex >= len(grpAsset.Inputs) { + m.state = assetGroupDone + continue + } + + input := grpAsset.Inputs[m.inputIndex] + if err := m.validateInput(s, input); err != nil { + return err + } + m.inputIndex++ + case assetGroupDone: + return nil + default: + return errors.ASSET_VALIDATION_FAILED.New("invalid asset group validation state %d", m.state). + WithMetadata(errors.AssetValidationMetadata{}) + } + } +} + +func (m *assetGroupValidationMachine) group() extension.AssetGroup { + return m.assets[m.groupIndex] +} + +func (m *assetGroupValidationMachine) validateOutputs(s *service) error { + machine := assetOutputValidationMachine{ + ctx: m.ctx, + arkTx: m.arkTx, + assetsList: m.assets, + assetGp: m.group(), + state: assetOutputInit, + } + return machine.run(s) +} + +func (m *assetGroupValidationMachine) validateInput(s *service, input extension.AssetInput) error { + grpAsset := m.group() + if input.Type == extension.AssetTypeTeleport { + if grpAsset.AssetId == nil { + return errors.TELEPORT_VALIDATION_FAILED.New("asset ID is required for teleport input validation"). + WithMetadata(errors.TeleportValidationMetadata{}) + } + + txHash, err := chainhash.NewHash(input.Witness.Txid[:]) + if err != nil { + return errors.TELEPORT_VALIDATION_FAILED.New("invalid intent ID for teleport input validation: %w", err). + WithMetadata(errors.TeleportValidationMetadata{}) + } + + intent, err := s.repoManager.Rounds().GetIntentByTxid(context.Background(), txHash.String()) + if err != nil { + return errors.TELEPORT_VALIDATION_FAILED.New("error retrieving intent for teleport input validation: %w", err). 
+ WithMetadata(errors.TeleportValidationMetadata{}) + } + + decodedProof, err := psbt.NewFromRawBytes(strings.NewReader(intent.Proof), true) + if err != nil { + return errors.TELEPORT_VALIDATION_FAILED.New("error decoding intent proof for teleport input validation: %w", err). + WithMetadata(errors.TeleportValidationMetadata{}) + } + + if err := s.validateTeleportInput(*decodedProof, m.arkTx, *grpAsset.AssetId, uint32(input.Witness.Index), input.Witness.Script); err != nil { + return err + } + return nil + } + + if int(input.Vin) >= len(m.arkTx.TxIn) { + return errors.ASSET_INPUT_INVALID.New("asset input index out of range: %d", input.Vin). + WithMetadata(errors.AssetInputMetadata{InputIndex: int(input.Vin)}) + } + + checkpointOutpoint := m.arkTx.TxIn[input.Vin].PreviousOutPoint + checkpointTxHex, ok := m.checkpointTxMap[checkpointOutpoint.Hash.String()] + if !ok { + return errors.CHECKPOINT_TX_NOT_FOUND.New( + "checkpoint tx %s not found for asset input %d", + checkpointOutpoint.Hash, + input.Vin, + ).WithMetadata(errors.CheckpointValidationMetadata{ + Txid: checkpointOutpoint.Hash.String(), + InputIndex: int(input.Vin), + }) + } + + checkpointPtx, err := psbt.NewFromRawBytes(strings.NewReader(checkpointTxHex), true) + if err != nil { + return errors.CHECKPOINT_TX_INVALID.New("failed to decode checkpoint tx %s: %w", checkpointOutpoint.Hash, err). + WithMetadata(errors.CheckpointValidationMetadata{Txid: checkpointOutpoint.Hash.String()}) + } + if len(checkpointPtx.UnsignedTx.TxIn) == 0 { + return errors.CHECKPOINT_TX_INVALID.New("checkpoint tx %s missing input", checkpointOutpoint.Hash). 
+ WithMetadata(errors.CheckpointValidationMetadata{Txid: checkpointOutpoint.Hash.String()}) + } + + prev := checkpointPtx.UnsignedTx.TxIn[0].PreviousOutPoint + + if err := s.verifyAssetInputPrevOut(m.ctx, input, prev); err != nil { + return err + } + + return nil +} + +type assetOutputValidationState int + +const ( + assetOutputInit assetOutputValidationState = iota + assetOutputCollect + assetOutputCountCheck + assetOutputTeleportCheck + assetOutputDone +) + +type assetOutputValidationMachine struct { + ctx context.Context + arkTx wire.MsgTx + assetsList []extension.AssetGroup + assetGp extension.AssetGroup + state assetOutputValidationState + processed int + sumInputs uint64 + sumOutputs uint64 + teleportInputs map[string]uint64 + localScriptAmts map[string]uint64 +} + +func (m *assetOutputValidationMachine) run(s *service) error { + for { + switch m.state { + case assetOutputInit: + m.sumInputs = sumAssetInputs(m.assetGp.Inputs) + m.sumOutputs = sumAssetOutputs(m.assetGp.Outputs) + m.teleportInputs = make(map[string]uint64) + m.localScriptAmts = make(map[string]uint64) + m.processed = 0 + for _, in := range m.assetGp.Inputs { + if in.Type == extension.AssetTypeTeleport { + teleportScriptHex := hex.EncodeToString(in.Witness.Script[:]) + m.teleportInputs[teleportScriptHex] += in.Amount + } + } + m.state = assetOutputCollect + case assetOutputCollect: + for _, assetOut := range m.assetGp.Outputs { + switch assetOut.Type { + case extension.AssetTypeLocal: + for index, txout := range m.arkTx.TxOut { + if index == int(assetOut.Vout) { + m.localScriptAmts[hex.EncodeToString(txout.PkScript)] += assetOut.Amount + m.processed++ + break + } + } + case extension.AssetTypeTeleport: + m.processed++ + default: + return errors.ASSET_OUTPUT_INVALID.New("unknown asset output type %d", assetOut.Type). 
+ WithMetadata(errors.AssetOutputMetadata{OutputType: int(assetOut.Type)}) + } + } + m.state = assetOutputCountCheck + case assetOutputCountCheck: + if m.processed != len(m.assetGp.Outputs) { + return errors.ASSET_OUTPUT_INVALID.New( + "not all asset outputs verified: processed %d of %d", + m.processed, len(m.assetGp.Outputs), + ).WithMetadata(errors.AssetOutputMetadata{}) + } + m.state = assetOutputTeleportCheck + case assetOutputTeleportCheck: + // Teleport redemptions must land on a local output with the same script. + for scriptHex, inAmount := range m.teleportInputs { + outAmount, exists := m.localScriptAmts[scriptHex] + if !exists { + return errors.TELEPORT_VALIDATION_FAILED.New( + "teleport input script %s not found in output", + scriptHex, + ).WithMetadata(errors.TeleportValidationMetadata{Script: scriptHex}) + } + if outAmount > inAmount { + // verify if extra amount is covered by existing input + if m.sumInputs == m.sumOutputs { + continue + } + + assetID := m.assetGp.AssetId.ToString() + controlAssetDetails, err := s.repoManager.Assets().GetAssetGroupByID( + m.ctx, assetID, + ) + if err != nil { + return errors.ASSET_VALIDATION_FAILED.New( + "error retrieving asset %s: %w", + assetID, + err, + ).WithMetadata(errors.AssetValidationMetadata{AssetID: assetID}) + } + + if controlAssetDetails == nil { + return errors.ASSET_NOT_FOUND.New("asset %s does not exist", assetID). 
+ WithMetadata(errors.AssetValidationMetadata{AssetID: assetID}) + } + + controlAssetId := controlAssetDetails.ControlAssetID + if controlAssetId == "" { + return errors.CONTROL_ASSET_INVALID.New( + "asset %s does not have a control asset", + assetID, + ).WithMetadata(errors.ControlAssetMetadata{AssetID: assetID}) + } + + decodedControlAssetId, err := extension.AssetIdFromString(controlAssetId) + if err != nil { + return errors.CONTROL_ASSET_INVALID.New( + "error decoding control asset ID %s: %w", + controlAssetId, + err, + ).WithMetadata(errors.ControlAssetMetadata{ + AssetID: assetID, + ControlAssetID: controlAssetId, + }) + } + + if decodedControlAssetId == nil { + return errors.CONTROL_ASSET_INVALID.New("invalid control asset ID %s", controlAssetId). + WithMetadata(errors.ControlAssetMetadata{ + AssetID: assetID, + ControlAssetID: controlAssetId, + }) + } + + if err := s.ensureAssetPresence(m.ctx, m.assetsList, *decodedControlAssetId); err != nil { + return err + } + } + } + m.state = assetOutputDone + case assetOutputDone: + return nil + default: + return errors.ASSET_OUTPUT_INVALID.New("invalid asset output validation state %d", m.state). + WithMetadata(errors.AssetOutputMetadata{}) + } + } +} + +func (s *service) validateTeleportInput( + intentProof psbt.Packet, + arkTx wire.MsgTx, + assetId extension.AssetId, + index uint32, + script []byte, +) error { + // validate teleport script exists in intent proof + assetPacket, _, err := extension.DeriveAssetPacketFromTx(*intentProof.UnsignedTx) + if err != nil { + return errors.TELEPORT_VALIDATION_FAILED.New("error deriving asset packet from intent proof: %s", err). + WithMetadata(errors.TeleportValidationMetadata{AssetID: assetId.ToString()}) + } + + if assetPacket == nil { + return errors.ASSET_PACKET_INVALID.New("no asset packet found in intent proof"). 
+ WithMetadata(errors.AssetValidationMetadata{}) + } + + teleportOutputFound := false + for _, assetGroup := range assetPacket.Assets { + for i, assetOutput := range assetGroup.Outputs { + if assetOutput.Type == extension.AssetTypeTeleport && + bytes.Equal(assetOutput.Script, script) && + assetId == *assetGroup.AssetId && + index == uint32(i) { + teleportOutputFound = true + break + } + } + } + + if !teleportOutputFound { + return errors.TELEPORT_VALIDATION_FAILED.New( + "teleport output not found in intent proof for asset %s index %d", + assetId.ToString(), + index, + ).WithMetadata(errors.TeleportValidationMetadata{ + AssetID: assetId.ToString(), + OutputIndex: int(index), + }) + } + + return nil + +} + +func (s *service) verifyAssetInputPrevOut( + ctx context.Context, + input extension.AssetInput, + prev wire.OutPoint, +) error { + txid := prev.Hash.String() + + offchainTx, err := s.repoManager.OffchainTxs().GetOffchainTx(ctx, txid) + if err != nil { + return errors.OFFCHAIN_TX_INVALID.New("error retrieving offchain tx %s: %w", txid, err). + WithMetadata(errors.OffchainTxValidationMetadata{Txid: txid}) + } + if offchainTx == nil { + return errors.OFFCHAIN_TX_INVALID.New("offchain tx %s not found in rounds or offchain storage", txid). + WithMetadata(errors.OffchainTxValidationMetadata{Txid: txid}) + } + if !offchainTx.IsFinalized() { + return errors.OFFCHAIN_TX_INVALID.New("offchain tx %s is failed", txid). + WithMetadata(errors.OffchainTxValidationMetadata{Txid: txid}) + } + + decodedArkTx, err := psbt.NewFromRawBytes(strings.NewReader(offchainTx.ArkTx), true) + if err != nil { + return errors.OFFCHAIN_TX_INVALID.New("error decoding Ark Tx: %s", err). 
+ WithMetadata(errors.OffchainTxValidationMetadata{Txid: txid}) + } + + var assetGroup *extension.AssetPacket + + for _, output := range decodedArkTx.UnsignedTx.TxOut { + if extension.ContainsAssetPacket(output.PkScript) { + assetGp, err := extension.DecodeAssetPacket(*output) + if err != nil { + return errors.ASSET_PACKET_INVALID.New("error decoding asset Opreturn: %s", err). + WithMetadata(errors.AssetValidationMetadata{}) + } + assetGroup = assetGp + break + } + } + if assetGroup == nil { + return errors.ASSET_PACKET_INVALID.New("asset packet missing in offchain tx %s", txid). + WithMetadata(errors.AssetValidationMetadata{}) + } + + // verify asset input in present in assetGroup.Inputs + totalAssetOuts := make([]extension.AssetOutput, 0) + for _, asset := range assetGroup.Assets { + totalAssetOuts = append(totalAssetOuts, asset.Outputs...) + } + + for _, assetOut := range totalAssetOuts { + if assetOut.Vout == prev.Index && input.Amount == assetOut.Amount { + return nil + } + } + + return errors.ASSET_OUTPUT_INVALID.New("asset output %d not found", prev.Index). + WithMetadata(errors.AssetOutputMetadata{OutputIndex: int(prev.Index)}) + +} + +func (s *service) ensureAssetPresence( + ctx context.Context, + assets []extension.AssetGroup, + asset extension.AssetId, +) error { + if len(assets) == 0 { + return errors.CONTROL_ASSET_INVALID.New("no assets provided for control asset validation"). + WithMetadata(errors.ControlAssetMetadata{}) + } + + for _, asst := range assets { + if asst.AssetId != nil && (*asst.AssetId == asset) { + return nil + } + } + + assetID := asset.ToString() + return errors.CONTROL_ASSET_NOT_FOUND.New("missing control asset %s in transaction", assetID). 
+ WithMetadata(errors.ControlAssetMetadata{ControlAssetID: assetID}) +} + +func sumAssetInputs(inputs []extension.AssetInput) uint64 { + total := uint64(0) + for _, in := range inputs { + total += in.Amount + } + return total +} + +func sumAssetOutputs(outputs []extension.AssetOutput) uint64 { + total := uint64(0) + for _, out := range outputs { + total += out.Amount + } + return total +} diff --git a/internal/core/application/indexer.go b/internal/core/application/indexer.go index daf1de9ab..fc55373e1 100644 --- a/internal/core/application/indexer.go +++ b/internal/core/application/indexer.go @@ -38,6 +38,7 @@ type IndexerService interface { GetVtxoChain(ctx context.Context, vtxoKey Outpoint, page *Page) (*VtxoChainResp, error) GetVirtualTxs(ctx context.Context, txids []string, page *Page) (*VirtualTxsResp, error) GetBatchSweepTxs(ctx context.Context, batchOutpoint Outpoint) ([]string, error) + GetAssetGroup(ctx context.Context, assetID string) (*AssetGroupResp, error) } type indexerService struct { @@ -93,6 +94,38 @@ func (i *indexerService) GetVtxoTree( }, nil } +func (i *indexerService) GetAssetGroup( + ctx context.Context, assetID string, +) (*AssetGroupResp, error) { + asset, err := i.repoManager.Assets().GetAssetGroupByID(ctx, assetID) + if err != nil { + return nil, err + } + + if asset == nil { + return nil, fmt.Errorf("asset not found: %s", assetID) + } + + assetAnchorList, err := i.repoManager.Assets().ListAssetAnchorsByAssetID(ctx, assetID) + if err != nil { + return nil, err + } + + assetAnchors := make([]Outpoint, 0, len(assetAnchorList)) + for _, anchor := range assetAnchorList { + assetAnchors = append(assetAnchors, Outpoint{ + Txid: anchor.Txid, + VOut: anchor.VOut, + }) + } + + return &AssetGroupResp{ + AssetID: assetID, + AssetGroup: *asset, + AnchorOutpoints: assetAnchors, + }, nil +} + func (i *indexerService) GetVtxoTreeLeaves( ctx context.Context, outpoint Outpoint, page *Page, ) (*VtxoTreeLeavesResp, error) { @@ -211,6 +244,18 @@ func (i 
*indexerService) GetVtxos( } vtxos, pageResp := paginate(allVtxos, page, maxPageSizeSpendableVtxos) + + for j, v := range allVtxos { + // add asset to vtxo if present + asset, err := i.repoManager.Assets().GetAssetByOutpoint(ctx, v.Outpoint) + if err == nil && asset != nil { + allVtxos[j].Assets = append(allVtxos[j].Assets, domain.Asset{ + AssetID: asset.AssetID, + Amount: asset.Amount, + }) + } + } + return &GetVtxosResp{ Vtxos: vtxos, Page: pageResp, @@ -225,6 +270,17 @@ func (i *indexerService) GetVtxosByOutpoint( return nil, err } + for j, v := range allVtxos { + // add asset to vtxo if present + asset, err := i.repoManager.Assets().GetAssetByOutpoint(ctx, v.Outpoint) + if err == nil && asset != nil { + allVtxos[j].Assets = append(allVtxos[j].Assets, domain.Asset{ + AssetID: asset.AssetID, + Amount: 0, + }) + } + } + vtxos, pageResp := paginate(allVtxos, page, maxPageSizeSpendableVtxos) return &GetVtxosResp{ Vtxos: vtxos, diff --git a/internal/core/application/service.go b/internal/core/application/service.go index 7c61ffd80..e5201b77e 100644 --- a/internal/core/application/service.go +++ b/internal/core/application/service.go @@ -15,6 +15,7 @@ import ( "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/internal/core/ports" arklib "github.com/arkade-os/arkd/pkg/ark-lib" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" "github.com/arkade-os/arkd/pkg/ark-lib/intent" "github.com/arkade-os/arkd/pkg/ark-lib/offchain" "github.com/arkade-os/arkd/pkg/ark-lib/script" @@ -277,12 +278,15 @@ func NewService( spentVtxos := svc.getSpentVtxos(round.Intents) newVtxos := getNewVtxosFromRound(round) + newTeleportAssets := getTeleportAssets(round) + // commitment tx event txEvent := TransactionEvent{ TxData: TxData{Tx: round.CommitmentTx, Txid: round.CommitmentTxid}, Type: CommitmentTxType, SpentVtxos: spentVtxos, SpendableVtxos: newVtxos, + TeleportAssets: newTeleportAssets, } svc.propagateTransactionEvent(txEvent) @@ -330,7 +334,6 @@ func NewService( 
} } - // ark tx event txEvent := TransactionEvent{ TxData: TxData{Txid: txid, Tx: offchainTx.ArkTx}, Type: ArkTxType, @@ -349,9 +352,9 @@ func NewService( }, ) - if err := svc.restoreWatchingVtxos(); err != nil { - return nil, fmt.Errorf("failed to restore watching vtxos: %s", err) - } + // if err := svc.restoreWatchingVtxos(); err != nil { + // return nil, fmt.Errorf("failed to restore watching vtxos: %s", err) + // } go svc.listenToScannerNotifications() return svc, nil } @@ -911,8 +914,31 @@ func (s *service) SubmitOffchainTx( outputs := make([]*wire.TxOut, 0) // outputs excluding the anchor foundAnchor := false foundOpReturn := false + assetOutputIndex := -1 + var rebuiltArkTx *psbt.Packet + var rebuiltCheckpointTxs []*psbt.Packet for outIndex, out := range arkPtx.UnsignedTx.TxOut { + // validate asset packet if present + if extension.ContainsAssetPacket(out.PkScript) { + if foundOpReturn { + return nil, errors.MALFORMED_ARK_TX.New( + "tx %s has multiple op return outputs, not allowed for assets", txid, + ).WithMetadata(errors.PsbtMetadata{Tx: signedArkTx}) + } + foundOpReturn = true + + err := s.validateAssetTransition(ctx, *arkPtx.UnsignedTx, checkpointTxs, *out) + if err != nil { + log.WithError(err).Warn("asset transaction validation failed") + return nil, errors.ASSET_VALIDATION_FAILED.Wrap(err) + } + + outputs = append(outputs, out) + assetOutputIndex = outIndex + continue + } + if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) { if foundAnchor { return nil, errors.MALFORMED_ARK_TX.New( @@ -979,9 +1005,10 @@ func (s *service) SubmitOffchainTx( } // recompute all txs (checkpoint txs + ark tx) - rebuiltArkTx, rebuiltCheckpointTxs, err := offchain.BuildTxs( + rebuiltArkTx, rebuiltCheckpointTxs, err = offchain.BuildTxs( ins, outputs, s.checkpointTapscript, ) + if err != nil { return nil, errors.INTERNAL_ERROR.New("failed to rebuild ark transaction: %w", err). 
WithMetadata(map[string]any{ @@ -1101,6 +1128,16 @@ func (s *service) SubmitOffchainTx( // apply Accepted event only after verifying the spent vtxos changes = append(changes, change) + if assetOutputIndex >= 0 { + if err := s.storeAssetDetailsFromArkTx( + ctx, *arkPtx.UnsignedTx, assetOutputIndex, + ); err != nil { + log.WithError(err).Errorf( + "failed to store asset details for offchain tx %s", txid, + ) + } + } + signedCheckpointTxs := make([]string, 0, len(signedCheckpointTxsMap)) for _, tx := range signedCheckpointTxsMap { signedCheckpointTxs = append(signedCheckpointTxs, tx) @@ -1437,6 +1474,7 @@ func (s *service) RegisterIntent( boardingUtxos := make([]boardingIntentInput, 0) outpoints := proof.GetOutpoints() + if len(outpoints) == 0 { return "", errors.INVALID_INTENT_PSBT.New("proof misses inputs"). WithMetadata(errors.PsbtMetadata{Tx: proof.UnsignedTx.TxID()}) @@ -1483,6 +1521,175 @@ func (s *service) RegisterIntent( seenOutpoints := make(map[wire.OutPoint]struct{}) + assetInputMap := make(map[uint32][]AssetInput) + var assetPacket *extension.AssetPacket + + hasOffChainReceiver := false + receivers := make([]domain.Receiver, 0) + onchainOutputs := make([]wire.TxOut, 0) + offchainOutputs := make([]wire.TxOut, 0) + + for outputIndex, output := range proof.UnsignedTx.TxOut { + + if extension.ContainsAssetPacket(output.PkScript) { + assetPacket, err = extension.DecodeAssetPacket(*output) + if err != nil { + return "", errors.INVALID_INTENT_PROOF.New( + "failed to decode asset packet: %w", err, + ).WithMetadata(errors.InvalidIntentProofMetadata{ + Proof: encodedProof, + Message: encodedMessage, + }) + } + + for _, asst := range assetPacket.Assets { + if asst.AssetId == nil { + return "", errors.INVALID_INTENT_PROOF.New( + "asset packet missing asset id", + ).WithMetadata(errors.InvalidIntentProofMetadata{ + Proof: encodedProof, + Message: encodedMessage, + }) + } + + for _, input := range asst.Inputs { + if _, ok := assetInputMap[input.Vin]; !ok { + 
assetInputMap[input.Vin] = make([]AssetInput, 0) + } + + assetInputMap[input.Vin] = append(assetInputMap[input.Vin], AssetInput{ + AssetInput: input, + AssetId: asst.AssetId.ToString(), + }) + } + + for i, output := range asst.Outputs { + if output.Type != extension.AssetTypeTeleport { + return "", errors.INVALID_INTENT_PROOF.New( + "asset output is not teleport output", + ).WithMetadata(errors.InvalidIntentProofMetadata{ + Proof: encodedProof, + Message: encodedMessage, + }) + } + + teleportScript := hex.EncodeToString(output.Script) + + if err := s.repoManager.Assets().InsertTeleportAsset(ctx, domain.TeleportAsset{ + IntentID: proofTxid, + Script: teleportScript, + OutputIndex: uint32(i), + AssetID: asst.AssetId.ToString(), + Amount: output.Amount, + IsClaimed: false, + }); err != nil { + log.WithError(err).Warn("failed to insert teleport asset") + } + + receivers = append(receivers, domain.Receiver{ + Amount: output.Amount, + AssetId: asst.AssetId.ToString(), + PubKey: hex.EncodeToString(output.Script[2:]), + }) + } + + } + + continue + } + + amount := uint64(output.Value) + rcv := domain.Receiver{ + Amount: amount, + } + + isOnchainOutput := slices.Contains(message.OnchainOutputIndexes, outputIndex) + if isOnchainOutput { + if s.utxoMaxAmount >= 0 { + if amount > uint64(s.utxoMaxAmount) { + return "", errors.AMOUNT_TOO_HIGH.New( + "output %d amount is higher than max utxo amount: %d", + outputIndex, + s.utxoMaxAmount, + ).WithMetadata(errors.AmountTooHighMetadata{ + OutputIndex: outputIndex, + Amount: int(amount), + MaxAmount: int(s.utxoMaxAmount), + }) + } + } + if amount < uint64(s.utxoMinAmount) { + return "", errors.AMOUNT_TOO_LOW.New( + "output %d amount is lower than min utxo amount: %d", + outputIndex, + s.utxoMinAmount, + ).WithMetadata(errors.AmountTooLowMetadata{ + OutputIndex: outputIndex, + Amount: int(amount), + MinAmount: int(s.utxoMinAmount), + }) + } + + chainParams := s.chainParams() + if chainParams == nil { + return "", 
errors.INTERNAL_ERROR.New("unsupported network: %s", s.network.Name). + WithMetadata(map[string]any{ + "network": s.network.Name, + }) + } + scriptType, addrs, _, err := txscript.ExtractPkScriptAddrs( + output.PkScript, chainParams, + ) + if err != nil { + return "", errors.INVALID_PKSCRIPT.New( + "failed to get onchain address from script of output %d: %w", outputIndex, err, + ).WithMetadata(errors.InvalidPkScriptMetadata{ + Script: hex.EncodeToString(output.PkScript), + }) + } + + if len(addrs) == 0 { + return "", errors.INVALID_PKSCRIPT.New( + "invalid script type for output %d: %s", outputIndex, scriptType, + ).WithMetadata(errors.InvalidPkScriptMetadata{ + Script: hex.EncodeToString(output.PkScript), + }) + } + + rcv.OnchainAddress = addrs[0].EncodeAddress() + onchainOutputs = append(onchainOutputs, *output) + } else { + if s.vtxoMaxAmount >= 0 { + if amount > uint64(s.vtxoMaxAmount) { + return "", errors.AMOUNT_TOO_HIGH.New( + "output %d amount is higher than max vtxo amount: %d", + outputIndex, s.vtxoMaxAmount, + ).WithMetadata(errors.AmountTooHighMetadata{ + OutputIndex: outputIndex, + Amount: int(amount), + MaxAmount: int(s.vtxoMaxAmount), + }) + } + } + if amount < uint64(s.vtxoMinSettlementAmount) { + return "", errors.AMOUNT_TOO_LOW.New( + "output %d amount is lower than min vtxo amount: %d", + outputIndex, s.vtxoMinSettlementAmount, + ).WithMetadata(errors.AmountTooLowMetadata{ + OutputIndex: outputIndex, + Amount: int(amount), + MinAmount: int(s.vtxoMinSettlementAmount), + }) + } + + hasOffChainReceiver = true + rcv.PubKey = hex.EncodeToString(output.PkScript[2:]) + } + + receivers = append(receivers, rcv) + offchainOutputs = append(offchainOutputs, *output) + } + for i, outpoint := range outpoints { if _, seen := seenOutpoints[outpoint]; seen { return "", errors.INVALID_INTENT_PROOF.New( @@ -1532,6 +1739,13 @@ func (s *service) RegisterIntent( taptreeFields, _ := txutils.GetArkPsbtFields( &proof.Packet, i+1, txutils.VtxoTaprootTreeField, ) + + if err 
!= nil { + return "", errors.INVALID_PSBT_INPUT.New( + "failed to get asset seal field for input %d: %w", i+1, err, + ).WithMetadata(errors.InputMetadata{Txid: proofTxid, InputIndex: i + 1}) + } + tapscripts := make([]string, 0) if len(taptreeFields) > 0 { tapscripts = taptreeFields[0] @@ -1543,6 +1757,7 @@ func (s *service) RegisterIntent( ) vtxosResult, err := s.repoManager.Vtxos().GetVtxos(ctx, []domain.Outpoint{vtxoOutpoint}) + if err != nil || len(vtxosResult) == 0 { // reject if intent specifies onchain outputs and boarding inputs if len(message.OnchainOutputIndexes) > 0 { @@ -1575,6 +1790,30 @@ func (s *service) RegisterIntent( } vtxo := vtxosResult[0] + + // verify asset input if present + // +1 to account for proof fake input at index 0 + if assetInputList, ok := assetInputMap[uint32(i+1)]; ok { + for _, assetInput := range assetInputList { + if err := s.verifyAssetInputPrevOut(ctx, assetInput.AssetInput, outpoint); err != nil { + return "", errors.ASSET_VALIDATION_FAILED.New( + "asset input validation failed for input %d: %w", i, err, + ).WithMetadata(errors.AssetValidationMetadata{ + AssetID: assetInput.AssetId, + Message: fmt.Sprintf( + "validation failed for vtxo %s", vtxo.Outpoint.String(), + ), + }) + } + + vtxo.Assets = append(vtxo.Assets, domain.Asset{ + AssetID: assetInput.AssetId, + Amount: assetInput.Amount, + }) + } + + } + if err := s.checkIfBanned(ctx, vtxo); err != nil { return "", errors.VTXO_BANNED.Wrap(err). 
WithMetadata(errors.VtxoMetadata{VtxoOutpoint: vtxo.Outpoint.String()}) @@ -1718,104 +1957,6 @@ func (s *service) RegisterIntent( }) } - hasOffChainReceiver := false - receivers := make([]domain.Receiver, 0) - onchainOutputs := make([]wire.TxOut, 0) - offchainOutputs := make([]wire.TxOut, 0) - - for outputIndex, output := range proof.UnsignedTx.TxOut { - amount := uint64(output.Value) - rcv := domain.Receiver{ - Amount: amount, - } - - isOnchainOutput := slices.Contains(message.OnchainOutputIndexes, outputIndex) - if isOnchainOutput { - if s.utxoMaxAmount >= 0 { - if amount > uint64(s.utxoMaxAmount) { - return "", errors.AMOUNT_TOO_HIGH.New( - "output %d amount is higher than max utxo amount: %d", - outputIndex, - s.utxoMaxAmount, - ).WithMetadata(errors.AmountTooHighMetadata{ - OutputIndex: outputIndex, - Amount: int(amount), - MaxAmount: int(s.utxoMaxAmount), - }) - } - } - if amount < uint64(s.utxoMinAmount) { - return "", errors.AMOUNT_TOO_LOW.New( - "output %d amount is lower than min utxo amount: %d", - outputIndex, - s.utxoMinAmount, - ).WithMetadata(errors.AmountTooLowMetadata{ - OutputIndex: outputIndex, - Amount: int(amount), - MinAmount: int(s.utxoMinAmount), - }) - } - - chainParams := s.chainParams() - if chainParams == nil { - return "", errors.INTERNAL_ERROR.New("unsupported network: %s", s.network.Name). 
- WithMetadata(map[string]any{ - "network": s.network.Name, - }) - } - scriptType, addrs, _, err := txscript.ExtractPkScriptAddrs( - output.PkScript, chainParams, - ) - if err != nil { - return "", errors.INVALID_PKSCRIPT.New( - "failed to get onchain address from script of output %d: %w", outputIndex, err, - ).WithMetadata(errors.InvalidPkScriptMetadata{ - Script: hex.EncodeToString(output.PkScript), - }) - } - - if len(addrs) == 0 { - return "", errors.INVALID_PKSCRIPT.New( - "invalid script type for output %d: %s", outputIndex, scriptType, - ).WithMetadata(errors.InvalidPkScriptMetadata{ - Script: hex.EncodeToString(output.PkScript), - }) - } - - rcv.OnchainAddress = addrs[0].EncodeAddress() - onchainOutputs = append(onchainOutputs, *output) - } else { - if s.vtxoMaxAmount >= 0 { - if amount > uint64(s.vtxoMaxAmount) { - return "", errors.AMOUNT_TOO_HIGH.New( - "output %d amount is higher than max vtxo amount: %d", - outputIndex, s.vtxoMaxAmount, - ).WithMetadata(errors.AmountTooHighMetadata{ - OutputIndex: outputIndex, - Amount: int(amount), - MaxAmount: int(s.vtxoMaxAmount), - }) - } - } - if amount < uint64(s.vtxoMinSettlementAmount) { - return "", errors.AMOUNT_TOO_LOW.New( - "output %d amount is lower than min vtxo amount: %d", - outputIndex, s.vtxoMinSettlementAmount, - ).WithMetadata(errors.AmountTooLowMetadata{ - OutputIndex: outputIndex, - Amount: int(amount), - MinAmount: int(s.vtxoMinSettlementAmount), - }) - } - - hasOffChainReceiver = true - rcv.PubKey = hex.EncodeToString(output.PkScript[2:]) - } - - receivers = append(receivers, rcv) - offchainOutputs = append(offchainOutputs, *output) - } - if hasOffChainReceiver { if len(message.CosignersPublicKeys) == 0 { return "", errors.INVALID_INTENT_MESSAGE.New( @@ -1930,8 +2071,8 @@ func (s *service) SubmitForfeitTxs(ctx context.Context, forfeitTxs []string) err } // TODO move forfeit validation outside of ports.LiveStore - if err := s.cache.ForfeitTxs().Sign(ctx, forfeitTxs); err != nil { - return 
errors.INVALID_FORFEIT_TXS.New("failed to sign forfeit txs: %w", err). + if err := s.cache.ForfeitTxs().Verify(ctx, forfeitTxs); err != nil { + return errors.INVALID_FORFEIT_TXS.New("failed to verify forfeit txs: %w", err). WithMetadata(errors.InvalidForfeitTxsMetadata{ForfeitTxs: forfeitTxs}) } @@ -3648,39 +3789,6 @@ func (s *service) stopWatchingVtxos(tapkeys []string) { } } -func (s *service) restoreWatchingVtxos() error { - ctx := context.Background() - - commitmentTxIds, err := s.repoManager.Rounds().GetSweepableRounds(ctx) - if err != nil { - return err - } - - scripts := make([]string, 0) - - for _, commitmentTxId := range commitmentTxIds { - tapKeys, err := s.repoManager.Vtxos().GetVtxoPubKeysByCommitmentTxid(ctx, commitmentTxId, 0) - if err != nil { - return err - } - - for _, key := range tapKeys { - scripts = append(scripts, fmt.Sprintf("5120%s", key)) - } - } - - if len(scripts) <= 0 { - return nil - } - - if err := s.scanner.WatchScripts(ctx, scripts); err != nil { - return err - } - - log.Debugf("restored watching %d vtxo scripts", len(scripts)) - return nil -} - // extractVtxosScriptsForScanner extracts the scripts for the vtxos to be watched by the scanner // it excludes subdust vtxos scripts and duplicates // it logs errors and continues in order to not block the start/stop watching vtxos operations @@ -3694,6 +3802,11 @@ func (s *service) extractVtxosScriptsForScanner(vtxos []domain.Vtxo) ([]string, scripts := make([]string, 0) for _, vtxo := range vtxos { + // skip OP_RETURN outputs + if vtxo.Amount < dustLimit { + continue + } + vtxoTapKeyBytes, err := hex.DecodeString(vtxo.PubKey) if err != nil { log.WithError(err).Warnf("failed to decode vtxo pubkey: %s", vtxo.PubKey) @@ -3706,10 +3819,6 @@ func (s *service) extractVtxosScriptsForScanner(vtxos []domain.Vtxo) ([]string, continue } - if vtxo.Amount < dustLimit { - continue - } - p2trScript, err := script.P2TRScript(vtxoTapKey) if err != nil { log.WithError(err). 
@@ -4167,3 +4276,308 @@ func (s *service) propagateTransactionEvent(event TransactionEvent) { s.transactionEventsCh <- event }() } + +func (s *service) storeAssetDetailsFromArkTx( + ctx context.Context, + arkTx wire.MsgTx, + assetPacketIndex int, +) error { + assetPkt, err := extension.DecodeAssetPacket(*arkTx.TxOut[assetPacketIndex]) + if err != nil { + return fmt.Errorf("error decoding asset from opreturn: %s", err) + } + + if err := s.storeAssetGroups(ctx, assetPacketIndex, assetPkt.Assets, arkTx); err != nil { + return err + } + + return nil + +} + +func (s *service) storeAssetGroups( + ctx context.Context, + assetPacketIndex int, + assetGroupList []extension.AssetGroup, + arkTx wire.MsgTx, +) error { + anchorPoint := domain.Outpoint{ + Txid: arkTx.TxID(), + VOut: uint32(assetPacketIndex), + } + + assetList := make([]domain.NormalAsset, 0) + + for i, asstGp := range assetGroupList { + totalIn := sumAssetInputs(asstGp.Inputs) + totalOut := sumAssetOutputs(asstGp.Outputs) + + s.markTeleportInputsClaimed(ctx, asstGp) + + metadataList := assetMetadataFromGroup(asstGp.Metadata) + + assetId := asstGp.AssetId + + // For Issuance + if assetId == nil { + var controlAsset string + txHash := arkTx.TxHash() + var txHashBytes [32]byte + copy(txHashBytes[:], txHash[:]) + + assetId := extension.AssetId{ + Txid: txHashBytes, + Index: uint16(i), + } + + if asstGp.ControlAsset != nil { + switch asstGp.ControlAsset.Type { + case extension.AssetRefByID: + controlAsset = asstGp.ControlAsset.AssetId.ToString() + case extension.AssetRefByGroup: + controlAsset = extension.AssetId{ + Txid: txHashBytes, + Index: asstGp.ControlAsset.GroupIndex, + }.ToString() + } + } + + err := s.repoManager.Assets().InsertAssetGroup(ctx, domain.AssetGroup{ + ID: assetId.ToString(), + Quantity: totalOut, + Immutable: asstGp.Immutable, + Metadata: metadataList, + ControlAssetID: controlAsset, + }) + if err != nil { + return fmt.Errorf("error storing new asset: %s", err) + } + + log.Infof("stored new asset 
with id %s and total quantity %d", + assetId.ToString(), + totalOut, + ) + + for _, out := range asstGp.Outputs { + if out.Type == extension.AssetTypeTeleport { + continue + } + + asst := domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: arkTx.TxID(), + VOut: uint32(out.Vout), + }, + AssetID: assetId.ToString(), + Amount: out.Amount, + } + assetList = append(assetList, asst) + } + + continue + } + + assetGp, err := s.repoManager.Assets().GetAssetGroupByID(ctx, assetId.ToString()) + if err != nil { + return fmt.Errorf("error retrieving asset data: %s", err) + } + if assetGp == nil { + return fmt.Errorf("asset with id %s not found for update", assetId.ToString()) + } + + if !assetGp.Immutable && len(metadataList) > 0 { + if len(assetGp.ControlAssetID) == 0 { + return fmt.Errorf("cannot update mutable asset without control asset") + } + + controlAssetID, err := extension.AssetIdFromString(assetGp.ControlAssetID) + if err != nil { + return fmt.Errorf("error parsing control asset id: %s", err) + } + + if controlAssetID == nil { + return fmt.Errorf("invalid control asset id for asset %s", assetId.ToString()) + } + + if err := s.ensureAssetPresence(ctx, assetGroupList, *controlAssetID); err != nil { + return fmt.Errorf( + "cannot update asset metadata while control asset is being issued", + ) + } + + if err := s.repoManager.Assets().UpdateAssetMetadataList(ctx, assetId.ToString(), metadataList); err != nil { + return fmt.Errorf("error updating asset metadata: %s", err) + } + } + + log.Infof("updated asset metadata for asset id %s", + assetId.ToString(), + ) + + if err := s.updateAssetQuantity(ctx, assetId.ToString(), totalIn, totalOut); err != nil { + return err + } + + for _, out := range asstGp.Outputs { + asst := domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: arkTx.TxID(), + VOut: uint32(out.Vout), + }, + AssetID: assetId.ToString(), + Amount: out.Amount, + } + assetList = append(assetList, asst) + } + } + + if err := 
s.repoManager.Assets().InsertAssetAnchor(ctx, domain.AssetAnchor{ + Outpoint: anchorPoint, + Assets: assetList, + }); err != nil { + return fmt.Errorf("error storing asset anchor: %s", err) + } + + return nil +} + +func (s *service) markTeleportInputsClaimed(ctx context.Context, grpAsset extension.AssetGroup) { + assetId := grpAsset.AssetId + if assetId == nil { + return + } + + assetIdStr := assetId.ToString() + + for _, in := range grpAsset.Inputs { + if in.Type != extension.AssetTypeTeleport { + continue + } + + intentId := hex.EncodeToString(in.Witness.Txid[:]) + teleportScript := hex.EncodeToString(in.Witness.Script) + if err := s.repoManager.Assets().UpdateTeleportAsset(ctx, teleportScript, intentId, assetIdStr, in.Vin, true); err != nil { + log.WithError(err).Warn("failed to update teleport asset") + } + } +} + +func assetMetadataFromGroup(metadata []extension.Metadata) []domain.AssetMetadata { + metadataList := make([]domain.AssetMetadata, 0, len(metadata)) + for _, meta := range metadata { + metadataList = append(metadataList, domain.AssetMetadata{ + Key: meta.Key, + Value: meta.Value, + }) + } + return metadataList +} + +func (s *service) updateAssetQuantity( + ctx context.Context, + assetID string, + totalIn, totalOut uint64, +) error { + if totalOut > totalIn { + delta := totalOut - totalIn + if err := s.repoManager.Assets().IncreaseAssetGroupQuantity(ctx, assetID, delta); err != nil { + return fmt.Errorf("error updating asset quantity: %s", err) + } + return nil + } + + if totalIn > totalOut { + delta := totalIn - totalOut + if err := s.repoManager.Assets().DecreaseAssetGroupQuantity(ctx, assetID, delta); err != nil { + return fmt.Errorf("error updating asset quantity: %s", err) + } + } + + return nil +} + +func getTeleportAssets(round *domain.Round) []TeleportAsset { + if len(round.VtxoTree) <= 0 { + return nil + } + + createdAt := time.Now().Unix() + expireAt := round.ExpiryTimestamp() + + events := make([]TeleportAsset, 0) + + collectTeleportAssets 
:= func(groups []extension.AssetGroup, anchorOutpoint domain.Outpoint, createdAt, expiresAt int64, includeAmount bool) []TeleportAsset { + events := make([]TeleportAsset, 0) + for _, ast := range groups { + for outIdx, assetOut := range ast.Outputs { + if assetOut.Type != extension.AssetTypeTeleport { + continue + } + + if ast.AssetId == nil { + continue // skip teleport output with issuance asset id [should not happen] + } + + event := TeleportAsset{ + TeleportHash: hex.EncodeToString(assetOut.Script), + AnchorOutpoint: anchorOutpoint, + AssetID: ast.AssetId.ToString(), + OutputVout: uint32(outIdx), + CreatedAt: createdAt, + ExpiresAt: expiresAt, + } + if includeAmount { + event.Amount = assetOut.Amount + } + events = append(events, event) + } + } + + return events + } + + findAssetPacketInTx := func(tx *wire.MsgTx) (*extension.AssetPacket, domain.Outpoint) { + for i, out := range tx.TxOut { + if !extension.ContainsAssetPacket(out.PkScript) { + continue + } + + packet, err := extension.DecodeAssetPacket(*out) + if err != nil { + return nil, domain.Outpoint{} + } + + anchorOutpoint := domain.Outpoint{ + Txid: tx.TxID(), + VOut: uint32(i), + } + return packet, anchorOutpoint + } + + return nil, domain.Outpoint{} + } + + for _, node := range tree.FlatTxTree(round.VtxoTree).Leaves() { + tx, err := psbt.NewFromRawBytes(strings.NewReader(node.Tx), true) + if err != nil { + log.WithError(err).Warn("failed to parse tx") + continue + } + packet, anchorOutpoint := findAssetPacketInTx(tx.UnsignedTx) + if packet == nil { + continue + } + + events = append( + events, + collectTeleportAssets( + packet.Assets, + anchorOutpoint, + createdAt, + expireAt, + false, + )...) 
+ + } + return events +} diff --git a/internal/core/application/types.go b/internal/core/application/types.go index b724b2588..b60e74878 100644 --- a/internal/core/application/types.go +++ b/internal/core/application/types.go @@ -7,6 +7,7 @@ import ( "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/internal/core/ports" arklib "github.com/arkade-os/arkd/pkg/ark-lib" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" "github.com/arkade-os/arkd/pkg/ark-lib/intent" "github.com/arkade-os/arkd/pkg/ark-lib/tree" "github.com/arkade-os/arkd/pkg/errors" @@ -120,6 +121,17 @@ type TransactionEvent struct { SpendableVtxos []domain.Vtxo SweptVtxos []domain.Outpoint CheckpointTxs map[string]TxData + TeleportAssets []TeleportAsset +} + +type TeleportAsset struct { + TeleportHash string + AssetID string + Amount uint64 + AnchorOutpoint domain.Outpoint + OutputVout uint32 + CreatedAt int64 + ExpiresAt int64 } type VtxoChainResp struct { @@ -173,6 +185,16 @@ type VirtualTxsResp struct { Page PageResp } +type AssetGroupResp struct { + AssetID string + AssetGroup AssetGroup + AnchorOutpoints []Outpoint +} + +type AssetGroup = domain.AssetGroup + +type AssetAnchor = domain.AssetAnchor + type Outpoint = domain.Outpoint type TxType int @@ -217,3 +239,8 @@ type boardingIntentInput struct { locktimeDisabled bool witnessUtxo *wire.TxOut } + +type AssetInput struct { + extension.AssetInput + AssetId string +} diff --git a/internal/core/application/utils.go b/internal/core/application/utils.go index c98bb4f93..8d64f56a6 100644 --- a/internal/core/application/utils.go +++ b/internal/core/application/utils.go @@ -11,6 +11,7 @@ import ( "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/internal/core/ports" arklib "github.com/arkade-os/arkd/pkg/ark-lib" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" "github.com/arkade-os/arkd/pkg/ark-lib/script" "github.com/arkade-os/arkd/pkg/ark-lib/tree" "github.com/arkade-os/arkd/pkg/ark-lib/txutils" @@ 
-18,6 +19,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/btcutil/psbt" + "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" log "github.com/sirupsen/logrus" ) @@ -119,7 +121,83 @@ func decodeTx(offchainTx domain.OffchainTx) (string, []domain.Outpoint, []domain txid := ptx.UnsignedTx.TxID() outs := make([]domain.Vtxo, 0, len(ptx.UnsignedTx.TxOut)) + + assetList := make([]domain.NormalAsset, 0) + assetVouts := make(map[uint32]struct{}) + assetOpReturnProcessed := false + for outIndex, out := range ptx.UnsignedTx.TxOut { + var pubKey string + var isSubDust bool + + if extension.ContainsAssetPacket(out.PkScript) { + if assetOpReturnProcessed { + continue + } + assetOpReturnProcessed = true + + decodedAssetPacket, err := extension.DecodeAssetPacket(*out) + if err != nil { + return "", nil, nil, fmt.Errorf( + "failed to decode asset group from opreturn: %s", + err, + ) + } + + allAssets := decodedAssetPacket.Assets + + for i, grpAsset := range allAssets { + var assetId extension.AssetId + + if grpAsset.AssetId == nil { + assetId = extension.AssetId{ + Txid: ptx.UnsignedTx.TxHash(), + Index: uint16(i), + } + } else { + assetId = *grpAsset.AssetId + } + for _, assetOut := range grpAsset.Outputs { + if assetOut.Type != extension.AssetTypeLocal { + continue + } + if _, exists := assetVouts[assetOut.Vout]; exists { + return "", nil, nil, fmt.Errorf( + "duplicate asset output vout %d", + assetOut.Vout, + ) + } + assetVouts[assetOut.Vout] = struct{}{} + + assetList = append(assetList, domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: txid, + VOut: assetOut.Vout, + }, + Amount: assetOut.Amount, + AssetID: assetId.ToString(), + }) + } + } + + subDustPacket, err := extension.DecodeSubDustPacket(*out) + if err != nil { + return "", nil, nil, fmt.Errorf( + "failed to decode sub-dust key from opreturn: %s", + err, + ) + } + if subDustPacket == nil || subDustPacket.Key == nil { + continue 
+ } + + pubKey = hex.EncodeToString(schnorr.SerializePubKey(subDustPacket.Key)) + isSubDust = true + } else { + pubKey = hex.EncodeToString(out.PkScript[2:]) + isSubDust = script.IsSubDustScript(out.PkScript) + } + if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) { continue } @@ -128,17 +206,30 @@ func decodeTx(offchainTx domain.OffchainTx) (string, []domain.Outpoint, []domain Txid: txid, VOut: uint32(outIndex), }, - PubKey: hex.EncodeToString(out.PkScript[2:]), + PubKey: pubKey, Amount: uint64(out.Value), ExpiresAt: offchainTx.ExpiryTimestamp, CommitmentTxids: offchainTx.CommitmentTxidsList(), RootCommitmentTxid: offchainTx.RootCommitmentTxId, Preconfirmed: true, - Swept: script.IsSubDustScript(out.PkScript), + Swept: isSubDust, CreatedAt: offchainTx.StartingTimestamp, }) } + // Add AssetGroup if Present + for _, asst := range assetList { + idx := int(asst.VOut) + if idx < 0 || idx >= len(outs) { + continue + } + + outs[idx].Assets = append(outs[idx].Assets, domain.Asset{ + AssetID: asst.AssetID, + Amount: asst.Amount, + }) + } + return txid, ins, outs, nil } @@ -226,6 +317,11 @@ func getNewVtxosFromRound(round *domain.Round) []domain.Vtxo { if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) { continue } + // Skip any OP_RETURN output (asset packets) + // TODO: Make this more robust? 
+ if bytes.HasPrefix(out.PkScript, []byte{txscript.OP_RETURN}) { + continue + } vtxoTapKey, err := schnorr.ParsePubKey(out.PkScript[2:]) if err != nil { diff --git a/internal/core/domain/asset_repo.go b/internal/core/domain/asset_repo.go new file mode 100644 index 000000000..6f1e52ddd --- /dev/null +++ b/internal/core/domain/asset_repo.go @@ -0,0 +1,68 @@ +package domain + +import ( + "context" +) + +type AssetAnchor struct { + Outpoint + Assets []NormalAsset +} + +type AssetMetadata struct { + Key string + Value string +} + +type NormalAsset struct { + Outpoint + Amount uint64 + AssetID string +} + +type AssetGroup struct { + ID string + Quantity uint64 + Immutable bool + ControlAssetID string + Metadata []AssetMetadata +} + +type TeleportAsset struct { + Script string + IntentID string + AssetID string + OutputIndex uint32 + Amount uint64 + IsClaimed bool +} + +type AssetRepository interface { + InsertAssetAnchor(ctx context.Context, anchor AssetAnchor) error + ListAssetAnchorsByAssetID(ctx context.Context, assetID string) ([]AssetAnchor, error) + UpdateAssetMetadataList(ctx context.Context, assetId string, metadatalist []AssetMetadata) error + InsertAssetGroup(ctx context.Context, assetGroup AssetGroup) error + GetAssetByOutpoint(ctx context.Context, outpoint Outpoint) (*NormalAsset, error) + GetAssetGroupByID(ctx context.Context, assetID string) (*AssetGroup, error) + IncreaseAssetGroupQuantity(ctx context.Context, assetID string, amount uint64) error + DecreaseAssetGroupQuantity(ctx context.Context, assetID string, amount uint64) error + ListMetadataByAssetID(ctx context.Context, assetID string) ([]AssetMetadata, error) + GetAssetAnchorByTxId(ctx context.Context, txId string) (*AssetAnchor, error) + InsertTeleportAsset(ctx context.Context, teleport TeleportAsset) error + GetTeleportAsset( + ctx context.Context, + script string, + intentID string, + assetID string, + outputIndex uint32, + ) (*TeleportAsset, error) + UpdateTeleportAsset( + ctx context.Context, + 
script string, + intentID string, + assetID string, + outputIndex uint32, + isClaimed bool, + ) error + Close() +} diff --git a/internal/core/domain/intent.go b/internal/core/domain/intent.go index 8594cb7bd..cbed5ead6 100644 --- a/internal/core/domain/intent.go +++ b/internal/core/domain/intent.go @@ -93,6 +93,7 @@ func (i Intent) validate(ignoreOuts bool) error { type Receiver struct { Amount uint64 OnchainAddress string // onchain + AssetId string // asset PubKey string // offchain } diff --git a/internal/core/domain/vtxo.go b/internal/core/domain/vtxo.go index 8816929c7..f721e4539 100644 --- a/internal/core/domain/vtxo.go +++ b/internal/core/domain/vtxo.go @@ -35,6 +35,11 @@ func (k Outpoint) String() string { return fmt.Sprintf("%s:%d", k.Txid, k.VOut) } +type Asset struct { + AssetID string + Amount uint64 +} + type Vtxo struct { Outpoint Amount uint64 @@ -50,6 +55,7 @@ type Vtxo struct { Preconfirmed bool ExpiresAt int64 CreatedAt int64 + Assets []Asset } func (v Vtxo) String() string { diff --git a/internal/core/ports/live_store.go b/internal/core/ports/live_store.go index 86f49fb03..566094157 100644 --- a/internal/core/ports/live_store.go +++ b/internal/core/ports/live_store.go @@ -37,7 +37,7 @@ type IntentStore interface { type ForfeitTxsStore interface { Init(ctx context.Context, connectors tree.FlatTxTree, intents []domain.Intent) error - Sign(ctx context.Context, txs []string) error + Verify(ctx context.Context, txs []string) error Reset(ctx context.Context) error Pop(ctx context.Context) ([]string, error) AllSigned(ctx context.Context) (bool, error) diff --git a/internal/core/ports/repo_manager.go b/internal/core/ports/repo_manager.go index 1e69fb4ba..2ce581670 100644 --- a/internal/core/ports/repo_manager.go +++ b/internal/core/ports/repo_manager.go @@ -9,6 +9,7 @@ type RepoManager interface { ScheduledSession() domain.ScheduledSessionRepo OffchainTxs() domain.OffchainTxRepository Convictions() domain.ConvictionRepository + Assets() 
domain.AssetRepository Fees() domain.FeeRepository Close() } diff --git a/internal/infrastructure/db/badger/asset_repo.go b/internal/infrastructure/db/badger/asset_repo.go new file mode 100644 index 000000000..4139a0129 --- /dev/null +++ b/internal/infrastructure/db/badger/asset_repo.go @@ -0,0 +1,582 @@ +package badgerdb + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "sort" + "time" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/dgraph-io/badger/v4" + "github.com/timshannon/badgerhold/v4" +) + +const assetStoreDir = "assets" + +type assetRepository struct { + store *badgerhold.Store +} + +type assetGroup struct { + ID string `badgerhold:"key"` + Quantity uint64 + Immutable bool + ControlAssetId string +} + +type assetMetadata struct { + Key string `badgerhold:"key"` + AssetID string `badgerhold:"index"` + MetaKey string + MetaValue string +} + +type assetAnchor struct { + AnchorTxid string `badgerhold:"key"` + AnchorVout uint32 +} + +type anchorAsset struct { + Key string `badgerhold:"key"` + AnchorID string `badgerhold:"index"` + AssetID string `badgerhold:"index"` + Vout uint32 + Amount uint64 +} + +type teleportAsset struct { + Key string `badgerhold:"key"` + Script string `badgerhold:"index"` + IntentID string `badgerhold:"index"` + AssetID string `badgerhold:"index"` + OutputIndex uint32 + Amount uint64 + IsClaimed bool +} + +func teleportAssetKey(script, intentID, assetID string, outputIndex uint32) string { + return fmt.Sprintf("%s:%s:%s:%d", script, intentID, assetID, outputIndex) +} + +func NewAssetRepository(config ...interface{}) (domain.AssetRepository, error) { + if len(config) != 2 { + return nil, fmt.Errorf("invalid config") + } + baseDir, ok := config[0].(string) + if !ok { + return nil, fmt.Errorf("invalid base directory") + } + var logger badger.Logger + if config[1] != nil { + logger, ok = config[1].(badger.Logger) + if !ok { + return nil, fmt.Errorf("invalid logger") + } + } + + var dir string + if 
len(baseDir) > 0 { + dir = filepath.Join(baseDir, assetStoreDir) + } + store, err := createDB(dir, logger) + if err != nil { + return nil, fmt.Errorf("failed to open asset store: %s", err) + } + + return &assetRepository{store}, nil +} + +func (r *assetRepository) Close() { + // nolint:all + r.store.Close() +} + +func (r *assetRepository) ListAssetAnchorsByAssetID( + ctx context.Context, + assetID string, +) ([]domain.AssetAnchor, error) { + query := badgerhold.Where("AssetID").Eq(assetID) + + var assets []anchorAsset + var err error + if tx := getTxFromContext(ctx); tx != nil { + err = r.store.TxFind(tx, &assets, query) + } else { + err = r.store.Find(&assets, query) + } + if err != nil { + if errors.Is(err, badgerhold.ErrNotFound) { + return []domain.AssetAnchor{}, nil + } + return nil, err + } + + anchorIDs := make(map[string]struct{}) + for _, asst := range assets { + anchorIDs[asst.AnchorID] = struct{}{} + } + + anchors := make([]domain.AssetAnchor, 0, len(anchorIDs)) + for anchorID := range anchorIDs { + anchor, err := r.GetAssetAnchorByTxId(ctx, anchorID) + if err != nil { + return nil, err + } + anchors = append(anchors, *anchor) + } + + return anchors, nil +} + +func (r *assetRepository) GetAssetByOutpoint( + ctx context.Context, + outpoint domain.Outpoint, +) (*domain.NormalAsset, error) { + key := anchorAssetKey(outpoint.Txid, outpoint.VOut) + var record anchorAsset + + var err error + if tx := getTxFromContext(ctx); tx != nil { + err = r.store.TxGet(tx, key, &record) + } else { + err = r.store.Get(key, &record) + } + if err != nil { + return nil, err + } + + return &domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: record.AnchorID, + VOut: record.Vout, + }, + Amount: record.Amount, + AssetID: record.AssetID, + }, nil +} + +func (r *assetRepository) ListMetadataByAssetID( + ctx context.Context, + assetID string, +) ([]domain.AssetMetadata, error) { + query := badgerhold.Where("AssetID").Eq(assetID) + + var metadata []assetMetadata + var err error 
+ if tx := getTxFromContext(ctx); tx != nil { + err = r.store.TxFind(tx, &metadata, query) + } else { + err = r.store.Find(&metadata, query) + } + if err != nil { + if errors.Is(err, badgerhold.ErrNotFound) { + return []domain.AssetMetadata{}, nil + } + return nil, err + } + + meta := make([]domain.AssetMetadata, 0, len(metadata)) + for _, m := range metadata { + meta = append(meta, domain.AssetMetadata{ + Key: m.MetaKey, + Value: m.MetaValue, + }) + } + + return meta, nil +} + +func (r *assetRepository) InsertAssetAnchor(ctx context.Context, anchor domain.AssetAnchor) error { + anchorRecord := assetAnchor{ + AnchorTxid: anchor.Txid, + AnchorVout: anchor.VOut, + } + + return r.withRetryableWrite(ctx, func(tx *badger.Txn) error { + if err := r.store.TxInsert(tx, anchorRecord.AnchorTxid, anchorRecord); err != nil { + return err + } + + for _, asst := range anchor.Assets { + record := anchorAsset{ + Key: anchorAssetKey(anchorRecord.AnchorTxid, asst.VOut), + AnchorID: anchorRecord.AnchorTxid, + AssetID: asst.AssetID, + Vout: asst.VOut, + Amount: asst.Amount, + } + + if err := r.store.TxUpsert(tx, record.Key, record); err != nil { + return err + } + } + + return nil + }) +} + +func (r *assetRepository) GetAssetAnchorByTxId( + ctx context.Context, + txId string, +) (*domain.AssetAnchor, error) { + var anchor assetAnchor + + var err error + if tx := getTxFromContext(ctx); tx != nil { + err = r.store.TxGet(tx, txId, &anchor) + } else { + err = r.store.Get(txId, &anchor) + } + if err != nil { + return nil, err + } + + assets, err := r.listAnchorAssets(ctx, txId) + if err != nil { + return nil, err + } + + anchorAssets := make([]domain.NormalAsset, 0, len(assets)) + for _, asst := range assets { + anchorAssets = append(anchorAssets, domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: asst.AnchorID, + VOut: asst.Vout, + }, + Amount: asst.Amount, + AssetID: asst.AssetID, + }) + } + + return &domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: anchor.AnchorTxid, + 
VOut: anchor.AnchorVout, + }, + Assets: anchorAssets, + }, nil +} + +func (r *assetRepository) InsertTeleportAsset( + ctx context.Context, + teleport domain.TeleportAsset, +) error { + key := teleportAssetKey( + teleport.Script, + teleport.IntentID, + teleport.AssetID, + teleport.OutputIndex, + ) + record := teleportAsset{ + Key: key, + Script: teleport.Script, + IntentID: teleport.IntentID, + AssetID: teleport.AssetID, + OutputIndex: teleport.OutputIndex, + Amount: teleport.Amount, + IsClaimed: teleport.IsClaimed, + } + + return r.withRetryableWrite(ctx, func(tx *badger.Txn) error { + return r.store.TxInsert(tx, record.Key, record) + }) +} + +func (r *assetRepository) UpdateTeleportAsset( + ctx context.Context, + script string, + intentID string, + assetID string, + outputIndex uint32, + isClaimed bool, +) error { + var teleport teleportAsset + key := teleportAssetKey(script, intentID, assetID, outputIndex) + + var err error + if tx := getTxFromContext(ctx); tx != nil { + err = r.store.TxGet(tx, key, &teleport) + } else { + err = r.store.Get(key, &teleport) + } + if err != nil { + return err + } + + return r.withRetryableWrite(ctx, func(tx *badger.Txn) error { + record := teleportAsset{ + Key: key, + Script: teleport.Script, + IntentID: teleport.IntentID, + AssetID: teleport.AssetID, + OutputIndex: teleport.OutputIndex, + Amount: teleport.Amount, + IsClaimed: isClaimed, + } + + return r.store.TxUpsert(tx, key, record) + }) +} + +func (r *assetRepository) GetTeleportAsset( + ctx context.Context, + script string, + intentID string, + assetID string, + outputIndex uint32, +) (*domain.TeleportAsset, error) { + var teleport teleportAsset + key := teleportAssetKey(script, intentID, assetID, outputIndex) + + var err error + if tx := getTxFromContext(ctx); tx != nil { + err = r.store.TxGet(tx, key, &teleport) + } else { + err = r.store.Get(key, &teleport) + } + if err != nil { + return nil, err + } + + return &domain.TeleportAsset{ + Script: teleport.Script, + IntentID: 
// InsertAssetGroup stores a new asset group together with its metadata in one
// retryable transaction. The group row itself is inserted (fails if the ID
// already exists), while metadata entries are upserted.
func (r *assetRepository) InsertAssetGroup(ctx context.Context, a domain.AssetGroup) error {
	record := assetGroup{
		ID:             a.ID,
		Quantity:       a.Quantity,
		Immutable:      a.Immutable,
		ControlAssetId: a.ControlAssetID,
	}

	return r.withRetryableWrite(ctx, func(tx *badger.Txn) error {
		if err := r.store.TxInsert(tx, record.ID, record); err != nil {
			return err
		}

		for _, md := range a.Metadata {
			meta := assetMetadata{
				Key:       assetMetadataKey(a.ID, md.Key),
				AssetID:   a.ID,
				MetaKey:   md.Key,
				MetaValue: md.Value,
			}

			if err := r.store.TxUpsert(tx, meta.Key, meta); err != nil {
				return err
			}
		}

		return nil
	})
}

// GetAssetGroupByID returns the asset group with the given ID, with its
// metadata entries resolved from the separate metadata records.
func (r *assetRepository) GetAssetGroupByID(
	ctx context.Context,
	assetID string,
) (*domain.AssetGroup, error) {
	dbAsset, err := r.getAssetGroup(ctx, assetID)
	if err != nil {
		return nil, err
	}

	metadata, err := r.ListMetadataByAssetID(ctx, assetID)
	if err != nil {
		return nil, err
	}

	return &domain.AssetGroup{
		ID:             dbAsset.ID,
		Quantity:       dbAsset.Quantity,
		Immutable:      dbAsset.Immutable,
		ControlAssetID: dbAsset.ControlAssetId,
		Metadata:       metadata,
	}, nil
}

// IncreaseAssetGroupQuantity adds amount to the group's quantity inside a
// retryable transaction.
// NOTE(review): a missing group is treated as a no-op (ErrNotFound is
// swallowed and nil returned) — confirm this is intended rather than an
// error; the Postgres backend's UPDATE is likewise a silent no-op here.
func (r *assetRepository) IncreaseAssetGroupQuantity(
	ctx context.Context,
	assetID string,
	amount uint64,
) error {
	return r.withRetryableWrite(ctx, func(tx *badger.Txn) error {
		dbAsset, err := r.getAssetDetailsWithTx(tx, assetID)
		if err != nil {
			if errors.Is(err, badgerhold.ErrNotFound) {
				return nil
			}
			return err
		}

		dbAsset.Quantity += amount

		return r.store.TxUpsert(tx, assetID, dbAsset)
	})
}

// DecreaseAssetGroupQuantity subtracts amount from the group's quantity
// inside a retryable transaction, failing if the balance would go negative.
// NOTE(review): like Increase, a missing group is a silent no-op — the
// insufficient-quantity case errors, the missing-group case does not.
func (r *assetRepository) DecreaseAssetGroupQuantity(
	ctx context.Context,
	assetID string,
	amount uint64,
) error {
	return r.withRetryableWrite(ctx, func(tx *badger.Txn) error {
		dbAsset, err := r.getAssetDetailsWithTx(tx, assetID)
		if err != nil {
			if errors.Is(err, badgerhold.ErrNotFound) {
				return nil
			}
			return err
		}

		if dbAsset.Quantity < amount {
			return fmt.Errorf("insufficient quantity for asset %s", assetID)
		}

		dbAsset.Quantity -= amount

		return r.store.TxUpsert(tx, assetID, dbAsset)
	})
}

// UpdateAssetMetadataList upserts the given metadata entries for an existing
// asset group in one transaction; fails if the group does not exist.
// Entries not present in metadatalist are left untouched (no delete).
func (r *assetRepository) UpdateAssetMetadataList(
	ctx context.Context,
	assetId string,
	metadatalist []domain.AssetMetadata,
) error {
	return r.withRetryableWrite(ctx, func(tx *badger.Txn) error {
		// Existence check: updating metadata of an unknown group is an error.
		_, err := r.getAssetDetailsWithTx(tx, assetId)
		if err != nil {
			return err
		}

		for _, md := range metadatalist {
			meta := assetMetadata{
				Key:       assetMetadataKey(assetId, md.Key),
				AssetID:   assetId,
				MetaKey:   md.Key,
				MetaValue: md.Value,
			}

			if err := r.store.TxUpsert(tx, meta.Key, meta); err != nil {
				return err
			}
		}

		return nil
	})
}

// listAnchorAssets returns all asset outputs attached to an anchor, sorted by
// vout for a stable, deterministic order; a missing anchor yields an empty
// slice.
func (r *assetRepository) listAnchorAssets(
	ctx context.Context,
	anchorID string,
) ([]anchorAsset, error) {
	var assets []anchorAsset
	query := badgerhold.Where("AnchorID").Eq(anchorID)

	var err error
	if tx := getTxFromContext(ctx); tx != nil {
		err = r.store.TxFind(tx, &assets, query)
	} else {
		err = r.store.Find(&assets, query)
	}
	if err != nil {
		if errors.Is(err, badgerhold.ErrNotFound) {
			return []anchorAsset{}, nil
		}
		return nil, err
	}

	sort.Slice(assets, func(i, j int) bool {
		return assets[i].Vout < assets[j].Vout
	})

	return assets, nil
}

// getAssetGroup fetches the raw stored asset group record, honouring an
// ambient transaction carried in ctx if present.
func (r *assetRepository) getAssetGroup(ctx context.Context, assetID string) (*assetGroup, error) {
	var record assetGroup
	var err error
	if tx := getTxFromContext(ctx); tx != nil {
		err = r.store.TxGet(tx, assetID, &record)
	} else {
		err = r.store.Get(assetID, &record)
	}
	if err != nil {
		return nil, err
	}

	return &record, nil
}

// getAssetDetailsWithTx fetches the raw asset group record within an explicit
// badger transaction (used by the quantity/metadata writers above).
func (r *assetRepository) getAssetDetailsWithTx(
	tx *badger.Txn,
	assetID string,
) (*assetGroup, error) {
	var record assetGroup
	if err := r.store.TxGet(tx, assetID, &record); err != nil {
		return nil, err
	}
	return &record, nil
}
nil, err + } + return &record, nil +} + +func (r *assetRepository) withRetryableWrite( + ctx context.Context, + fn func(tx *badger.Txn) error, +) error { + if tx := getTxFromContext(ctx); tx != nil { + return fn(tx) + } + + var err error + + for attempt := 0; attempt < maxRetries; attempt++ { + err = func() error { + tx := r.store.Badger().NewTransaction(true) + defer tx.Discard() + + if err := fn(tx); err != nil { + return err + } + + return tx.Commit() + }() + if err == nil { + return nil + } + + if errors.Is(err, badger.ErrConflict) { + time.Sleep(100 * time.Millisecond) + continue + } + + return err + } + + return err +} + +func getTxFromContext(ctx context.Context) *badger.Txn { + tx, ok := ctx.Value("tx").(*badger.Txn) + if !ok { + return nil + } + + return tx +} + +func anchorAssetKey(anchorID string, vout uint32) string { + return fmt.Sprintf("%s:%d", anchorID, vout) +} + +func assetMetadataKey(assetID, metaKey string) string { + return fmt.Sprintf("%s:%s", assetID, metaKey) +} diff --git a/internal/infrastructure/db/postgres/asset_repo.go b/internal/infrastructure/db/postgres/asset_repo.go new file mode 100644 index 000000000..9b0b0bdc4 --- /dev/null +++ b/internal/infrastructure/db/postgres/asset_repo.go @@ -0,0 +1,312 @@ +package pgdb + +import ( + "context" + "database/sql" + "fmt" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/internal/infrastructure/db/postgres/sqlc/queries" +) + +type assetRepository struct { + db *sql.DB + querier *queries.Queries +} + +func NewAssetRepository(config ...interface{}) (domain.AssetRepository, error) { + if len(config) != 1 { + return nil, fmt.Errorf("invalid config") + } + db, ok := config[0].(*sql.DB) + if !ok { + return nil, fmt.Errorf("cannot open asset repository: invalid config") + } + + return &assetRepository{ + db: db, + querier: queries.New(db), + }, nil +} + +func (r *assetRepository) Close() { + _ = r.db.Close() +} + +func (r *assetRepository) ListAssetAnchorsByAssetID( + 
ctx context.Context, + assetID string, +) ([]domain.AssetAnchor, error) { + anchorsDB, err := r.querier.ListAssetAnchorsByAssetID(ctx, assetID) + if err != nil { + return nil, err + } + + anchors := make([]domain.AssetAnchor, 0, len(anchorsDB)) + for _, anchorDB := range anchorsDB { + anchor, err := r.GetAssetAnchorByTxId(ctx, anchorDB.AnchorTxid) + if err != nil { + return nil, err + } + anchors = append(anchors, *anchor) + } + + return anchors, nil +} + +func (r *assetRepository) GetAssetByOutpoint( + ctx context.Context, + outpoint domain.Outpoint, +) (*domain.NormalAsset, error) { + assetDB, err := r.querier.GetAsset(ctx, queries.GetAssetParams{ + AnchorID: outpoint.Txid, + Vout: int64(outpoint.VOut), + }) + if err != nil { + return nil, err + } + + return &domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: assetDB.AnchorID, + VOut: uint32(assetDB.Vout), + }, + Amount: uint64(assetDB.Amount), + AssetID: assetDB.AssetID, + }, nil +} + +func (r *assetRepository) InsertTeleportAsset( + ctx context.Context, + teleport domain.TeleportAsset, +) error { + return r.querier.CreateTeleportAsset(ctx, queries.CreateTeleportAssetParams{ + Script: teleport.Script, + IntentID: teleport.IntentID, + GroupIndex: int64(teleport.OutputIndex), + AssetID: teleport.AssetID, + Amount: int64(teleport.Amount), + IsClaimed: teleport.IsClaimed, + }) +} + +func (r *assetRepository) GetTeleportAsset( + ctx context.Context, + script string, intentID string, assetID string, outputIndex uint32, +) (*domain.TeleportAsset, error) { + teleportDB, err := r.querier.GetTeleportAsset(ctx, queries.GetTeleportAssetParams{ + Script: script, + IntentID: intentID, + AssetID: assetID, + GroupIndex: int64(outputIndex), + }) + if err != nil { + return nil, err + } + return &domain.TeleportAsset{ + Script: teleportDB.Script, + AssetID: teleportDB.AssetID, + IntentID: teleportDB.IntentID, + OutputIndex: uint32(teleportDB.GroupIndex), + Amount: uint64(teleportDB.Amount), + IsClaimed: teleportDB.IsClaimed, 
+ }, nil +} + +func (r *assetRepository) UpdateTeleportAsset( + ctx context.Context, + script string, intentID string, assetID string, outputIndex uint32, isClaimed bool, +) error { + return r.querier.UpdateTeleportAsset(ctx, queries.UpdateTeleportAssetParams{ + IsClaimed: isClaimed, + Script: script, + IntentID: intentID, + AssetID: assetID, + GroupIndex: int64(outputIndex), + }) +} + +func (r *assetRepository) ListMetadataByAssetID( + ctx context.Context, + assetID string, +) ([]domain.AssetMetadata, error) { + res, err := r.querier.ListAssetMetadata(ctx, assetID) + if err != nil { + return nil, err + } + metadata := make([]domain.AssetMetadata, 0, len(res)) + for _, m := range res { + metadata = append(metadata, domain.AssetMetadata{ + Key: m.MetaKey, + Value: m.MetaValue, + }) + } + return metadata, nil +} + +func (r *assetRepository) InsertAssetAnchor(ctx context.Context, anchor domain.AssetAnchor) error { + err := r.querier.CreateAssetAnchor(ctx, queries.CreateAssetAnchorParams{ + AnchorTxid: anchor.Txid, + AnchorVout: int64(anchor.VOut), + }) + if err != nil { + return err + } + + for _, asst := range anchor.Assets { + err := r.querier.AddAsset(ctx, queries.AddAssetParams{ + AnchorID: anchor.Txid, + AssetID: asst.AssetID, + Vout: int64(asst.VOut), + Amount: int64(asst.Amount), + }) + if err != nil { + return err + } + } + + return nil +} + +func (r *assetRepository) GetAssetAnchorByTxId( + ctx context.Context, + txId string, +) (*domain.AssetAnchor, error) { + anchor, err := r.querier.GetAssetAnchor(ctx, txId) + if err != nil { + return nil, err + } + + assetListResp, err := r.querier.ListAsset(ctx, anchor.AnchorTxid) + if err != nil { + return nil, err + } + + assetList := make([]domain.NormalAsset, 0, len(assetListResp)) + for _, asst := range assetListResp { + assetList = append(assetList, domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: asst.AnchorID, + VOut: uint32(asst.Vout), + }, + Amount: uint64(asst.Amount), + AssetID: asst.AssetID, + }) + } 
+ + return &domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: anchor.AnchorTxid, + VOut: uint32(anchor.AnchorVout), + }, + Assets: assetList, + }, nil +} + +func (r *assetRepository) InsertAssetGroup( + ctx context.Context, + assetGroup domain.AssetGroup, +) error { + controlID := sql.NullString{} + if assetGroup.ControlAssetID != "" { + controlID = sql.NullString{ + String: assetGroup.ControlAssetID, + Valid: true, + } + } + err := r.querier.CreateAsset(ctx, queries.CreateAssetParams{ + ID: assetGroup.ID, + Quantity: int64(assetGroup.Quantity), + Immutable: assetGroup.Immutable, + ControlID: controlID, + }) + + if err != nil { + return err + } + + for _, md := range assetGroup.Metadata { + err := r.querier.UpsertAssetMetadata(ctx, queries.UpsertAssetMetadataParams{ + AssetID: assetGroup.ID, + MetaKey: md.Key, + MetaValue: md.Value, + }) + if err != nil { + return err + } + } + + return nil +} + +func (r *assetRepository) GetAssetGroupByID( + ctx context.Context, + assetID string, +) (*domain.AssetGroup, error) { + assetDB, err := r.querier.GetAssetGroup(ctx, assetID) + if err != nil { + return nil, err + } + + metadataDB, err := r.querier.ListAssetMetadata(ctx, assetID) + if err != nil { + return nil, err + } + + metadata := make([]domain.AssetMetadata, 0, len(metadataDB)) + for _, mdDB := range metadataDB { + metadata = append(metadata, domain.AssetMetadata{ + Key: mdDB.MetaKey, + Value: mdDB.MetaValue, + }) + } + + return &domain.AssetGroup{ + ID: assetDB.ID, + Quantity: uint64(assetDB.Quantity), + Immutable: assetDB.Immutable, + Metadata: metadata, + ControlAssetID: assetDB.ControlID.String, + }, nil +} + +func (r *assetRepository) IncreaseAssetGroupQuantity( + ctx context.Context, + assetID string, + amount uint64, +) error { + return r.querier.AddToAssetQuantity(ctx, queries.AddToAssetQuantityParams{ + ID: assetID, + Quantity: int64(amount), + }) +} + +func (r *assetRepository) DecreaseAssetGroupQuantity( + ctx context.Context, + assetID string, + 
amount uint64, +) error { + return r.querier.SubtractFromAssetQuantity(ctx, queries.SubtractFromAssetQuantityParams{ + ID: assetID, + Quantity: int64(amount), + }) +} + +func (r *assetRepository) UpdateAssetMetadataList( + ctx context.Context, + assetId string, + metadatalist []domain.AssetMetadata, +) error { + for _, md := range metadatalist { + err := r.querier.UpsertAssetMetadata(ctx, queries.UpsertAssetMetadataParams{ + AssetID: assetId, + MetaKey: md.Key, + MetaValue: md.Value, + }) + if err != nil { + return err + } + } + + return nil +} diff --git a/internal/infrastructure/db/postgres/migration/20251211132616_add_asset.down.sql b/internal/infrastructure/db/postgres/migration/20251211132616_add_asset.down.sql new file mode 100644 index 000000000..727a583c8 --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20251211132616_add_asset.down.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS teleport_asset; +DROP TABLE IF EXISTS asset_metadata; +DROP TABLE IF EXISTS asset; +DROP TABLE IF EXISTS asset_anchor; +DROP TABLE IF EXISTS asset_group; diff --git a/internal/infrastructure/db/postgres/migration/20251211132616_add_asset.up.sql b/internal/infrastructure/db/postgres/migration/20251211132616_add_asset.up.sql new file mode 100644 index 000000000..2228a297d --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20251211132616_add_asset.up.sql @@ -0,0 +1,48 @@ +CREATE TABLE asset_group ( + id TEXT PRIMARY KEY, + immutable BOOLEAN NOT NULL DEFAULT FALSE, + quantity BIGINT NOT NULL, + control_id TEXT +); + +CREATE TABLE asset_anchor ( + anchor_txid TEXT PRIMARY KEY, + anchor_vout BIGINT NOT NULL +); + +CREATE TABLE asset ( + anchor_id TEXT NOT NULL, + asset_id TEXT NOT NULL, + vout BIGINT NOT NULL, + amount BIGINT NOT NULL, + PRIMARY KEY (anchor_id, vout), + FOREIGN KEY (anchor_id) + REFERENCES asset_anchor(anchor_txid) + ON DELETE CASCADE, + FOREIGN KEY (asset_id) + REFERENCES asset_group(id) + ON DELETE CASCADE +); + +CREATE TABLE asset_metadata ( 
+ asset_id TEXT NOT NULL, + meta_key TEXT NOT NULL, + meta_value TEXT NOT NULL, + PRIMARY KEY (asset_id, meta_key), + FOREIGN KEY (asset_id) + REFERENCES asset_group(id) + ON DELETE CASCADE +); + +CREATE TABLE teleport_asset ( + script TEXT NOT NULL, + intent_id TEXT NOT NULL, + group_index BIGINT NOT NULL, + asset_id TEXT NOT NULL, + amount BIGINT NOT NULL, + is_claimed BOOLEAN NOT NULL DEFAULT FALSE, + PRIMARY KEY (script, intent_id, asset_id, group_index), + FOREIGN KEY (asset_id) + REFERENCES asset_group(id) + ON DELETE CASCADE +); diff --git a/internal/infrastructure/db/postgres/sqlc/queries/db.go b/internal/infrastructure/db/postgres/sqlc/queries/db.go index 51576a562..85679b3ce 100644 --- a/internal/infrastructure/db/postgres/sqlc/queries/db.go +++ b/internal/infrastructure/db/postgres/sqlc/queries/db.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.30.0 +// sqlc v1.29.0 package queries diff --git a/internal/infrastructure/db/postgres/sqlc/queries/models.go b/internal/infrastructure/db/postgres/sqlc/queries/models.go index a365f9500..8206bbfcd 100644 --- a/internal/infrastructure/db/postgres/sqlc/queries/models.go +++ b/internal/infrastructure/db/postgres/sqlc/queries/models.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.30.0 +// sqlc v1.29.0 package queries @@ -10,6 +10,31 @@ import ( "github.com/sqlc-dev/pqtype" ) +type Asset struct { + AnchorID string + AssetID string + Vout int64 + Amount int64 +} + +type AssetAnchor struct { + AnchorTxid string + AnchorVout int64 +} + +type AssetGroup struct { + ID string + Immutable bool + Quantity int64 + ControlID sql.NullString +} + +type AssetMetadatum struct { + AssetID string + MetaKey string + MetaValue string +} + type CheckpointTx struct { Txid string Tx string @@ -186,6 +211,15 @@ type ScheduledSession struct { UpdatedAt int64 } +type TeleportAsset struct { + Script string + IntentID string + GroupIndex int64 + AssetID string + Amount int64 + IsClaimed bool +} + type Tx struct { Txid string Tx string diff --git a/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go b/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go index 0e9f723f9..399a7db6e 100644 --- a/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go +++ b/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.30.0 +// sqlc v1.29.0 // source: query.sql package queries @@ -13,6 +13,31 @@ import ( "github.com/sqlc-dev/pqtype" ) +const addAsset = `-- name: AddAsset :exec +INSERT INTO asset (anchor_id, asset_id, vout, amount) +VALUES ($1, $2, $3, $4) +ON CONFLICT (anchor_id, vout) +DO UPDATE SET amount = EXCLUDED.amount +WHERE asset.asset_id = EXCLUDED.asset_id +` + +type AddAssetParams struct { + AnchorID string + AssetID string + Vout int64 + Amount int64 +} + +func (q *Queries) AddAsset(ctx context.Context, arg AddAssetParams) error { + _, err := q.db.ExecContext(ctx, addAsset, + arg.AnchorID, + arg.AssetID, + arg.Vout, + arg.Amount, + ) + return err +} + const addIntentFees = `-- name: AddIntentFees :exec INSERT INTO intent_fees ( offchain_input_fee_program, @@ -62,6 +87,22 @@ func (q *Queries) AddIntentFees(ctx context.Context, arg AddIntentFeesParams) er return err } +const addToAssetQuantity = `-- name: AddToAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity + $1 +WHERE id = $2 +` + +type AddToAssetQuantityParams struct { + Quantity int64 + ID string +} + +func (q *Queries) AddToAssetQuantity(ctx context.Context, arg AddToAssetQuantityParams) error { + _, err := q.db.ExecContext(ctx, addToAssetQuantity, arg.Quantity, arg.ID) + return err +} + const clearIntentFees = `-- name: ClearIntentFees :exec INSERT INTO intent_fees ( offchain_input_fee_program, @@ -86,6 +127,214 @@ func (q *Queries) ClearScheduledSession(ctx context.Context) error { return err } +const createAsset = `-- name: CreateAsset :exec +INSERT INTO asset_group (id, quantity, immutable, control_id) +VALUES ($1, $2, $3, $4) +` + +type CreateAssetParams struct { + ID string + Quantity int64 + Immutable bool + ControlID sql.NullString +} + +func (q *Queries) CreateAsset(ctx context.Context, arg CreateAssetParams) error { + _, err := q.db.ExecContext(ctx, createAsset, + arg.ID, + arg.Quantity, + arg.Immutable, + arg.ControlID, + ) + return err +} + +const 
createAssetAnchor = `-- name: CreateAssetAnchor :exec +INSERT INTO asset_anchor (anchor_txid, anchor_vout) +VALUES ($1, $2) +` + +type CreateAssetAnchorParams struct { + AnchorTxid string + AnchorVout int64 +} + +func (q *Queries) CreateAssetAnchor(ctx context.Context, arg CreateAssetAnchorParams) error { + _, err := q.db.ExecContext(ctx, createAssetAnchor, arg.AnchorTxid, arg.AnchorVout) + return err +} + +const createTeleportAsset = `-- name: CreateTeleportAsset :exec +INSERT INTO teleport_asset (script, intent_id, asset_id, group_index, amount, is_claimed) +VALUES ($1, $2, $3, $4, $5, $6) +` + +type CreateTeleportAssetParams struct { + Script string + IntentID string + AssetID string + GroupIndex int64 + Amount int64 + IsClaimed bool +} + +func (q *Queries) CreateTeleportAsset(ctx context.Context, arg CreateTeleportAssetParams) error { + _, err := q.db.ExecContext(ctx, createTeleportAsset, + arg.Script, + arg.IntentID, + arg.AssetID, + arg.GroupIndex, + arg.Amount, + arg.IsClaimed, + ) + return err +} + +const deleteAsset = `-- name: DeleteAsset :exec +DELETE FROM asset +WHERE anchor_id = $1 AND vout = $2 +` + +type DeleteAssetParams struct { + AnchorID string + Vout int64 +} + +func (q *Queries) DeleteAsset(ctx context.Context, arg DeleteAssetParams) error { + _, err := q.db.ExecContext(ctx, deleteAsset, arg.AnchorID, arg.Vout) + return err +} + +const deleteAssetAnchor = `-- name: DeleteAssetAnchor :exec +DELETE FROM asset_anchor +WHERE anchor_txid = $1 +` + +func (q *Queries) DeleteAssetAnchor(ctx context.Context, anchorTxid string) error { + _, err := q.db.ExecContext(ctx, deleteAssetAnchor, anchorTxid) + return err +} + +const getAsset = `-- name: GetAsset :one +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = $1 AND vout = $2 +` + +type GetAssetParams struct { + AnchorID string + Vout int64 +} + +func (q *Queries) GetAsset(ctx context.Context, arg GetAssetParams) (Asset, error) { + row := q.db.QueryRowContext(ctx, getAsset, 
arg.AnchorID, arg.Vout) + var i Asset + err := row.Scan( + &i.AnchorID, + &i.AssetID, + &i.Vout, + &i.Amount, + ) + return i, err +} + +const getAssetAnchor = `-- name: GetAssetAnchor :one +SELECT anchor_txid, anchor_vout +FROM asset_anchor +WHERE anchor_txid = $1 +` + +func (q *Queries) GetAssetAnchor(ctx context.Context, anchorTxid string) (AssetAnchor, error) { + row := q.db.QueryRowContext(ctx, getAssetAnchor, anchorTxid) + var i AssetAnchor + err := row.Scan(&i.AnchorTxid, &i.AnchorVout) + return i, err +} + +const getAssetGroup = `-- name: GetAssetGroup :one +SELECT id, quantity, immutable, control_id +FROM asset_group +WHERE id = $1 +` + +type GetAssetGroupRow struct { + ID string + Quantity int64 + Immutable bool + ControlID sql.NullString +} + +func (q *Queries) GetAssetGroup(ctx context.Context, id string) (GetAssetGroupRow, error) { + row := q.db.QueryRowContext(ctx, getAssetGroup, id) + var i GetAssetGroupRow + err := row.Scan( + &i.ID, + &i.Quantity, + &i.Immutable, + &i.ControlID, + ) + return i, err +} + +const getAssetMetadata = `-- name: GetAssetMetadata :one +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = $1 AND meta_key = $2 +` + +type GetAssetMetadataParams struct { + AssetID string + MetaKey string +} + +func (q *Queries) GetAssetMetadata(ctx context.Context, arg GetAssetMetadataParams) (AssetMetadatum, error) { + row := q.db.QueryRowContext(ctx, getAssetMetadata, arg.AssetID, arg.MetaKey) + var i AssetMetadatum + err := row.Scan(&i.AssetID, &i.MetaKey, &i.MetaValue) + return i, err +} + +const getTeleportAsset = `-- name: GetTeleportAsset :one +SELECT script, intent_id, asset_id, group_index, amount, is_claimed +FROM teleport_asset +WHERE script = $1 AND intent_id = $2 AND asset_id = $3 AND group_index = $4 +` + +type GetTeleportAssetParams struct { + Script string + IntentID string + AssetID string + GroupIndex int64 +} + +type GetTeleportAssetRow struct { + Script string + IntentID string + AssetID string + 
GroupIndex int64 + Amount int64 + IsClaimed bool +} + +func (q *Queries) GetTeleportAsset(ctx context.Context, arg GetTeleportAssetParams) (GetTeleportAssetRow, error) { + row := q.db.QueryRowContext(ctx, getTeleportAsset, + arg.Script, + arg.IntentID, + arg.AssetID, + arg.GroupIndex, + ) + var i GetTeleportAssetRow + err := row.Scan( + &i.Script, + &i.IntentID, + &i.AssetID, + &i.GroupIndex, + &i.Amount, + &i.IsClaimed, + ) + return i, err +} + const insertVtxoCommitmentTxid = `-- name: InsertVtxoCommitmentTxid :exec INSERT INTO vtxo_commitment_txid (vtxo_txid, vtxo_vout, commitment_txid) VALUES ($1, $2, $3) @@ -102,6 +351,143 @@ func (q *Queries) InsertVtxoCommitmentTxid(ctx context.Context, arg InsertVtxoCo return err } +const listAsset = `-- name: ListAsset :many +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = $1 +ORDER BY vout +` + +func (q *Queries) ListAsset(ctx context.Context, anchorID string) ([]Asset, error) { + rows, err := q.db.QueryContext(ctx, listAsset, anchorID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Asset + for rows.Next() { + var i Asset + if err := rows.Scan( + &i.AnchorID, + &i.AssetID, + &i.Vout, + &i.Amount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listAssetAnchorsByAssetID = `-- name: ListAssetAnchorsByAssetID :many +SELECT DISTINCT aa.anchor_txid, aa.anchor_vout +FROM asset_anchor aa +JOIN asset a ON aa.anchor_txid = a.anchor_id +WHERE a.asset_id = $1 +ORDER BY aa.anchor_txid +` + +func (q *Queries) ListAssetAnchorsByAssetID(ctx context.Context, assetID string) ([]AssetAnchor, error) { + rows, err := q.db.QueryContext(ctx, listAssetAnchorsByAssetID, assetID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []AssetAnchor + for rows.Next() { + var i AssetAnchor + if err := 
rows.Scan(&i.AnchorTxid, &i.AnchorVout); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listAssetGroup = `-- name: ListAssetGroup :many +SELECT id, quantity, immutable, control_id +FROM asset_group +ORDER BY id +` + +type ListAssetGroupRow struct { + ID string + Quantity int64 + Immutable bool + ControlID sql.NullString +} + +func (q *Queries) ListAssetGroup(ctx context.Context) ([]ListAssetGroupRow, error) { + rows, err := q.db.QueryContext(ctx, listAssetGroup) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ListAssetGroupRow + for rows.Next() { + var i ListAssetGroupRow + if err := rows.Scan( + &i.ID, + &i.Quantity, + &i.Immutable, + &i.ControlID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listAssetMetadata = `-- name: ListAssetMetadata :many +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = $1 +ORDER BY meta_key +` + +func (q *Queries) ListAssetMetadata(ctx context.Context, assetID string) ([]AssetMetadatum, error) { + rows, err := q.db.QueryContext(ctx, listAssetMetadata, assetID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []AssetMetadatum + for rows.Next() { + var i AssetMetadatum + if err := rows.Scan(&i.AssetID, &i.MetaKey, &i.MetaValue); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectActiveScriptConvictions = `-- name: SelectActiveScriptConvictions :many SELECT id, type, created_at, expires_at, crime_type, crime_round_id, crime_reason, pardoned, script FROM 
conviction WHERE script = $1 @@ -1580,6 +1966,22 @@ func (q *Queries) SelectVtxosWithPubkeys(ctx context.Context, arg SelectVtxosWit return items, nil } +const subtractFromAssetQuantity = `-- name: SubtractFromAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity - $1 +WHERE id = $2 AND quantity >= $1 +` + +type SubtractFromAssetQuantityParams struct { + Quantity int64 + ID string +} + +func (q *Queries) SubtractFromAssetQuantity(ctx context.Context, arg SubtractFromAssetQuantityParams) error { + _, err := q.db.ExecContext(ctx, subtractFromAssetQuantity, arg.Quantity, arg.ID) + return err +} + const updateConvictionPardoned = `-- name: UpdateConvictionPardoned :exec UPDATE conviction SET pardoned = true WHERE id = $1 ` @@ -1589,6 +1991,31 @@ func (q *Queries) UpdateConvictionPardoned(ctx context.Context, id string) error return err } +const updateTeleportAsset = `-- name: UpdateTeleportAsset :exec +UPDATE teleport_asset +SET is_claimed = $1 +WHERE script = $2 AND intent_id = $3 AND asset_id = $4 AND group_index = $5 +` + +type UpdateTeleportAssetParams struct { + IsClaimed bool + Script string + IntentID string + AssetID string + GroupIndex int64 +} + +func (q *Queries) UpdateTeleportAsset(ctx context.Context, arg UpdateTeleportAssetParams) error { + _, err := q.db.ExecContext(ctx, updateTeleportAsset, + arg.IsClaimed, + arg.Script, + arg.IntentID, + arg.AssetID, + arg.GroupIndex, + ) + return err +} + const updateVtxoExpiration = `-- name: UpdateVtxoExpiration :exec UPDATE vtxo SET expires_at = $1 WHERE txid = $2 AND vout = $3 ` @@ -1694,6 +2121,24 @@ func (q *Queries) UpdateVtxoUnrolled(ctx context.Context, arg UpdateVtxoUnrolled return err } +const upsertAssetMetadata = `-- name: UpsertAssetMetadata :exec +INSERT INTO asset_metadata (asset_id, meta_key, meta_value) +VALUES ($1, $2, $3) +ON CONFLICT (asset_id, meta_key) +DO UPDATE SET meta_value = EXCLUDED.meta_value +` + +type UpsertAssetMetadataParams struct { + AssetID string + MetaKey string + 
MetaValue string +} + +func (q *Queries) UpsertAssetMetadata(ctx context.Context, arg UpsertAssetMetadataParams) error { + _, err := q.db.ExecContext(ctx, upsertAssetMetadata, arg.AssetID, arg.MetaKey, arg.MetaValue) + return err +} + const upsertCheckpointTx = `-- name: UpsertCheckpointTx :exec INSERT INTO checkpoint_tx (txid, tx, commitment_txid, is_root_commitment_txid, offchain_txid) VALUES ($1, $2, $3, $4, $5) diff --git a/internal/infrastructure/db/postgres/sqlc/query.sql b/internal/infrastructure/db/postgres/sqlc/query.sql index 18f58e81f..190562ba0 100644 --- a/internal/infrastructure/db/postgres/sqlc/query.sql +++ b/internal/infrastructure/db/postgres/sqlc/query.sql @@ -377,6 +377,102 @@ SELECT * FROM conviction WHERE crime_round_id = @round_id ORDER BY created_at ASC; +-- name: CreateAssetAnchor :exec +INSERT INTO asset_anchor (anchor_txid, anchor_vout) +VALUES (@anchor_txid, @anchor_vout); + +-- name: ListAssetAnchorsByAssetID :many +SELECT DISTINCT aa.anchor_txid, aa.anchor_vout +FROM asset_anchor aa +JOIN asset a ON aa.anchor_txid = a.anchor_id +WHERE a.asset_id = @asset_id +ORDER BY aa.anchor_txid; + +-- name: GetAssetAnchor :one +SELECT anchor_txid, anchor_vout +FROM asset_anchor +WHERE anchor_txid = @anchor_txid; + +-- name: DeleteAssetAnchor :exec +DELETE FROM asset_anchor +WHERE anchor_txid = @anchor_txid; + +-- name: UpsertAssetMetadata :exec +INSERT INTO asset_metadata (asset_id, meta_key, meta_value) +VALUES (@asset_id, @meta_key, @meta_value) +ON CONFLICT (asset_id, meta_key) +DO UPDATE SET meta_value = EXCLUDED.meta_value; + +-- name: GetAssetMetadata :one +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = @asset_id AND meta_key = @meta_key; + +-- name: ListAssetMetadata :many +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = @asset_id +ORDER BY meta_key; + +-- name: AddAsset :exec +INSERT INTO asset (anchor_id, asset_id, vout, amount) +VALUES (@anchor_id, @asset_id, @vout, @amount) +ON 
CONFLICT (anchor_id, vout) +DO UPDATE SET amount = EXCLUDED.amount +WHERE asset.asset_id = EXCLUDED.asset_id; + +-- name: GetAsset :one +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = @anchor_id AND vout = @vout; + +-- name: DeleteAsset :exec +DELETE FROM asset +WHERE anchor_id = @anchor_id AND vout = @vout; + +-- name: ListAsset :many +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = @anchor_id +ORDER BY vout; + +-- name: GetAssetGroup :one +SELECT id, quantity, immutable, control_id +FROM asset_group +WHERE id = @id; + +-- name: ListAssetGroup :many +SELECT id, quantity, immutable, control_id +FROM asset_group +ORDER BY id; + +-- name: AddToAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity + @quantity +WHERE id = @id; + +-- name: SubtractFromAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity - @quantity +WHERE id = @id AND quantity >= @quantity; + +-- name: CreateAsset :exec +INSERT INTO asset_group (id, quantity, immutable, control_id) +VALUES (@id, @quantity, @immutable, @control_id); + +-- name: CreateTeleportAsset :exec +INSERT INTO teleport_asset (script, intent_id, asset_id, group_index, amount, is_claimed) +VALUES (@script, @intent_id, @asset_id, @group_index, @amount, @is_claimed); + +-- name: GetTeleportAsset :one +SELECT script, intent_id, asset_id, group_index, amount, is_claimed +FROM teleport_asset +WHERE script = @script AND intent_id = @intent_id AND asset_id = @asset_id AND group_index = @group_index; + +-- name: UpdateTeleportAsset :exec +UPDATE teleport_asset +SET is_claimed = @is_claimed +WHERE script = @script AND intent_id = @intent_id AND asset_id = @asset_id AND group_index = @group_index; -- name: SelectLatestIntentFees :one SELECT * FROM intent_fees ORDER BY id DESC LIMIT 1; diff --git a/internal/infrastructure/db/service.go b/internal/infrastructure/db/service.go index 8f60f6612..6a8d7be41 100644 --- a/internal/infrastructure/db/service.go +++ 
b/internal/infrastructure/db/service.go @@ -17,11 +17,14 @@ import ( badgerdb "github.com/arkade-os/arkd/internal/infrastructure/db/badger" pgdb "github.com/arkade-os/arkd/internal/infrastructure/db/postgres" sqlitedb "github.com/arkade-os/arkd/internal/infrastructure/db/sqlite" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" "github.com/arkade-os/arkd/pkg/ark-lib/script" "github.com/arkade-os/arkd/pkg/ark-lib/tree" "github.com/arkade-os/arkd/pkg/ark-lib/txutils" "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/btcutil/psbt" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" "github.com/golang-migrate/migrate/v4" migratepg "github.com/golang-migrate/migrate/v4/database/postgres" sqlitemigrate "github.com/golang-migrate/migrate/v4/database/sqlite" @@ -68,6 +71,11 @@ var ( "sqlite": sqlitedb.NewConvictionRepository, "postgres": pgdb.NewConvictionRepository, } + assetStoreTypes = map[string]func(...interface{}) (domain.AssetRepository, error){ + "sqlite": sqlitedb.NewAssetRepository, + "badger": badgerdb.NewAssetRepository, + "postgres": pgdb.NewAssetRepository, + } intentFeesStoreTypes = map[string]func(...interface{}) (domain.FeeRepository, error){ "badger": badgerdb.NewIntentFeesRepository, "sqlite": sqlitedb.NewIntentFeesRepository, @@ -94,6 +102,7 @@ type service struct { scheduledSessionStore domain.ScheduledSessionRepo offchainTxStore domain.OffchainTxRepository convictionStore domain.ConvictionRepository + assetStore domain.AssetRepository intentFeesStore domain.FeeRepository txDecoder ports.TxDecoder } @@ -123,6 +132,11 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if !ok { return nil, fmt.Errorf("invalid data store type: %s", config.DataStoreType) } + assetStoreFactory, ok := assetStoreTypes[config.DataStoreType] + if !ok { + return nil, fmt.Errorf("invalid data store type: %s", config.DataStoreType) + } + intentFeesStoreFactory, ok := 
intentFeesStoreTypes[config.DataStoreType] if !ok { return nil, fmt.Errorf("invalid data store type: %s", config.DataStoreType) @@ -134,6 +148,7 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana var scheduledSessionStore domain.ScheduledSessionRepo var offchainTxStore domain.OffchainTxRepository var convictionStore domain.ConvictionRepository + var assetStore domain.AssetRepository var intentFeesStore domain.FeeRepository var err error @@ -167,6 +182,7 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to open event store: %s", err) } + default: return nil, fmt.Errorf("unknown event store db type") } @@ -193,6 +209,10 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to create conviction store: %w", err) } + assetStore, err = assetStoreFactory(config.DataStoreConfig...) + if err != nil { + return nil, fmt.Errorf("failed to create asset store: %w", err) + } intentFeesStore, err = intentFeesStoreFactory(config.DataStoreConfig...) 
if err != nil { return nil, fmt.Errorf("failed to create intent fees store: %w", err) @@ -264,6 +284,10 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to create conviction store: %w", err) } + assetStore, err = assetStoreFactory(db) + if err != nil { + return nil, fmt.Errorf("failed to create asset store: %w", err) + } intentFeesStore, err = intentFeesStoreFactory(db) if err != nil { return nil, fmt.Errorf("failed to create intent fees store: %w", err) @@ -328,6 +352,10 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to create conviction store: %w", err) } + assetStore, err = assetStoreFactory(db) + if err != nil { + return nil, fmt.Errorf("failed to create asset store: %w", err) + } intentFeesStore, err = intentFeesStoreFactory(db) if err != nil { return nil, fmt.Errorf("failed to create intent fees store: %w", err) @@ -342,6 +370,7 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana offchainTxStore: offchainTxStore, txDecoder: txDecoder, convictionStore: convictionStore, + assetStore: assetStore, intentFeesStore: intentFeesStore, } @@ -364,6 +393,10 @@ func (s *service) Rounds() domain.RoundRepository { return s.roundStore } +func (s *service) Assets() domain.AssetRepository { + return s.assetStore +} + func (s *service) Vtxos() domain.VtxoRepository { return s.vtxoStore } @@ -497,20 +530,55 @@ func (s *service) updateProjectionsAfterOffchainTxEvents(events []domain.Event) // once the offchain tx is finalized, the user signed the checkpoint txs // thus, we can create the new vtxos in the db. 
newVtxos := make([]domain.Vtxo, 0, len(outs)) + assetOpReturnProcessed := false for outIndex, out := range outs { + var isSubDust bool + var pubKey []byte + // ignore anchors if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) { continue } - isDust := script.IsSubDustScript(out.PkScript) + // ignore asset anchor + if extension.ContainsAssetPacket(out.PkScript) { + if assetOpReturnProcessed { + continue + } + assetOpReturnProcessed = true + + txOut := wire.TxOut{ + Value: int64(out.Amount), + PkScript: out.PkScript, + } + + if _, err := extension.DecodeAssetPacket(txOut); err != nil { + log.WithError(err).Warn("failed to decode asset group from opret") + continue + } + + subDustPacket, err := extension.DecodeSubDustPacket(txOut) + if err != nil { + log.WithError(err).Warn("failed to decode sub-dust key from opret") + continue + } + if subDustPacket == nil || subDustPacket.Key == nil { + continue + } + isSubDust = true + pubKey = schnorr.SerializePubKey(subDustPacket.Key) + + } else { + isSubDust = script.IsSubDustScript(out.PkScript) + pubKey = out.PkScript[2:] + } newVtxos = append(newVtxos, domain.Vtxo{ Outpoint: domain.Outpoint{ Txid: txid, VOut: uint32(outIndex), }, - PubKey: hex.EncodeToString(out.PkScript[2:]), + PubKey: hex.EncodeToString(pubKey), Amount: uint64(out.Amount), ExpiresAt: offchainTx.ExpiryTimestamp, CommitmentTxids: offchainTx.CommitmentTxidsList(), @@ -520,7 +588,7 @@ func (s *service) updateProjectionsAfterOffchainTxEvents(events []domain.Event) // mark the vtxo as "swept" if it is below dust limit to prevent it from being spent again in a future offchain tx // the only way to spend a swept vtxo is by collecting enough dust to cover the minSettlementVtxoAmount and then settle. // because sub-dust vtxos are using OP_RETURN output script, they can't be unilaterally exited. 
- Swept: isDust, + Swept: isSubDust, }) } @@ -580,6 +648,11 @@ func getNewVtxosFromRound(round *domain.Round) []domain.Vtxo { if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) { continue } + // Skip any OP_RETURN output (asset packets) + // TODO: Make this more robust? + if bytes.HasPrefix(out.PkScript, []byte{txscript.OP_RETURN}) { + continue + } vtxoTapKey, err := schnorr.ParsePubKey(out.PkScript[2:]) if err != nil { diff --git a/internal/infrastructure/db/service_test.go b/internal/infrastructure/db/service_test.go index 1c29c0a0f..f1cb66079 100644 --- a/internal/infrastructure/db/service_test.go +++ b/internal/infrastructure/db/service_test.go @@ -181,7 +181,7 @@ func TestService(t *testing.T) { t.Run(tt.name, func(t *testing.T) { svc, err := db.NewService(tt.config, nil) require.NoError(t, err) - defer svc.Close() + require.NotNil(t, svc) testEventRepository(t, svc) testRoundRepository(t, svc) @@ -189,7 +189,10 @@ func TestService(t *testing.T) { testOffchainTxRepository(t, svc) testScheduledSessionRepository(t, svc) testConvictionRepository(t, svc) + testAssetRepository(t, svc) testFeeRepository(t, svc) + + svc.Close() }) } } @@ -1505,6 +1508,289 @@ func testConvictionRepository(t *testing.T, svc ports.RepoManager) { }) } +func testAssetRepository(t *testing.T, svc ports.RepoManager) { + t.Run("insert and get asset anchor", func(t *testing.T) { + ctx := context.Background() + + // Create asset groups first to satisfy FK constraints + err := svc.Assets().InsertAssetGroup(ctx, domain.AssetGroup{ID: "asset-1", Quantity: 1000}) + require.NoError(t, err) + err = svc.Assets().InsertAssetGroup(ctx, domain.AssetGroup{ID: "asset-2", Quantity: 2000}) + require.NoError(t, err) + + anchor := domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: "txid-123", + VOut: 2, + }, + Assets: []domain.NormalAsset{ + { + Outpoint: domain.Outpoint{Txid: "txid-123", VOut: 0}, + Amount: 1000, + AssetID: "asset-1", + }, + { + Outpoint: domain.Outpoint{Txid: "txid-123", VOut: 1}, 
+ Amount: 2000, + AssetID: "asset-2", + }, + }, + } + + err = svc.Assets().InsertAssetAnchor(ctx, anchor) + require.NoError(t, err, "InsertAssetAnchor should succeed") + + got, err := svc.Assets().GetAssetAnchorByTxId(ctx, anchor.Txid) + require.NoError(t, err, "GetAssetAnchorByTxId should succeed") + require.NotNil(t, got) + + require.Equal(t, anchor.Txid, got.Txid) + require.Equal(t, anchor.VOut, got.VOut) + require.ElementsMatch(t, anchor.Assets, got.Assets) + }) + + t.Run("insert and get asset group", func(t *testing.T) { + ctx := context.Background() + asset := domain.AssetGroup{ + ID: "asset-group-123", + Quantity: 5000, + Immutable: false, + Metadata: []domain.AssetMetadata{ + {Key: "name", Value: "My Asset"}, + {Key: "symbol", Value: "MAS"}, + }, + ControlAssetID: "control-asset-123", + } + + err := svc.Assets().InsertAssetGroup(ctx, asset) + require.NoError(t, err, "InsertAssetGroup should succeed") + + got, err := svc.Assets().GetAssetGroupByID(ctx, asset.ID) + require.NoError(t, err, "GetAssetGroupByID should succeed") + require.NotNil(t, got) + + require.Equal(t, asset.ID, got.ID) + require.Equal(t, asset.Quantity, got.Quantity) + require.Equal(t, asset.Immutable, got.Immutable) + require.ElementsMatch(t, asset.Metadata, got.Metadata) + require.Equal(t, asset.ControlAssetID, got.ControlAssetID) + }) + + t.Run("insert asset anchor accepts duplicate vout", func(t *testing.T) { + ctx := context.Background() + + // Create asset groups first to satisfy FK constraints + err := svc.Assets(). + InsertAssetGroup(ctx, domain.AssetGroup{ID: "asset-dup-1", Quantity: 1000}) + require.NoError(t, err) + err = svc.Assets(). 
+ InsertAssetGroup(ctx, domain.AssetGroup{ID: "asset-dup-2", Quantity: 2000}) + require.NoError(t, err) + + anchor := domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: "txid-dup-vout", + VOut: 0, + }, + Assets: []domain.NormalAsset{ + { + Outpoint: domain.Outpoint{Txid: "txid-dup-vout", VOut: 0}, + Amount: 1000, + AssetID: "asset-dup-1", + }, + { + Outpoint: domain.Outpoint{Txid: "txid-dup-vout", VOut: 0}, + Amount: 2000, + AssetID: "asset-dup-2", + }, + }, + } + + err = svc.Assets().InsertAssetAnchor(ctx, anchor) + require.NoError(t, err, "InsertAssetAnchor should succeed even with duplicate vout") + }) + + t.Run("list asset anchors by asset id", func(t *testing.T) { + ctx := context.Background() + + assetListID := "asset-list-1" + otherAssetID := "asset-list-2" + + // Create asset groups first to satisfy FK constraints + err := svc.Assets(). + InsertAssetGroup(ctx, domain.AssetGroup{ID: assetListID, Quantity: 10000}) + require.NoError(t, err) + err = svc.Assets(). + InsertAssetGroup(ctx, domain.AssetGroup{ID: otherAssetID, Quantity: 5000}) + require.NoError(t, err) + + anchor1 := domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: "txid-asset-1", + VOut: 0, + }, + Assets: []domain.NormalAsset{ + { + Outpoint: domain.Outpoint{Txid: "txid-asset-1", VOut: 0}, + Amount: 5000, + AssetID: assetListID, + }, + { + Outpoint: domain.Outpoint{Txid: "txid-asset-1", VOut: 1}, + Amount: 2500, + AssetID: otherAssetID, + }, + }, + } + anchor2 := domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: "txid-asset-2", + VOut: 1, + }, + Assets: []domain.NormalAsset{ + { + Outpoint: domain.Outpoint{Txid: "txid-asset-2", VOut: 2}, + Amount: 7500, + AssetID: assetListID, + }, + }, + } + + err = svc.Assets().InsertAssetAnchor(ctx, anchor1) + require.NoError(t, err, "InsertAssetAnchor should succeed") + + err = svc.Assets().InsertAssetAnchor(ctx, anchor2) + require.NoError(t, err, "InsertAssetAnchor should succeed") + + anchors, err := 
svc.Assets().ListAssetAnchorsByAssetID(ctx, assetListID) + require.NoError(t, err, "ListAssetAnchorsByAssetID should succeed") + require.Len(t, anchors, 2) + + gotTxids := make(map[string]struct{}) + for _, anchor := range anchors { + gotTxids[anchor.Txid] = struct{}{} + } + require.Contains(t, gotTxids, anchor1.Txid) + require.Contains(t, gotTxids, anchor2.Txid) + }) + + t.Run("get asset by outpoint", func(t *testing.T) { + ctx := context.Background() + + // Create asset group first to satisfy FK constraints + err := svc.Assets().InsertAssetGroup(ctx, domain.AssetGroup{ID: "asset-42", Quantity: 5000}) + require.NoError(t, err) + + anchor := domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: "txid-by-outpoint", + VOut: 3, + }, + Assets: []domain.NormalAsset{ + { + Outpoint: domain.Outpoint{Txid: "txid-by-outpoint", VOut: 0}, + Amount: 1234, + AssetID: "asset-42", + }, + }, + } + + err = svc.Assets().InsertAssetAnchor(ctx, anchor) + require.NoError(t, err, "InsertAssetAnchor should succeed") + + got, err := svc.Assets(). 
+ GetAssetByOutpoint(ctx, domain.Outpoint{Txid: "txid-by-outpoint", VOut: 0}) + require.NoError(t, err, "GetAssetByOutpoint should succeed") + require.NotNil(t, got) + require.Equal(t, anchor.Assets[0], *got) + }) + + t.Run("insert and update asset quantity", func(t *testing.T) { + ctx := context.Background() + + asset := domain.AssetGroup{ + ID: "asset-3", + Quantity: 10, + Immutable: true, + Metadata: []domain.AssetMetadata{ + {Key: "name", Value: "Test AssetGroup"}, + {Key: "symbol", Value: "TST"}, + }, + ControlAssetID: "controlAssetId", + } + + err := svc.Assets().InsertAssetGroup(ctx, asset) + require.NoError(t, err, "InsertAssetDetails should succeed") + + // Increase by 5 -> 15 + err = svc.Assets().IncreaseAssetGroupQuantity(ctx, asset.ID, 5) + require.NoError(t, err, "IncreaseAssetQuantity should succeed") + + // Decrease by 3 -> 12 + err = svc.Assets().DecreaseAssetGroupQuantity(ctx, asset.ID, 3) + require.NoError(t, err, "DecreaseAssetQuantity should succeed") + + // Assert final value in DB + assetD, err := svc.Assets().GetAssetGroupByID(ctx, asset.ID) + require.NoError(t, err, "GetAsseGroupByID should succeed") + + require.Equal(t, uint64(12), assetD.Quantity) + require.True(t, assetD.Immutable) + + md, err := svc.Assets().ListMetadataByAssetID(ctx, asset.ID) + require.NoError(t, err, "ListMetadataByAssetID should succeed") + + require.Len(t, md, len(asset.Metadata)) + require.ElementsMatch(t, asset.Metadata, md) + }) + + t.Run("insert get and update teleport asset", func(t *testing.T) { + ctx := context.Background() + script := randomString(32) + intentID := randomString(32) + assetID := "asset-3" + outputIndex := uint32(7) + amount := uint64(5000) + + asset := domain.TeleportAsset{ + Script: script, + IntentID: intentID, + AssetID: assetID, + OutputIndex: outputIndex, + Amount: amount, + IsClaimed: false, + } + + err := svc.Assets().InsertTeleportAsset(ctx, asset) + require.NoError(t, err, "InsertTeleportAsset should succeed") + + got, err := 
svc.Assets().GetTeleportAsset(ctx, script, intentID, assetID, outputIndex) + require.NoError(t, err, "GetTeleportAsset should succeed") + require.NotNil(t, got) + require.Equal(t, script, got.Script) + require.Equal(t, intentID, got.IntentID) + require.Equal(t, outputIndex, got.OutputIndex) + require.Equal(t, assetID, got.AssetID) + require.Equal(t, amount, got.Amount) + require.False(t, got.IsClaimed) + + err = svc.Assets().UpdateTeleportAsset(ctx, script, intentID, assetID, outputIndex, true) + require.NoError(t, err, "UpdateTeleportAsset should succeed") + + gotUpdated, err := svc.Assets(). + GetTeleportAsset(ctx, script, intentID, assetID, outputIndex) + require.NoError(t, err, "GetTeleportAsset after update should succeed") + require.NotNil(t, gotUpdated) + require.Equal(t, script, gotUpdated.Script) + require.Equal(t, intentID, gotUpdated.IntentID) + require.Equal(t, outputIndex, gotUpdated.OutputIndex) + require.Equal(t, assetID, gotUpdated.AssetID) + require.Equal(t, amount, gotUpdated.Amount) + require.True(t, gotUpdated.IsClaimed) + + }) +} + func testFeeRepository(t *testing.T, svc ports.RepoManager) { t.Run("test_fee_repository", func(t *testing.T) { ctx := context.Background() diff --git a/internal/infrastructure/db/sqlite/asset_repo.go b/internal/infrastructure/db/sqlite/asset_repo.go new file mode 100644 index 000000000..0dc138d2c --- /dev/null +++ b/internal/infrastructure/db/sqlite/asset_repo.go @@ -0,0 +1,320 @@ +package sqlitedb + +import ( + "context" + "database/sql" + "fmt" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/internal/infrastructure/db/sqlite/sqlc/queries" +) + +type assetRepository struct { + db *sql.DB + querier *queries.Queries +} + +func NewAssetRepository(config ...interface{}) (domain.AssetRepository, error) { + if len(config) != 1 { + return nil, fmt.Errorf("invalid config") + } + db, ok := config[0].(*sql.DB) + if !ok { + return nil, fmt.Errorf("cannot open vtxo repository: invalid config") 
+ } + + return &assetRepository{ + db: db, + querier: queries.New(db), + }, nil +} + +func (r *assetRepository) ListAssetAnchorsByAssetID( + ctx context.Context, + assetID string, +) ([]domain.AssetAnchor, error) { + anchorsDB, err := r.querier.ListAssetAnchorsByAssetID(ctx, assetID) + if err != nil { + return nil, err + } + + anchors := make([]domain.AssetAnchor, 0, len(anchorsDB)) + for _, anchorDB := range anchorsDB { + anchor, err := r.GetAssetAnchorByTxId(ctx, anchorDB.AnchorTxid) + if err != nil { + return nil, err + } + anchors = append(anchors, *anchor) + } + + return anchors, nil +} + +func (r *assetRepository) GetAssetByOutpoint( + ctx context.Context, + outpoint domain.Outpoint, +) (*domain.NormalAsset, error) { + { + assetDB, err := r.querier.GetAsset(ctx, queries.GetAssetParams{ + AnchorID: outpoint.Txid, + Vout: int64(outpoint.VOut), + }) + if err != nil { + return nil, err + } + return &domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: assetDB.AnchorID, + VOut: uint32(assetDB.Vout), + }, + Amount: uint64(assetDB.Amount), + AssetID: assetDB.AssetID, + }, nil + } +} + +func (r *assetRepository) InsertTeleportAsset( + ctx context.Context, + teleport domain.TeleportAsset, +) error { + return r.querier.CreateTeleportAsset(ctx, queries.CreateTeleportAssetParams{ + Script: teleport.Script, + IntentID: teleport.IntentID, + GroupIndex: int64(teleport.OutputIndex), + AssetID: teleport.AssetID, + Amount: int64(teleport.Amount), + IsClaimed: teleport.IsClaimed, + }) +} + +func (r *assetRepository) GetTeleportAsset( + ctx context.Context, + script string, intentID string, assetID string, outputIndex uint32, +) (*domain.TeleportAsset, error) { + teleportDB, err := r.querier.GetTeleportAsset(ctx, queries.GetTeleportAssetParams{ + Script: script, + IntentID: intentID, + AssetID: assetID, + GroupIndex: int64(outputIndex), + }) + if err != nil { + return nil, err + } + return &domain.TeleportAsset{ + Script: teleportDB.Script, + AssetID: teleportDB.AssetID, + 
IntentID: teleportDB.IntentID, + OutputIndex: uint32(teleportDB.GroupIndex), + Amount: uint64(teleportDB.Amount), + IsClaimed: teleportDB.IsClaimed, + }, nil +} + +func (r *assetRepository) UpdateTeleportAsset( + ctx context.Context, + script string, intentID string, assetID string, outputIndex uint32, isClaimed bool, +) error { + return r.querier.UpdateTeleportAsset(ctx, queries.UpdateTeleportAssetParams{ + IsClaimed: isClaimed, + Script: script, + IntentID: intentID, + AssetID: assetID, + GroupIndex: int64(outputIndex), + }) +} + +func (r *assetRepository) Close() { + _ = r.db.Close() +} + +func (r *assetRepository) ListMetadataByAssetID( + ctx context.Context, + assetID string, +) ([]domain.AssetMetadata, error) { + res, err := r.querier.ListAssetMetadata(ctx, assetID) + if err != nil { + return nil, err + } + metadata := make([]domain.AssetMetadata, 0, len(res)) + for _, m := range res { + metadata = append(metadata, domain.AssetMetadata{ + Key: m.MetaKey, + Value: m.MetaValue, + }) + } + return metadata, nil +} + +func (r *assetRepository) InsertAssetAnchor(ctx context.Context, anchor domain.AssetAnchor) error { + err := r.querier.CreateAssetAnchor(ctx, queries.CreateAssetAnchorParams{ + AnchorTxid: anchor.Txid, + AnchorVout: int64(anchor.VOut), + }) + + if err != nil { + return err + } + + for _, asst := range anchor.Assets { + err := r.querier.AddAsset(ctx, queries.AddAssetParams{ + AnchorID: anchor.Txid, + AssetID: asst.AssetID, + Vout: int64(asst.VOut), + Amount: int64(asst.Amount), + }) + + if err != nil { + return err + } + + } + + return nil +} + +func (r *assetRepository) GetAssetAnchorByTxId( + ctx context.Context, + txId string, +) (*domain.AssetAnchor, error) { + anchor, err := r.querier.GetAssetAnchor(ctx, txId) + if err != nil { + return nil, err + } + + assetListResp, err := r.querier.ListAsset(ctx, anchor.AnchorTxid) + if err != nil { + return nil, err + } + + assetList := make([]domain.NormalAsset, 0, len(assetListResp)) + for _, asst := range 
assetListResp { + assetList = append(assetList, domain.NormalAsset{ + Outpoint: domain.Outpoint{ + Txid: asst.AnchorID, + VOut: uint32(asst.Vout), + }, + Amount: uint64(asst.Amount), + AssetID: asst.AssetID, + }) + } + + return &domain.AssetAnchor{ + Outpoint: domain.Outpoint{ + Txid: anchor.AnchorTxid, + VOut: uint32(anchor.AnchorVout), + }, + Assets: assetList, + }, nil +} + +func (r *assetRepository) InsertAssetGroup( + ctx context.Context, + assetGroup domain.AssetGroup, +) error { + controlId := sql.NullString{} + if assetGroup.ControlAssetID != "" { + controlId = sql.NullString{ + String: assetGroup.ControlAssetID, + Valid: true, + } + } + + err := r.querier.CreateAsset(ctx, queries.CreateAssetParams{ + ID: assetGroup.ID, + Quantity: int64(assetGroup.Quantity), + Immutable: assetGroup.Immutable, + ControlID: controlId, + }) + + if err != nil { + return err + } + + for _, md := range assetGroup.Metadata { + err := r.querier.UpsertAssetMetadata(ctx, queries.UpsertAssetMetadataParams{ + AssetID: assetGroup.ID, + MetaKey: md.Key, + MetaValue: md.Value, + }) + + if err != nil { + return err + } + } + + return nil + +} + +func (r *assetRepository) GetAssetGroupByID( + ctx context.Context, + assetID string, +) (*domain.AssetGroup, error) { + assetDB, err := r.querier.GetAssetGroup(ctx, assetID) + if err != nil { + return nil, err + } + + metadataDB, err := r.querier.ListAssetMetadata(ctx, assetID) + if err != nil { + return nil, err + } + + metadata := make([]domain.AssetMetadata, 0, len(metadataDB)) + for _, mdDB := range metadataDB { + metadata = append(metadata, domain.AssetMetadata{ + Key: mdDB.MetaKey, + Value: mdDB.MetaValue, + }) + } + + return &domain.AssetGroup{ + ID: assetDB.ID, + Quantity: uint64(assetDB.Quantity), + Immutable: assetDB.Immutable, + Metadata: metadata, + ControlAssetID: assetDB.ControlID.String, + }, nil +} + +func (r *assetRepository) IncreaseAssetGroupQuantity( + ctx context.Context, + assetID string, + amount uint64, +) error { + return 
r.querier.AddToAssetQuantity(ctx, queries.AddToAssetQuantityParams{ + ID: assetID, + Quantity: int64(amount), + }) +} + +func (r *assetRepository) DecreaseAssetGroupQuantity( + ctx context.Context, + assetID string, + amount uint64, +) error { + return r.querier.SubtractFromAssetQuantity(ctx, queries.SubtractFromAssetQuantityParams{ + ID: assetID, + Quantity: int64(amount), + }) +} + +func (r *assetRepository) UpdateAssetMetadataList( + ctx context.Context, + assetId string, + metadatalist []domain.AssetMetadata, +) error { + for _, md := range metadatalist { + err := r.querier.UpsertAssetMetadata(ctx, queries.UpsertAssetMetadataParams{ + AssetID: assetId, + MetaKey: md.Key, + MetaValue: md.Value, + }) + + if err != nil { + return err + } + } + + return nil +} diff --git a/internal/infrastructure/db/sqlite/migration/20251208110753_add_asset.down.sql b/internal/infrastructure/db/sqlite/migration/20251208110753_add_asset.down.sql new file mode 100644 index 000000000..bf16acb29 --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20251208110753_add_asset.down.sql @@ -0,0 +1,9 @@ +PRAGMA foreign_keys = OFF; + +DROP TABLE IF EXISTS teleport_asset; +DROP TABLE IF EXISTS asset_metadata; +DROP TABLE IF EXISTS asset; +DROP TABLE IF EXISTS asset_group; +DROP TABLE IF EXISTS asset_anchor; + +PRAGMA foreign_keys = ON; \ No newline at end of file diff --git a/internal/infrastructure/db/sqlite/migration/20251208110753_add_asset.up.sql b/internal/infrastructure/db/sqlite/migration/20251208110753_add_asset.up.sql new file mode 100644 index 000000000..a5e7287bb --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20251208110753_add_asset.up.sql @@ -0,0 +1,44 @@ +PRAGMA foreign_keys = ON; + +CREATE TABLE asset_anchor ( + anchor_txid TEXT PRIMARY KEY, + anchor_vout INTEGER NOT NULL +); + +CREATE TABLE asset ( + anchor_id TEXT NOT NULL, + asset_id TEXT NOT NULL, + vout INTEGER NOT NULL, + amount INTEGER NOT NULL, + PRIMARY KEY (anchor_id, vout), + FOREIGN KEY 
(anchor_id) REFERENCES asset_anchor(anchor_txid) ON DELETE CASCADE, + FOREIGN KEY (asset_id) REFERENCES asset_group(id) ON DELETE CASCADE +); + +CREATE TABLE asset_metadata ( + asset_id TEXT NOT NULL, + meta_key TEXT NOT NULL, + meta_value TEXT NOT NULL, + PRIMARY KEY (asset_id, meta_key), + FOREIGN KEY (asset_id) REFERENCES asset_group(id) ON DELETE CASCADE +); + +CREATE TABLE asset_group ( + id TEXT PRIMARY KEY, + immutable BOOLEAN NOT NULL DEFAULT 0, + quantity INTEGER NOT NULL, + control_id TEXT +); + +CREATE TABLE teleport_asset ( + script TEXT NOT NULL, + intent_id TEXT NOT NULL, + group_index INTEGER NOT NULL, + asset_id TEXT NOT NULL, + amount INTEGER NOT NULL, + is_claimed BOOLEAN NOT NULL DEFAULT 0, + PRIMARY KEY (script, intent_id, asset_id, group_index), + FOREIGN KEY (asset_id) REFERENCES asset_group(id) ON DELETE CASCADE +); + + diff --git a/internal/infrastructure/db/sqlite/sqlc/queries/db.go b/internal/infrastructure/db/sqlite/sqlc/queries/db.go index 51576a562..85679b3ce 100644 --- a/internal/infrastructure/db/sqlite/sqlc/queries/db.go +++ b/internal/infrastructure/db/sqlite/sqlc/queries/db.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.30.0 +// sqlc v1.29.0 package queries diff --git a/internal/infrastructure/db/sqlite/sqlc/queries/models.go b/internal/infrastructure/db/sqlite/sqlc/queries/models.go index 8c039a121..11ba0e7c3 100644 --- a/internal/infrastructure/db/sqlite/sqlc/queries/models.go +++ b/internal/infrastructure/db/sqlite/sqlc/queries/models.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.30.0 +// sqlc v1.29.0 package queries @@ -8,6 +8,31 @@ import ( "database/sql" ) +type Asset struct { + AnchorID string + AssetID string + Vout int64 + Amount int64 +} + +type AssetAnchor struct { + AnchorTxid string + AnchorVout int64 +} + +type AssetGroup struct { + ID string + Immutable bool + Quantity int64 + ControlID sql.NullString +} + +type AssetMetadatum struct { + AssetID string + MetaKey string + MetaValue string +} + type CheckpointTx struct { Txid string Tx string @@ -173,6 +198,15 @@ type ScheduledSession struct { UpdatedAt int64 } +type TeleportAsset struct { + Script string + IntentID string + GroupIndex int64 + AssetID string + Amount int64 + IsClaimed bool +} + type Tx struct { Txid string Tx string diff --git a/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go b/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go index 68e07d029..2af4e5484 100644 --- a/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go +++ b/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.30.0 +// sqlc v1.29.0 // source: query.sql package queries @@ -11,6 +11,31 @@ import ( "strings" ) +const addAsset = `-- name: AddAsset :exec +INSERT INTO asset (anchor_id, asset_id, vout, amount) +VALUES (?, ?, ?, ?) 
+ON CONFLICT(anchor_id, vout) +DO UPDATE SET amount = excluded.amount +WHERE asset.asset_id = excluded.asset_id +` + +type AddAssetParams struct { + AnchorID string + AssetID string + Vout int64 + Amount int64 +} + +func (q *Queries) AddAsset(ctx context.Context, arg AddAssetParams) error { + _, err := q.db.ExecContext(ctx, addAsset, + arg.AnchorID, + arg.AssetID, + arg.Vout, + arg.Amount, + ) + return err +} + const addIntentFees = `-- name: AddIntentFees :exec INSERT INTO intent_fees ( offchain_input_fee_program, @@ -60,6 +85,22 @@ func (q *Queries) AddIntentFees(ctx context.Context, arg AddIntentFeesParams) er return err } +const addToAssetQuantity = `-- name: AddToAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity + ? +WHERE id = ? +` + +type AddToAssetQuantityParams struct { + Quantity int64 + ID string +} + +func (q *Queries) AddToAssetQuantity(ctx context.Context, arg AddToAssetQuantityParams) error { + _, err := q.db.ExecContext(ctx, addToAssetQuantity, arg.Quantity, arg.ID) + return err +} + const clearIntentFees = `-- name: ClearIntentFees :exec INSERT INTO intent_fees ( offchain_input_fee_program, @@ -84,6 +125,214 @@ func (q *Queries) ClearScheduledSession(ctx context.Context) error { return err } +const createAsset = `-- name: CreateAsset :exec +INSERT INTO asset_group (id, quantity, immutable, control_id) +VALUES (?, ?, ?, ?) +` + +type CreateAssetParams struct { + ID string + Quantity int64 + Immutable bool + ControlID sql.NullString +} + +func (q *Queries) CreateAsset(ctx context.Context, arg CreateAssetParams) error { + _, err := q.db.ExecContext(ctx, createAsset, + arg.ID, + arg.Quantity, + arg.Immutable, + arg.ControlID, + ) + return err +} + +const createAssetAnchor = `-- name: CreateAssetAnchor :exec +INSERT INTO asset_anchor (anchor_txid, anchor_vout) +VALUES (?, ?) 
+` + +type CreateAssetAnchorParams struct { + AnchorTxid string + AnchorVout int64 +} + +func (q *Queries) CreateAssetAnchor(ctx context.Context, arg CreateAssetAnchorParams) error { + _, err := q.db.ExecContext(ctx, createAssetAnchor, arg.AnchorTxid, arg.AnchorVout) + return err +} + +const createTeleportAsset = `-- name: CreateTeleportAsset :exec +INSERT INTO teleport_asset (script, intent_id, asset_id, group_index, amount, is_claimed) +VALUES (?, ?, ?, ?, ?, ?) +` + +type CreateTeleportAssetParams struct { + Script string + IntentID string + AssetID string + GroupIndex int64 + Amount int64 + IsClaimed bool +} + +func (q *Queries) CreateTeleportAsset(ctx context.Context, arg CreateTeleportAssetParams) error { + _, err := q.db.ExecContext(ctx, createTeleportAsset, + arg.Script, + arg.IntentID, + arg.AssetID, + arg.GroupIndex, + arg.Amount, + arg.IsClaimed, + ) + return err +} + +const deleteAsset = `-- name: DeleteAsset :exec +DELETE FROM asset +WHERE anchor_id = ? AND vout = ? +` + +type DeleteAssetParams struct { + AnchorID string + Vout int64 +} + +func (q *Queries) DeleteAsset(ctx context.Context, arg DeleteAssetParams) error { + _, err := q.db.ExecContext(ctx, deleteAsset, arg.AnchorID, arg.Vout) + return err +} + +const deleteAssetAnchor = `-- name: DeleteAssetAnchor :exec +DELETE FROM asset_anchor +WHERE anchor_txid = ? +` + +func (q *Queries) DeleteAssetAnchor(ctx context.Context, anchorTxid string) error { + _, err := q.db.ExecContext(ctx, deleteAssetAnchor, anchorTxid) + return err +} + +const getAsset = `-- name: GetAsset :one +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = ? AND vout = ? 
+` + +type GetAssetParams struct { + AnchorID string + Vout int64 +} + +func (q *Queries) GetAsset(ctx context.Context, arg GetAssetParams) (Asset, error) { + row := q.db.QueryRowContext(ctx, getAsset, arg.AnchorID, arg.Vout) + var i Asset + err := row.Scan( + &i.AnchorID, + &i.AssetID, + &i.Vout, + &i.Amount, + ) + return i, err +} + +const getAssetAnchor = `-- name: GetAssetAnchor :one +SELECT anchor_txid, anchor_vout +FROM asset_anchor +WHERE anchor_txid = ? +` + +func (q *Queries) GetAssetAnchor(ctx context.Context, anchorTxid string) (AssetAnchor, error) { + row := q.db.QueryRowContext(ctx, getAssetAnchor, anchorTxid) + var i AssetAnchor + err := row.Scan(&i.AnchorTxid, &i.AnchorVout) + return i, err +} + +const getAssetGroup = `-- name: GetAssetGroup :one +SELECT id, quantity, immutable, control_id +FROM asset_group +WHERE id = ? +` + +type GetAssetGroupRow struct { + ID string + Quantity int64 + Immutable bool + ControlID sql.NullString +} + +func (q *Queries) GetAssetGroup(ctx context.Context, id string) (GetAssetGroupRow, error) { + row := q.db.QueryRowContext(ctx, getAssetGroup, id) + var i GetAssetGroupRow + err := row.Scan( + &i.ID, + &i.Quantity, + &i.Immutable, + &i.ControlID, + ) + return i, err +} + +const getAssetMetadata = `-- name: GetAssetMetadata :one +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = ? AND meta_key = ? +` + +type GetAssetMetadataParams struct { + AssetID string + MetaKey string +} + +func (q *Queries) GetAssetMetadata(ctx context.Context, arg GetAssetMetadataParams) (AssetMetadatum, error) { + row := q.db.QueryRowContext(ctx, getAssetMetadata, arg.AssetID, arg.MetaKey) + var i AssetMetadatum + err := row.Scan(&i.AssetID, &i.MetaKey, &i.MetaValue) + return i, err +} + +const getTeleportAsset = `-- name: GetTeleportAsset :one +SELECT script, intent_id, asset_id, group_index, amount, is_claimed +FROM teleport_asset +WHERE script = ? AND intent_id = ? AND asset_id = ? AND group_index = ? 
+` + +type GetTeleportAssetParams struct { + Script string + IntentID string + AssetID string + GroupIndex int64 +} + +type GetTeleportAssetRow struct { + Script string + IntentID string + AssetID string + GroupIndex int64 + Amount int64 + IsClaimed bool +} + +func (q *Queries) GetTeleportAsset(ctx context.Context, arg GetTeleportAssetParams) (GetTeleportAssetRow, error) { + row := q.db.QueryRowContext(ctx, getTeleportAsset, + arg.Script, + arg.IntentID, + arg.AssetID, + arg.GroupIndex, + ) + var i GetTeleportAssetRow + err := row.Scan( + &i.Script, + &i.IntentID, + &i.AssetID, + &i.GroupIndex, + &i.Amount, + &i.IsClaimed, + ) + return i, err +} + const insertVtxoCommitmentTxid = `-- name: InsertVtxoCommitmentTxid :exec INSERT INTO vtxo_commitment_txid (vtxo_txid, vtxo_vout, commitment_txid) VALUES (?1, ?2, ?3) @@ -100,6 +349,143 @@ func (q *Queries) InsertVtxoCommitmentTxid(ctx context.Context, arg InsertVtxoCo return err } +const listAsset = `-- name: ListAsset :many +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = ? +ORDER BY vout +` + +func (q *Queries) ListAsset(ctx context.Context, anchorID string) ([]Asset, error) { + rows, err := q.db.QueryContext(ctx, listAsset, anchorID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Asset + for rows.Next() { + var i Asset + if err := rows.Scan( + &i.AnchorID, + &i.AssetID, + &i.Vout, + &i.Amount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listAssetAnchorsByAssetID = `-- name: ListAssetAnchorsByAssetID :many +SELECT DISTINCT aa.anchor_txid, aa.anchor_vout +FROM asset_anchor aa +JOIN asset a ON aa.anchor_txid = a.anchor_id +WHERE a.asset_id = ? 
+ORDER BY aa.anchor_txid +` + +func (q *Queries) ListAssetAnchorsByAssetID(ctx context.Context, assetID string) ([]AssetAnchor, error) { + rows, err := q.db.QueryContext(ctx, listAssetAnchorsByAssetID, assetID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []AssetAnchor + for rows.Next() { + var i AssetAnchor + if err := rows.Scan(&i.AnchorTxid, &i.AnchorVout); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listAssetGroup = `-- name: ListAssetGroup :many +SELECT id, quantity, immutable, control_id +FROM asset_group +ORDER BY id +` + +type ListAssetGroupRow struct { + ID string + Quantity int64 + Immutable bool + ControlID sql.NullString +} + +func (q *Queries) ListAssetGroup(ctx context.Context) ([]ListAssetGroupRow, error) { + rows, err := q.db.QueryContext(ctx, listAssetGroup) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ListAssetGroupRow + for rows.Next() { + var i ListAssetGroupRow + if err := rows.Scan( + &i.ID, + &i.Quantity, + &i.Immutable, + &i.ControlID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listAssetMetadata = `-- name: ListAssetMetadata :many +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = ? 
+ORDER BY meta_key +` + +func (q *Queries) ListAssetMetadata(ctx context.Context, assetID string) ([]AssetMetadatum, error) { + rows, err := q.db.QueryContext(ctx, listAssetMetadata, assetID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []AssetMetadatum + for rows.Next() { + var i AssetMetadatum + if err := rows.Scan(&i.AssetID, &i.MetaKey, &i.MetaValue); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectActiveScriptConvictions = `-- name: SelectActiveScriptConvictions :many SELECT id, type, created_at, expires_at, crime_type, crime_round_id, crime_reason, pardoned, script FROM conviction WHERE script = ?1 @@ -1647,6 +2033,23 @@ func (q *Queries) SelectVtxosWithPubkeys(ctx context.Context, arg SelectVtxosWit return items, nil } +const subtractFromAssetQuantity = `-- name: SubtractFromAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity - ? +WHERE id = ? AND quantity >= ? +` + +type SubtractFromAssetQuantityParams struct { + Quantity int64 + ID string + Quantity_2 int64 +} + +func (q *Queries) SubtractFromAssetQuantity(ctx context.Context, arg SubtractFromAssetQuantityParams) error { + _, err := q.db.ExecContext(ctx, subtractFromAssetQuantity, arg.Quantity, arg.ID, arg.Quantity_2) + return err +} + const updateConvictionPardoned = `-- name: UpdateConvictionPardoned :exec UPDATE conviction SET pardoned = true WHERE id = ?1 ` @@ -1656,6 +2059,31 @@ func (q *Queries) UpdateConvictionPardoned(ctx context.Context, id string) error return err } +const updateTeleportAsset = `-- name: UpdateTeleportAsset :exec +UPDATE teleport_asset +SET is_claimed = ? +WHERE script = ? AND intent_id = ? AND asset_id = ? AND group_index = ? 
+` + +type UpdateTeleportAssetParams struct { + IsClaimed bool + Script string + IntentID string + AssetID string + GroupIndex int64 +} + +func (q *Queries) UpdateTeleportAsset(ctx context.Context, arg UpdateTeleportAssetParams) error { + _, err := q.db.ExecContext(ctx, updateTeleportAsset, + arg.IsClaimed, + arg.Script, + arg.IntentID, + arg.AssetID, + arg.GroupIndex, + ) + return err +} + const updateVtxoExpiration = `-- name: UpdateVtxoExpiration :exec UPDATE vtxo SET expires_at = ?1 WHERE txid = ?2 AND vout = ?3 ` @@ -1761,6 +2189,24 @@ func (q *Queries) UpdateVtxoUnrolled(ctx context.Context, arg UpdateVtxoUnrolled return err } +const upsertAssetMetadata = `-- name: UpsertAssetMetadata :exec +INSERT INTO asset_metadata (asset_id, meta_key, meta_value) +VALUES (?, ?, ?) +ON CONFLICT(asset_id, meta_key) +DO UPDATE SET meta_value = excluded.meta_value +` + +type UpsertAssetMetadataParams struct { + AssetID string + MetaKey string + MetaValue string +} + +func (q *Queries) UpsertAssetMetadata(ctx context.Context, arg UpsertAssetMetadataParams) error { + _, err := q.db.ExecContext(ctx, upsertAssetMetadata, arg.AssetID, arg.MetaKey, arg.MetaValue) + return err +} + const upsertCheckpointTx = `-- name: UpsertCheckpointTx :exec INSERT INTO checkpoint_tx (txid, tx, commitment_txid, is_root_commitment_txid, offchain_txid) VALUES (?1, ?2, ?3, ?4, ?5) diff --git a/internal/infrastructure/db/sqlite/sqlc/query.sql b/internal/infrastructure/db/sqlite/sqlc/query.sql index 8232b18ab..9ad40798b 100644 --- a/internal/infrastructure/db/sqlite/sqlc/query.sql +++ b/internal/infrastructure/db/sqlite/sqlc/query.sql @@ -381,6 +381,104 @@ SELECT * FROM conviction WHERE crime_round_id = @round_id ORDER BY created_at ASC; +-- name: CreateAssetAnchor :exec +INSERT INTO asset_anchor (anchor_txid, anchor_vout) +VALUES (?, ?); + +-- name: ListAssetAnchorsByAssetID :many +SELECT DISTINCT aa.anchor_txid, aa.anchor_vout +FROM asset_anchor aa +JOIN asset a ON aa.anchor_txid = a.anchor_id +WHERE 
a.asset_id = ? +ORDER BY aa.anchor_txid; + +-- name: GetAssetAnchor :one +SELECT anchor_txid, anchor_vout +FROM asset_anchor +WHERE anchor_txid = ?; + +-- name: DeleteAssetAnchor :exec +DELETE FROM asset_anchor +WHERE anchor_txid = ?; + +-- name: UpsertAssetMetadata :exec +INSERT INTO asset_metadata (asset_id, meta_key, meta_value) +VALUES (?, ?, ?) +ON CONFLICT(asset_id, meta_key) +DO UPDATE SET meta_value = excluded.meta_value; + +-- name: GetAssetMetadata :one +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = ? AND meta_key = ?; + + +-- name: ListAssetMetadata :many +SELECT asset_id, meta_key, meta_value +FROM asset_metadata +WHERE asset_id = ? +ORDER BY meta_key; + +-- name: AddAsset :exec +INSERT INTO asset (anchor_id, asset_id, vout, amount) +VALUES (?, ?, ?, ?) +ON CONFLICT(anchor_id, vout) +DO UPDATE SET amount = excluded.amount +WHERE asset.asset_id = excluded.asset_id; + +-- name: GetAsset :one +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = ? AND vout = ?; + +-- name: DeleteAsset :exec +DELETE FROM asset +WHERE anchor_id = ? AND vout = ?; + +-- name: ListAsset :many +SELECT anchor_id, asset_id, vout, amount +FROM asset +WHERE anchor_id = ? +ORDER BY vout; + +-- name: GetAssetGroup :one +SELECT id, quantity, immutable, control_id +FROM asset_group +WHERE id = ?; + + +-- name: ListAssetGroup :many +SELECT id, quantity, immutable, control_id +FROM asset_group +ORDER BY id; + +-- name: AddToAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity + ? +WHERE id = ?; + +-- name: SubtractFromAssetQuantity :exec +UPDATE asset_group +SET quantity = quantity - ? +WHERE id = ? 
AND quantity >= ?; + +-- name: CreateAsset :exec +INSERT INTO asset_group (id, quantity, immutable, control_id) +VALUES (?, ?, ?, ?); + +-- name: CreateTeleportAsset :exec +INSERT INTO teleport_asset (script, intent_id, asset_id, group_index, amount, is_claimed) +VALUES (?, ?, ?, ?, ?, ?); + +-- name: GetTeleportAsset :one +SELECT script, intent_id, asset_id, group_index, amount, is_claimed +FROM teleport_asset +WHERE script = ? AND intent_id = ? AND asset_id = ? AND group_index = ?; + +-- name: UpdateTeleportAsset :exec +UPDATE teleport_asset +SET is_claimed = ? +WHERE script = ? AND intent_id = ? AND asset_id = ? AND group_index = ?; -- name: SelectLatestIntentFees :one SELECT * FROM intent_fees ORDER BY id DESC LIMIT 1; @@ -426,4 +524,4 @@ VALUES ('', '', '', ''); -- name: SelectIntentByTxid :one SELECT id, txid, proof, message FROM intent -WHERE txid = @txid; \ No newline at end of file +WHERE txid = @txid; diff --git a/internal/infrastructure/live-store/inmemory/forfeits.go b/internal/infrastructure/live-store/inmemory/forfeits.go index bbdc47b03..1d50950ba 100644 --- a/internal/infrastructure/live-store/inmemory/forfeits.go +++ b/internal/infrastructure/live-store/inmemory/forfeits.go @@ -87,7 +87,7 @@ func (m *forfeitTxsStore) Init( return nil } -func (m *forfeitTxsStore) Sign(_ context.Context, txs []string) error { +func (m *forfeitTxsStore) Verify(_ context.Context, txs []string) error { if len(txs) == 0 { return nil } diff --git a/internal/infrastructure/live-store/live_store_test.go b/internal/infrastructure/live-store/live_store_test.go index 385f542d2..9b78c01d9 100644 --- a/internal/infrastructure/live-store/live_store_test.go +++ b/internal/infrastructure/live-store/live_store_test.go @@ -236,7 +236,7 @@ func runLiveStoreTests(t *testing.T, store ports.LiveStore) { wg.Add(1) go func(txStr string) { defer wg.Done() - err := store.ForfeitTxs().Sign(ctx, []string{txStr}) + err := store.ForfeitTxs().Verify(ctx, []string{txStr}) require.NoError(t, err) 
}(tx) } @@ -280,7 +280,7 @@ func runLiveStoreTests(t *testing.T, store ports.LiveStore) { wg2.Add(1) go func(txStr string) { defer wg2.Done() - err := store.ForfeitTxs().Sign(ctx, []string{txStr}) + err := store.ForfeitTxs().Verify(ctx, []string{txStr}) require.NoError(t, err) }(tx) } @@ -294,7 +294,7 @@ func runLiveStoreTests(t *testing.T, store ports.LiveStore) { require.True(t, allSigned) // sign after the session is deleted - require.Error(t, store.ForfeitTxs().Sign(ctx, []string{txs[0]})) + require.Error(t, store.ForfeitTxs().Verify(ctx, []string{txs[0]})) allSigned, err = store.ForfeitTxs().AllSigned(ctx) require.NoError(t, err) diff --git a/internal/infrastructure/live-store/redis/forfeits.go b/internal/infrastructure/live-store/redis/forfeits.go index d35f70448..bc3a64a64 100644 --- a/internal/infrastructure/live-store/redis/forfeits.go +++ b/internal/infrastructure/live-store/redis/forfeits.go @@ -115,7 +115,7 @@ func (s *forfeitTxsStore) Init( return fmt.Errorf("failed to init forfeit txs after max num of retries: %v", err) } -func (s *forfeitTxsStore) Sign(ctx context.Context, txs []string) error { +func (s *forfeitTxsStore) Verify(ctx context.Context, txs []string) error { if len(txs) == 0 { return nil } diff --git a/internal/infrastructure/tx-builder/covenantless/builder.go b/internal/infrastructure/tx-builder/covenantless/builder.go index d1caf6a09..570823bfe 100644 --- a/internal/infrastructure/tx-builder/covenantless/builder.go +++ b/internal/infrastructure/tx-builder/covenantless/builder.go @@ -10,6 +10,7 @@ import ( "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/internal/core/ports" arklib "github.com/arkade-os/arkd/pkg/ark-lib" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" "github.com/arkade-os/arkd/pkg/ark-lib/script" "github.com/arkade-os/arkd/pkg/ark-lib/tree" "github.com/arkade-os/arkd/pkg/ark-lib/txutils" @@ -290,6 +291,12 @@ func (b *txBuilder) VerifyForfeitTxs( return nil, err } + // verfity asset forfeit 
transaction + extensionAnchor, err := verifyAssetForfeitTransaction(indexedVtxos, tx) + if err != nil { + return nil, err + } + if len(tx.Inputs) != 2 { continue } @@ -453,10 +460,11 @@ func (b *txBuilder) VerifyForfeitTxs( prevouts = []*wire.TxOut{connectorOutput, vtxoPrevout} } - rebuilt, err := tree.BuildForfeitTx( + rebuilt, err := tree.BuildForfeitTxWithAnchor( inputs, sequences, prevouts, + extensionAnchor, forfeitScript, uint32(locktime), ) @@ -1178,3 +1186,78 @@ func (b *txBuilder) getForfeitScript() ([]byte, error) { return txscript.PayToAddrScript(addr) } + +func verifyAssetForfeitTransaction( + vtxoMap map[domain.Outpoint]domain.Vtxo, tx *psbt.Packet, +) (*wire.TxOut, error) { + + if tx == nil || tx.UnsignedTx == nil { + return nil, fmt.Errorf("nil forfeit packet or unsigned tx") + } + + txid := tx.UnsignedTx.TxID() + + for _, output := range tx.UnsignedTx.TxOut { + if !extension.ContainsAssetPacket(output.PkScript) { + continue + } + + assetPkt, err := extension.DecodeAssetPacket(*output) + if err != nil { + return nil, fmt.Errorf("decode asset packet for forfeit txid %s: %w", txid, err) + } + + for _, asset := range assetPkt.Assets { + if asset.AssetId == nil { + // Issuance assets don't have inputs to validate against existing VTXOs + continue + } + assetID := asset.AssetId.ToString() + for _, in := range asset.Inputs { + if in.Type == extension.AssetTypeTeleport { + continue + } + + if int(in.Vin) >= len(tx.UnsignedTx.TxIn) { + return nil, fmt.Errorf( + "asset input index out of range for txid %s: vin=%d", + txid, in.Vin, + ) + } + + prevOut := tx.UnsignedTx.TxIn[in.Vin].PreviousOutPoint + outpoint := domain.Outpoint{ + Txid: prevOut.Hash.String(), + VOut: prevOut.Index, + } + + vtxo, ok := vtxoMap[outpoint] + if !ok { + return nil, fmt.Errorf( + "vtxo not found for asset input txid=%s assetID=%s outpoint=%s", + txid, assetID, outpoint.String(), + ) + } + + foundAsset := false + for _, asset := range vtxo.Assets { + if asset.AssetID == assetID { + 
foundAsset = true + break + } + } + + if !foundAsset { + return nil, fmt.Errorf( + "vtxo %s missing asset extension for assetID %s", + outpoint.String(), assetID, + ) + } + } + + return output, nil + } + } + + return nil, nil +} diff --git a/internal/infrastructure/tx-builder/covenantless/utils.go b/internal/infrastructure/tx-builder/covenantless/utils.go index 1a15e49a4..31f1b379d 100644 --- a/internal/infrastructure/tx-builder/covenantless/utils.go +++ b/internal/infrastructure/tx-builder/covenantless/utils.go @@ -3,8 +3,10 @@ package txbuilder import ( "encoding/hex" "fmt" + "log" "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" "github.com/arkade-os/arkd/pkg/ark-lib/script" "github.com/arkade-os/arkd/pkg/ark-lib/tree" "github.com/btcsuite/btcd/btcec/v2/schnorr" @@ -52,30 +54,77 @@ func getOutputVtxosLeaves( } leaves := make([]tree.Leaf, 0) + for i, intent := range intents { + cosigners := cosignersPublicKeys[i] + for _, receiver := range intent.Receivers { - if !receiver.IsOnchain() { - pubkeyBytes, err := hex.DecodeString(receiver.PubKey) + if receiver.IsOnchain() { + // Onchain outputs are not part of the vtxo tree. + continue + } + + // Decode and parse receiver pubkey once for both asset and non-asset cases. 
+ pubkeyBytes, err := hex.DecodeString(receiver.PubKey) + if err != nil { + return nil, fmt.Errorf("receiver pubkey hex decode failed: %w", err) + } + + pubkey, err := schnorr.ParsePubKey(pubkeyBytes) + if err != nil { + return nil, fmt.Errorf("receiver pubkey parse failed: %w", err) + } + + // Plain offchain vtxo (no asset) + vtxoScript, err := script.P2TRScript(pubkey) + if err != nil { + return nil, fmt.Errorf("failed to create P2TR script: %w", err) + } + + // AssetGroup teleport case + if len(receiver.AssetId) > 0 { + assetId, err := extension.AssetIdFromString(receiver.AssetId) if err != nil { - return nil, fmt.Errorf("failed to decode pubkey: %s", err) + return nil, fmt.Errorf("failed to decode asset id: %w", err) } - pubkey, err := schnorr.ParsePubKey(pubkeyBytes) - if err != nil { - return nil, fmt.Errorf("failed to parse pubkey: %s", err) + if assetId == nil { + return nil, fmt.Errorf("asset id is nil") + } + + assetPacket := &extension.AssetPacket{ + Assets: []extension.AssetGroup{{ + AssetId: assetId, + Outputs: []extension.AssetOutput{{ + Type: extension.AssetTypeTeleport, + Script: vtxoScript, + Amount: receiver.Amount, + }}, + }}, } - vtxoScript, err := script.P2TRScript(pubkey) + assetOpret, err := assetPacket.EncodeAssetPacket() if err != nil { - return nil, fmt.Errorf("failed to create script: %s", err) + return nil, fmt.Errorf("failed to encode asset opreturn: %w", err) } leaves = append(leaves, tree.Leaf{ - Script: hex.EncodeToString(vtxoScript), - Amount: receiver.Amount, - CosignersPublicKeys: cosignersPublicKeys[i], + Script: hex.EncodeToString(assetOpret.PkScript), + Amount: uint64(assetOpret.Value), + CosignersPublicKeys: cosigners, }) + + continue } + + log.Printf("plain vtxo script %s", hex.EncodeToString(vtxoScript)) + + leaves = append(leaves, tree.Leaf{ + Script: hex.EncodeToString(vtxoScript), + Amount: receiver.Amount, + CosignersPublicKeys: cosigners, + }) + } } return leaves, nil diff --git 
a/internal/interface/grpc/handlers/indexer.go b/internal/interface/grpc/handlers/indexer.go index e2a74e8d9..0c44a20b1 100644 --- a/internal/interface/grpc/handlers/indexer.go +++ b/internal/interface/grpc/handlers/indexer.go @@ -23,6 +23,7 @@ type indexerService struct { eventsCh <-chan application.TransactionEvent scriptSubsHandler *broker[*arkv1.GetSubscriptionResponse] + teleportSubsHandler *broker[*arkv1.GetSubscriptionResponse] subscriptionTimeoutDuration time.Duration heartbeat time.Duration @@ -36,6 +37,7 @@ func NewIndexerService( indexerSvc: indexerSvc, eventsCh: eventsCh, scriptSubsHandler: newBroker[*arkv1.GetSubscriptionResponse](), + teleportSubsHandler: newBroker[*arkv1.GetSubscriptionResponse](), subscriptionTimeoutDuration: subscriptionTimeoutDuration, heartbeat: time.Duration(heartbeat) * time.Second, } @@ -45,6 +47,40 @@ func NewIndexerService( return svc } +func (e *indexerService) GetAssetGroup(ctx context.Context, request *arkv1.GetAssetGroupRequest, +) (*arkv1.GetAssetGroupResponse, error) { + assetId := request.GetAssetId() + if assetId == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing asset id") + } + + resp, err := e.indexerSvc.GetAssetGroup(ctx, assetId) + if err != nil { + return nil, status.Errorf(codes.Internal, "%s", err.Error()) + } + if resp == nil { + return nil, status.Errorf(codes.NotFound, "asset not found: %s", assetId) + } + + assetMetadata := make([]*arkv1.AssetMetadata, 0) + for _, metadata := range resp.AssetGroup.Metadata { + assetMetadata = append(assetMetadata, &arkv1.AssetMetadata{ + Key: metadata.Key, + Value: metadata.Value, + }) + } + + return &arkv1.GetAssetGroupResponse{ + AssetId: assetId, + AssetGroup: &arkv1.AssetGroup{ + Id: resp.AssetGroup.ID, + Quantity: resp.AssetGroup.Quantity, + Immutable: resp.AssetGroup.Immutable, + Metadata: assetMetadata, + }, + }, nil +} + func (e *indexerService) GetCommitmentTx( ctx context.Context, request *arkv1.GetCommitmentTxRequest, ) 
(*arkv1.GetCommitmentTxResponse, error) { @@ -377,17 +413,30 @@ func (h *indexerService) GetSubscription( } h.scriptSubsHandler.stopTimeout(subscriptionId) + h.teleportSubsHandler.stopTimeout(subscriptionId) defer func() { topics := h.scriptSubsHandler.getTopics(subscriptionId) if len(topics) > 0 { h.scriptSubsHandler.startTimeout(subscriptionId, h.subscriptionTimeoutDuration) - return + } else { + h.scriptSubsHandler.removeListener(subscriptionId) + } + + teleportTopics := h.teleportSubsHandler.getTopics(subscriptionId) + if len(teleportTopics) > 0 { + h.teleportSubsHandler.startTimeout(subscriptionId, h.subscriptionTimeoutDuration) + } else { + h.teleportSubsHandler.removeListener(subscriptionId) } - h.scriptSubsHandler.removeListener(subscriptionId) }() - ch, err := h.scriptSubsHandler.getListenerChannel(subscriptionId) - if err != nil { + scriptCh, err := h.scriptSubsHandler.getListenerChannel(subscriptionId) + if err != nil && !strings.Contains(err.Error(), "listener not found") { + return status.Error(codes.Internal, err.Error()) + } + + teleportCh, err := h.teleportSubsHandler.getListenerChannel(subscriptionId) + if err != nil && !strings.Contains(err.Error(), "listener not found") { return status.Error(codes.Internal, err.Error()) } @@ -411,7 +460,12 @@ func (h *indexerService) GetSubscription( select { case <-stream.Context().Done(): return nil - case ev := <-ch: + case ev := <-scriptCh: + if err := stream.Send(ev); err != nil { + return err + } + resetTimer() + case ev := <-teleportCh: if err := stream.Send(ev); err != nil { return err } @@ -472,11 +526,17 @@ func (h *indexerService) SubscribeForScripts( h.scriptSubsHandler.pushListener(listener) h.scriptSubsHandler.startTimeout(subscriptionId, h.subscriptionTimeoutDuration) + h.teleportSubsHandler.pushListener(listener) + h.teleportSubsHandler.startTimeout(subscriptionId, h.subscriptionTimeoutDuration) } else { // update listener topic if err := h.scriptSubsHandler.addTopics(subscriptionId, scripts); err 
!= nil { return nil, status.Error(codes.Internal, err.Error()) } + // update teleport listener topic + if err := h.teleportSubsHandler.addTopics(subscriptionId, scripts); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } } return &arkv1.SubscribeForScriptsResponse{ SubscriptionId: subscriptionId, @@ -485,7 +545,7 @@ func (h *indexerService) SubscribeForScripts( func (h *indexerService) listenToTxEvents() { for event := range h.eventsCh { - if !h.scriptSubsHandler.hasListeners() { + if !h.scriptSubsHandler.hasListeners() && !h.teleportSubsHandler.hasListeners() { continue } @@ -561,6 +621,53 @@ func (h *indexerService) listenToTxEvents() { }(l) } } + + teleportListenersCopy := h.teleportSubsHandler.getListenersCopy() + if len(teleportListenersCopy) > 0 { + parsedTeleportEvents := make([]*arkv1.TeleportEvent, 0) + for _, te := range event.TeleportAssets { + parsedTeleportEvents = append(parsedTeleportEvents, &arkv1.TeleportEvent{ + TeleportHash: te.TeleportHash, + AnchorOutpoint: te.AnchorOutpoint.String(), + AssetId: te.AssetID, + Amount: te.Amount, + OutputVout: te.OutputVout, + CreatedAt: te.CreatedAt, + ExpiresAt: te.ExpiresAt, + }) + } + + for _, l := range teleportListenersCopy { + teleportEvents := make([]*arkv1.TeleportEvent, 0) + involvedHashes := make([]string, 0) + + for _, tpEvent := range parsedTeleportEvents { + if _, ok := l.topics[tpEvent.TeleportHash]; ok { + involvedHashes = append(involvedHashes, tpEvent.TeleportHash) + teleportEvents = append(teleportEvents, tpEvent) + } + } + + if len(teleportEvents) > 0 { + go func(listener *listener[*arkv1.GetSubscriptionResponse]) { + select { + case listener.ch <- &arkv1.GetSubscriptionResponse{ + Data: &arkv1.GetSubscriptionResponse_Event{ + Event: &arkv1.IndexerSubscriptionEvent{ + Txid: event.Txid, + TeleportEvents: teleportEvents, + TeleportHashes: involvedHashes, + Tx: event.Tx, + }, + }, + }: + default: + // channel is full, skip this message to prevent blocking + } + }(l) + } + } 
+ } } } @@ -696,6 +803,14 @@ func parseTimeRange(after, before int64) (int64, int64, error) { } func newIndexerVtxo(vtxo domain.Vtxo) *arkv1.IndexerVtxo { + assets := make([]*arkv1.IndexerAsset, 0) + for _, asset := range vtxo.Assets { + assets = append(assets, &arkv1.IndexerAsset{ + AssetId: asset.AssetID, + Amount: asset.Amount, + }) + } + return &arkv1.IndexerVtxo{ Outpoint: &arkv1.IndexerOutpoint{ Txid: vtxo.Txid, @@ -713,5 +828,6 @@ func newIndexerVtxo(vtxo domain.Vtxo) *arkv1.IndexerVtxo { CommitmentTxids: vtxo.CommitmentTxids, SettledBy: vtxo.SettledBy, ArkTxid: vtxo.ArkTxid, + Assets: assets, } } diff --git a/internal/interface/grpc/handlers/parser.go b/internal/interface/grpc/handlers/parser.go index 937f7494f..71f610e37 100644 --- a/internal/interface/grpc/handlers/parser.go +++ b/internal/interface/grpc/handlers/parser.go @@ -172,6 +172,21 @@ type vtxoList []domain.Vtxo func (v vtxoList) toProto() []*arkv1.Vtxo { list := make([]*arkv1.Vtxo, 0, len(v)) + + toAssets := func(vv domain.Vtxo) []*arkv1.Asset { + if len(vv.Assets) <= 0 { + return nil + } + assets := make([]*arkv1.Asset, 0) + for _, asset := range vv.Assets { + assets = append(assets, &arkv1.Asset{ + AssetId: asset.AssetID, + Amount: asset.Amount, + }) + } + return assets + } + for _, vv := range v { list = append(list, &arkv1.Vtxo{ Outpoint: &arkv1.Outpoint{ @@ -190,6 +205,7 @@ func (v vtxoList) toProto() []*arkv1.Vtxo { CreatedAt: vv.CreatedAt, SettledBy: vv.SettledBy, ArkTxid: vv.ArkTxid, + Assets: toAssets(vv), }) } diff --git a/internal/interface/grpc/permissions/permissions.go b/internal/interface/grpc/permissions/permissions.go index 2bb917588..df7f6aeb9 100644 --- a/internal/interface/grpc/permissions/permissions.go +++ b/internal/interface/grpc/permissions/permissions.go @@ -255,6 +255,10 @@ func Whitelist() map[string][]bakery.Op { Entity: EntityIndexer, Action: "read", }}, + fmt.Sprintf("/%s/GetAssetGroup", arkv1.IndexerService_ServiceDesc.ServiceName): {{ + Entity: EntityIndexer, + 
Action: "read", + }}, } } diff --git a/pkg/ark-lib/extension/asset.go b/pkg/ark-lib/extension/asset.go new file mode 100644 index 000000000..0b9007e96 --- /dev/null +++ b/pkg/ark-lib/extension/asset.go @@ -0,0 +1,157 @@ +package extension + +import ( + "encoding/hex" + "errors" + "fmt" + + "github.com/btcsuite/btcd/wire" +) + +const AssetVersion byte = 0x01 + +type AssetId struct { + Txid [32]byte + Index uint16 +} + +type AssetRefType uint8 + +const ( + AssetRefByID AssetRefType = 0x01 + AssetRefByGroup AssetRefType = 0x02 +) + +type AssetRef struct { + Type AssetRefType + AssetId AssetId + GroupIndex uint16 +} + +func AssetRefFromId(assetId AssetId) *AssetRef { + return &AssetRef{ + Type: AssetRefByID, + AssetId: assetId, + } +} + +func AssetRefFromGroupIndex(groupIndex uint16) *AssetRef { + return &AssetRef{ + Type: AssetRefByGroup, + GroupIndex: groupIndex, + } +} + +func (a AssetId) ToString() string { + var buf [34]byte + copy(buf[:32], a.Txid[:]) + // Big endian encoding for index + buf[32] = byte(a.Index >> 8) + buf[33] = byte(a.Index) + return hex.EncodeToString(buf[:]) +} + +// String implements fmt.Stringer +func (a AssetId) String() string { + return a.ToString() +} + +func AssetIdFromString(s string) (*AssetId, error) { + buf, err := hex.DecodeString(s) + if err != nil { + return nil, err + } + if len(buf) != 34 { + return nil, fmt.Errorf("invalid asset id length: %d", len(buf)) + } + + var assetId AssetId + copy(assetId.Txid[:], buf[:32]) + // Big endian decoding for index + assetId.Index = uint16(buf[32])<<8 | uint16(buf[33]) + return &assetId, nil +} + +type AssetGroup struct { + AssetId *AssetId + Immutable bool + Outputs []AssetOutput + ControlAsset *AssetRef + Inputs []AssetInput + Metadata []Metadata +} + +type AssetPacket struct { + Assets []AssetGroup + Version byte +} + +type Metadata struct { + Key string + Value string +} + +type AssetOutput struct { + Type AssetType + Vout uint32 // For Local + Script []byte // For Teleport + Amount uint64 
+} + +type AssetType uint8 + +const ( + AssetTypeLocal AssetType = 0x01 + AssetTypeTeleport AssetType = 0x02 +) + +type TeleportWitness struct { + Script []byte + Txid [32]byte + Index uint32 +} + +type AssetInput struct { + Type AssetType + Vin uint32 // For Local + Witness TeleportWitness // For Teleport + Amount uint64 +} + +func (g *AssetPacket) EncodeAssetPacket() (wire.TxOut, error) { + opReturnPacket := &ExtensionPacket{ + Asset: g, + } + return opReturnPacket.EncodeExtensionPacket() +} + +func DecodeAssetPacket(txOut wire.TxOut) (*AssetPacket, error) { + packet, err := DecodeExtensionPacket(txOut) + if err != nil { + return nil, err + } + if packet.Asset == nil { + return nil, errors.New("missing asset payload") + } + return packet.Asset, nil +} + +func ContainsAssetPacket(opReturnData []byte) bool { + payload, _, err := parsePacketOpReturn(opReturnData) + return err == nil && len(payload) > 0 +} + +func DeriveAssetPacketFromTx(arkTx wire.MsgTx) (*AssetPacket, int, error) { + for i, output := range arkTx.TxOut { + if ContainsAssetPacket(output.PkScript) { + assetPacket, err := DecodeAssetPacket(*output) + if err != nil { + return nil, 0, fmt.Errorf("error decoding asset Opreturn: %s", err) + } + return assetPacket, i, nil + } + } + + return nil, 0, errors.New("no asset opreturn found in transaction") + +} diff --git a/pkg/ark-lib/extension/encoding.go b/pkg/ark-lib/extension/encoding.go new file mode 100644 index 000000000..815713bb6 --- /dev/null +++ b/pkg/ark-lib/extension/encoding.go @@ -0,0 +1,419 @@ +package extension + +import ( + "bytes" + "fmt" + "io" + + "github.com/lightningnetwork/lnd/tlv" +) + +// Presence byte masks +const ( + maskAssetId uint8 = 1 << 0 // 0x01 + maskControlAsset uint8 = 1 << 1 // 0x02 + maskMetadata uint8 = 1 << 2 // 0x04 + maskImmutable uint8 = 1 << 3 // 0x08 +) + +func (a *AssetGroup) Encode() ([]byte, error) { + var buf bytes.Buffer + var scratch [8]byte + + // 1. 
Calculate and write Presence Byte + var presence uint8 + if a.AssetId != nil { + presence |= maskAssetId + } + if a.ControlAsset != nil { + presence |= maskControlAsset + } + if len(a.Metadata) > 0 { + presence |= maskMetadata + } + if a.Immutable { + presence |= maskImmutable + } + if err := buf.WriteByte(presence); err != nil { + return nil, err + } + + // 2. Write fields in fixed order based on presence + + // AssetId + if (presence & maskAssetId) != 0 { + if _, err := buf.Write(a.AssetId.Txid[:]); err != nil { + return nil, err + } + if err := tlv.EUint16(&buf, &a.AssetId.Index, &scratch); err != nil { + return nil, err + } + } + + // ControlAsset + if (presence & maskControlAsset) != 0 { + if err := encodeAssetRef(&buf, a.ControlAsset, &scratch); err != nil { + return nil, err + } + } + + // Metadata + if (presence & maskMetadata) != 0 { + if err := encodeMetadataList(&buf, a.Metadata, &scratch); err != nil { + return nil, err + } + } + + // Immutable: No payload, presence bit is the value (true). + + // 3. Inputs + if err := encodeAssetInputList(&buf, a.Inputs, &scratch); err != nil { + return nil, err + } + + // 4. Outputs + if err := encodeAssetOutputList(&buf, a.Outputs, &scratch); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (a *AssetGroup) Decode(r io.Reader) error { + var scratch [8]byte + + // 1. Read Presence Byte + var presenceBuf [1]byte + if _, err := io.ReadFull(r, presenceBuf[:]); err != nil { + return err + } + presence := presenceBuf[0] + + // 2. 
Read fields + + // AssetId + if (presence & maskAssetId) != 0 { + a.AssetId = &AssetId{} + if _, err := io.ReadFull(r, a.AssetId.Txid[:]); err != nil { + return err + } + if err := tlv.DUint16(r, &a.AssetId.Index, &scratch, 2); err != nil { + return err + } + } + + // ControlAsset + if (presence & maskControlAsset) != 0 { + var err error + a.ControlAsset, err = decodeAssetRef(r, &scratch) + if err != nil { + return err + } + } + + // Metadata + if (presence & maskMetadata) != 0 { + var err error + a.Metadata, err = decodeMetadataList(r, &scratch) + if err != nil { + return err + } + } + + // Immutable + if (presence & maskImmutable) != 0 { + a.Immutable = true + } else { + a.Immutable = false + } + + // 3. Inputs + var err error + a.Inputs, err = decodeAssetInputList(r, &scratch) + if err != nil { + return err + } + + // 4. Outputs + a.Outputs, err = decodeAssetOutputList(r, &scratch) + if err != nil { + return err + } + + return nil +} + +func encodeAssetRef(w io.Writer, ref *AssetRef, scratch *[8]byte) error { + if _, err := w.Write([]byte{byte(ref.Type)}); err != nil { + return err + } + switch ref.Type { + case AssetRefByID: + if _, err := w.Write(ref.AssetId.Txid[:]); err != nil { + return err + } + if err := tlv.EUint16(w, &ref.AssetId.Index, scratch); err != nil { + return err + } + case AssetRefByGroup: + if err := tlv.EUint16(w, &ref.GroupIndex, scratch); err != nil { + return err + } + default: + return fmt.Errorf("unknown asset ref type: %d", ref.Type) + } + return nil +} + +func decodeAssetRef(r io.Reader, scratch *[8]byte) (*AssetRef, error) { + var typBuf [1]byte + if _, err := io.ReadFull(r, typBuf[:]); err != nil { + return nil, err + } + typ := AssetRefType(typBuf[0]) + + ref := &AssetRef{Type: typ} + switch typ { + case AssetRefByID: + if _, err := io.ReadFull(r, ref.AssetId.Txid[:]); err != nil { + return nil, err + } + if err := tlv.DUint16(r, &ref.AssetId.Index, scratch, 2); err != nil { + return nil, err + } + case AssetRefByGroup: + if err := 
tlv.DUint16(r, &ref.GroupIndex, scratch, 2); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown asset ref type: %d", typ) + } + return ref, nil +} + +func encodeMetadataList(w io.Writer, meta []Metadata, scratch *[8]byte) error { + if err := tlv.WriteVarInt(w, uint64(len(meta)), scratch); err != nil { + return err + } + for _, m := range meta { + keyBytes := []byte(m.Key) + valBytes := []byte(m.Value) + + if err := tlv.WriteVarInt(w, uint64(len(keyBytes)), scratch); err != nil { + return err + } + if _, err := w.Write(keyBytes); err != nil { + return err + } + if err := tlv.WriteVarInt(w, uint64(len(valBytes)), scratch); err != nil { + return err + } + if _, err := w.Write(valBytes); err != nil { + return err + } + } + return nil +} + +func decodeMetadataList(r io.Reader, scratch *[8]byte) ([]Metadata, error) { + count, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + + meta := make([]Metadata, count) + for i := uint64(0); i < count; i++ { + // Key + kLen, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + kBytes := make([]byte, kLen) + if _, err := io.ReadFull(r, kBytes); err != nil { + return nil, err + } + + // Value + vLen, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + vBytes := make([]byte, vLen) + if _, err := io.ReadFull(r, vBytes); err != nil { + return nil, err + } + + meta[i] = Metadata{Key: string(kBytes), Value: string(vBytes)} + } + return meta, nil +} + +func encodeAssetInputList(w io.Writer, inputs []AssetInput, scratch *[8]byte) error { + if err := tlv.WriteVarInt(w, uint64(len(inputs)), scratch); err != nil { + return err + } + for _, in := range inputs { + if _, err := w.Write([]byte{byte(in.Type)}); err != nil { + return err + } + switch in.Type { + case AssetTypeLocal: + if err := tlv.EUint32(w, &in.Vin, scratch); err != nil { + return err + } + if err := tlv.EUint64(w, &in.Amount, scratch); err != nil { + return err + } + case 
AssetTypeTeleport: + // Amount + if err := tlv.EUint64(w, &in.Amount, scratch); err != nil { + return err + } + // Witness + if err := tlv.WriteVarInt(w, uint64(len(in.Witness.Script)), scratch); err != nil { + return err + } + if _, err := w.Write(in.Witness.Script); err != nil { + return err + } + if _, err := w.Write(in.Witness.Txid[:]); err != nil { + return err + } + + if err := tlv.EUint32(w, &in.Witness.Index, scratch); err != nil { + return err + } + default: + return fmt.Errorf("unknown asset input type: %d", in.Type) + } + } + return nil +} + +func decodeAssetInputList(r io.Reader, scratch *[8]byte) ([]AssetInput, error) { + count, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + inputs := make([]AssetInput, count) + for i := uint64(0); i < count; i++ { + var typBuf [1]byte + if _, err := io.ReadFull(r, typBuf[:]); err != nil { + return nil, err + } + inputs[i].Type = AssetType(typBuf[0]) + + switch inputs[i].Type { + case AssetTypeLocal: + if err := tlv.DUint32(r, &inputs[i].Vin, scratch, 4); err != nil { + return nil, err + } + if err := tlv.DUint64(r, &inputs[i].Amount, scratch, 8); err != nil { + return nil, err + } + case AssetTypeTeleport: + if err := tlv.DUint64(r, &inputs[i].Amount, scratch, 8); err != nil { + return nil, err + } + + // Script + sLen, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + inputs[i].Witness.Script = make([]byte, sLen) + if _, err := io.ReadFull(r, inputs[i].Witness.Script); err != nil { + return nil, err + } + + // Txid + if _, err := io.ReadFull(r, inputs[i].Witness.Txid[:]); err != nil { + return nil, err + } + + // Index + if err := tlv.DUint32(r, &inputs[i].Witness.Index, scratch, 4); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown asset input type: %d", inputs[i].Type) + } + } + return inputs, nil +} + +func encodeAssetOutputList(w io.Writer, outputs []AssetOutput, scratch *[8]byte) error { + if err := tlv.WriteVarInt(w, 
uint64(len(outputs)), scratch); err != nil { + return err + } + for _, out := range outputs { + if _, err := w.Write([]byte{byte(out.Type)}); err != nil { + return err + } + switch out.Type { + case AssetTypeLocal: + if err := tlv.EUint32(w, &out.Vout, scratch); err != nil { + return err + } + if err := tlv.EUint64(w, &out.Amount, scratch); err != nil { + return err + } + case AssetTypeTeleport: + // Script (Commitment) - variable length + if err := tlv.WriteVarInt(w, uint64(len(out.Script)), scratch); err != nil { + return err + } + if _, err := w.Write(out.Script); err != nil { + return err + } + if err := tlv.EUint64(w, &out.Amount, scratch); err != nil { + return err + } + default: + return fmt.Errorf("unknown asset output type: %d", out.Type) + } + } + return nil +} + +func decodeAssetOutputList(r io.Reader, scratch *[8]byte) ([]AssetOutput, error) { + count, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + outputs := make([]AssetOutput, count) + for i := uint64(0); i < count; i++ { + var typBuf [1]byte + if _, err := io.ReadFull(r, typBuf[:]); err != nil { + return nil, err + } + outputs[i].Type = AssetType(typBuf[0]) + + switch outputs[i].Type { + case AssetTypeLocal: + if err := tlv.DUint32(r, &outputs[i].Vout, scratch, 4); err != nil { + return nil, err + } + if err := tlv.DUint64(r, &outputs[i].Amount, scratch, 8); err != nil { + return nil, err + } + case AssetTypeTeleport: + // Script (Commitment) + sLen, err := tlv.ReadVarInt(r, scratch) + if err != nil { + return nil, err + } + outputs[i].Script = make([]byte, sLen) + if _, err := io.ReadFull(r, outputs[i].Script); err != nil { + return nil, err + } + + if err := tlv.DUint64(r, &outputs[i].Amount, scratch, 8); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown asset output type: %d", outputs[i].Type) + } + } + return outputs, nil +} diff --git a/pkg/ark-lib/extension/extension.go b/pkg/ark-lib/extension/extension.go new file mode 100644 index 
000000000..78830c76c --- /dev/null +++ b/pkg/ark-lib/extension/extension.go @@ -0,0 +1,282 @@ +package extension + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/btcsuite/btcd/btcec/v2/schnorr" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/tlv" +) + +var ArkadeMagic []byte = []byte{0x41, 0x52, 0x4B} // "ARK" + +type ExtensionPacket struct { + Asset *AssetPacket + SubDust *SubDustPacket +} + +const ( + MarkerSubDustKey byte = 0x01 + MarkerAssetPayload byte = 0x00 +) + +func (packet *ExtensionPacket) EncodeExtensionPacket() (wire.TxOut, error) { + if packet == nil || (packet.Asset == nil && (packet.SubDust == nil || packet.SubDust.Key == nil)) { + return wire.TxOut{}, errors.New("empty op_return packet") + } + + var scratch [8]byte + var tlvData bytes.Buffer + if _, err := tlvData.Write(ArkadeMagic); err != nil { + return wire.TxOut{}, err + } + if packet.SubDust != nil && packet.SubDust.Key != nil { + if err := tlvData.WriteByte(MarkerSubDustKey); err != nil { + return wire.TxOut{}, err + } + subDustKey := schnorr.SerializePubKey(packet.SubDust.Key) + if err := tlv.WriteVarInt(&tlvData, uint64(len(subDustKey)), &scratch); err != nil { + return wire.TxOut{}, err + } + if _, err := tlvData.Write(subDustKey); err != nil { + return wire.TxOut{}, err + } + } + if packet.Asset != nil { + encodedAssets, err := encodeAssetPacket(packet.Asset.Assets) + if err != nil { + return wire.TxOut{}, err + } + + // Spec does not mention a version byte in the payload value for Type 0x00. + // "Value: Asset_Payload ... Asset_Payload: The TLV packet containing asset group data" + // "Packet := { GroupCount, Groups }" + // So we just write encodedAssets. 
+ + if err := tlvData.WriteByte(MarkerAssetPayload); err != nil { + return wire.TxOut{}, err + } + if err := tlv.WriteVarInt(&tlvData, uint64(len(encodedAssets)), &scratch); err != nil { + return wire.TxOut{}, err + } + if _, err := tlvData.Write(encodedAssets); err != nil { + return wire.TxOut{}, err + } + } + + builder := txscript.NewScriptBuilder().AddOp(txscript.OP_RETURN) + builder.AddFullData(tlvData.Bytes()) + opReturnPubkey, err := builder.Script() + if err != nil { + return wire.TxOut{}, err + } + + var amount uint64 = 0 + if packet.SubDust != nil { + amount = packet.SubDust.Amount + } + + return wire.TxOut{ + Value: int64(amount), + PkScript: opReturnPubkey, + }, nil +} + +func DecodeExtensionPacket(txOut wire.TxOut) (*ExtensionPacket, error) { + opReturnData := txOut.PkScript + + if len(opReturnData) == 0 || opReturnData[0] != txscript.OP_RETURN { + return nil, errors.New("OP_RETURN not present") + } + + assetPayload, subDustKey, err := parsePacketOpReturn(opReturnData) + if err != nil { + return nil, err + } + + packet := &ExtensionPacket{} + + if len(assetPayload) > 0 { + assetPacket, err := decodeAssetPacket(assetPayload) + if err != nil { + return nil, err + } + packet.Asset = assetPacket + } + + if len(subDustKey) > 0 { + key, err := schnorr.ParsePubKey(subDustKey) + if err != nil { + return nil, err + } + packet.SubDust = &SubDustPacket{Key: key, Amount: uint64(txOut.Value)} + } + + if packet.Asset == nil && packet.SubDust == nil { + return nil, errors.New("missing op_return payload") + } + + return packet, nil +} + +// parsePacketOpReturn extracts the asset payload and optional sub-dust pubkey from an OP_RETURN script. +// (OP_RETURN ...). 
+func parsePacketOpReturn(opReturnData []byte) ([]byte, []byte, error) { + if len(opReturnData) == 0 || opReturnData[0] != txscript.OP_RETURN { + return nil, nil, errors.New("OP_RETURN not present") + } + + tokenizer := txscript.MakeScriptTokenizer(0, opReturnData) + if !tokenizer.Next() || tokenizer.Opcode() != txscript.OP_RETURN { + if err := tokenizer.Err(); err != nil { + return nil, nil, err + } + return nil, nil, errors.New("invalid OP_RETURN script") + } + + var payload []byte + + for tokenizer.Next() { + data := tokenizer.Data() + if data == nil { + return nil, nil, errors.New("invalid OP_RETURN data push") + } + + payload = append(payload, data...) + } + + if err := tokenizer.Err(); err != nil { + return nil, nil, err + } + + if len(payload) == 0 { + return nil, nil, errors.New("missing OP_RETURN payload") + } + + if len(payload) < len(ArkadeMagic) || !bytes.HasPrefix(payload, ArkadeMagic) { + return nil, nil, errors.New("invalid op_return payload magic") + } + + payload = payload[len(ArkadeMagic):] + + var subDustKey []byte + var assetPayload []byte + reader := bytes.NewReader(payload) + var scratch [8]byte + + for reader.Len() > 0 { + typ, err := reader.ReadByte() + if err != nil { + return nil, nil, err + } + + length, err := tlv.ReadVarInt(reader, &scratch) + if err != nil { + return nil, nil, err + } + if uint64(reader.Len()) < length { + return nil, nil, errors.New("invalid TLV length for OP_RETURN payload") + } + + value := make([]byte, length) + if _, err := io.ReadFull(reader, value); err != nil { + return nil, nil, err + } + + switch typ { + case MarkerSubDustKey: + if subDustKey == nil { + subDustKey = value + } + case MarkerAssetPayload: + if assetPayload == nil { + assetPayload = value + } + } + } + + if len(assetPayload) == 0 && len(subDustKey) == 0 { + return nil, nil, errors.New("missing op_return payload") + } + + return assetPayload, subDustKey, nil +} + +func encodeAssetPacket(assets []AssetGroup) ([]byte, error) { + var scratch [8]byte 
+ var buf bytes.Buffer + + totalCount := uint64(len(assets)) + + if err := tlv.WriteVarInt(&buf, totalCount, &scratch); err != nil { + return nil, err + } + + for _, asset := range assets { + encodedAsset, err := asset.Encode() + if err != nil { + return nil, err + } + + // No length prefix, groups are self-delimiting/known + if _, err := buf.Write(encodedAsset); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func decodeAssetPacket(payload []byte) (*AssetPacket, error) { + reader := bytes.NewReader(payload) + var scratch [8]byte + + assetCount, err := tlv.ReadVarInt(reader, &scratch) + if err != nil { + return nil, fmt.Errorf("invalid asset group count: %w", err) + } + + if assetCount == 0 { + return nil, errors.New("empty asset group") + } + + assets := make([]AssetGroup, 0, int(assetCount)) + for i := uint64(0); i < assetCount; i++ { + var decoded AssetGroup + if err := decoded.Decode(reader); err != nil { + return nil, fmt.Errorf("failed to decode asset group %d: %w", i, err) + } + + assets = append(assets, decoded) + } + + for i := range assets { + normalizeAssetSlices(&assets[i]) + } + + if reader.Len() != 0 { + return nil, errors.New("unexpected trailing bytes in asset group payload") + } + + group := &AssetPacket{ + Assets: assets, + } + + return group, nil +} + +func normalizeAssetSlices(a *AssetGroup) { + if len(a.Inputs) == 0 { + a.Inputs = nil + } + if len(a.Outputs) == 0 { + a.Outputs = nil + } + if len(a.Metadata) == 0 { + a.Metadata = nil + } +} diff --git a/pkg/ark-lib/extension/extension_test.go b/pkg/ark-lib/extension/extension_test.go new file mode 100644 index 000000000..3e564c21b --- /dev/null +++ b/pkg/ark-lib/extension/extension_test.go @@ -0,0 +1,311 @@ +package extension + +import ( + "bytes" + "encoding/hex" + "io" + "testing" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" + "github.com/btcsuite/btcd/txscript" + "github.com/lightningnetwork/lnd/tlv" + 
"github.com/stretchr/testify/require" +) + +func TestExtension(t *testing.T) { + t.Parallel() + t.Run("AssetEncodeDecodeRoundTrip", testAssetEncodeDecodeRoundTrip) + t.Run("AssetGroupEncodeDecode", testAssetGroupEncodeDecode) + t.Run("AssetGroupEncodeDecodeWithSubDustKey", testAssetGroupEncodeDecodeWithSubDustKey) + t.Run("AssetIdStringConversion", testAssetIdStringConversion) + t.Run("AssetOutputListEncodeDecode", testAssetOutputListEncodeDecode) + t.Run("AssetInputListEncodeDecode", testAssetInputListEncodeDecode) + t.Run("AssetGroupEncodeDecodeWithGroupIndexRef", testAssetGroupEncodeDecodeWithGroupIndexRef) +} + +func testAssetEncodeDecodeRoundTrip(t *testing.T) { + asset := AssetGroup{ + AssetId: &AssetId{ + Txid: deterministicBytesArray(0x3c), + Index: 2, + }, + Outputs: []AssetOutput{ + { + Type: AssetTypeLocal, + Amount: 11, + Vout: 0, + }, + { + Type: AssetTypeTeleport, + Script: deterministicTxhash(0xcc), + Amount: 22, + }, + }, + ControlAsset: AssetRefFromId(AssetId{ + Txid: deterministicBytesArray(0x3c), + Index: 1, + }), + Inputs: []AssetInput{ + { + Type: AssetTypeLocal, + Vin: 7, + Amount: 20, + }, + { + Type: AssetTypeTeleport, + // Vin is not encoded for Teleport inputs + Vin: 0, + Witness: TeleportWitness{ + Script: []byte{0x00, 0x01, 0x02, 0x03}, + Txid: deterministicBytesArray(0x55), + Index: 123, + }, + Amount: 40, + }, + }, + Metadata: []Metadata{ + {Key: "purpose", Value: "roundtrip"}, + {Key: "owner", Value: "arkade"}, + }, + Immutable: true, + } + + encoded, err := asset.Encode() + require.NoError(t, err) + require.NotEmpty(t, encoded) + + var decoded AssetGroup + + require.NoError(t, decoded.Decode(bytes.NewReader(encoded))) + require.Equal(t, asset, decoded) +} + +func testAssetGroupEncodeDecode(t *testing.T) { + controlAsset := AssetGroup{ + AssetId: ptrAssetId(deterministicAssetId(0x11)), + Outputs: []AssetOutput{{Type: AssetTypeTeleport, Script: deterministicTxhash(0xdd), Amount: 1}}, + ControlAsset: deterministicAssetRefId(0x3c), + 
Metadata: []Metadata{{Key: "kind", Value: "control"}}, + } + + normalAsset := AssetGroup{ + AssetId: ptrAssetId(deterministicAssetId(0x12)), + Outputs: []AssetOutput{{Type: AssetTypeLocal, Amount: 10, Vout: 1}}, + ControlAsset: deterministicAssetRefId(0x3c), + Inputs: []AssetInput{{ + Type: AssetTypeLocal, + Vin: 1, + Amount: 5, + }}, + Metadata: []Metadata{{Key: "kind", Value: "normal"}}, + } + + packet := AssetPacket{ + Assets: []AssetGroup{controlAsset, normalAsset}, + } + + extPacket := &ExtensionPacket{Asset: &packet} + txOut, err := extPacket.EncodeExtensionPacket() + require.NoError(t, err) + + decodedExt, err := DecodeExtensionPacket(txOut) + require.NoError(t, err) + require.NotNil(t, decodedExt.Asset) + require.Equal(t, packet, *decodedExt.Asset) +} + +func testAssetGroupEncodeDecodeWithGroupIndexRef(t *testing.T) { + t.Parallel() + + groupIndex := uint16(1) + assetGroup := AssetGroup{ + AssetId: ptrAssetId(deterministicAssetId(0x21)), + ControlAsset: AssetRefFromGroupIndex(groupIndex), + Outputs: []AssetOutput{{Type: AssetTypeLocal, Amount: 10, Vout: 0}}, + } + + encoded, err := assetGroup.Encode() + require.NoError(t, err) + + var decoded AssetGroup + require.NoError(t, decoded.Decode(bytes.NewReader(encoded))) + require.NotNil(t, decoded.ControlAsset) + require.Equal(t, AssetRefByGroup, decoded.ControlAsset.Type) + require.Equal(t, groupIndex, decoded.ControlAsset.GroupIndex) +} + +func testAssetIdStringConversion(t *testing.T) { + txid := deterministicBytesArray(0x01) + index := uint16(12345) + assetId := AssetId{Txid: txid, Index: index} + + s := assetId.ToString() + decoded, err := AssetIdFromString(s) + require.NoError(t, err) + require.Equal(t, &assetId, decoded) + + // Test invalid hex + _, err = AssetIdFromString("invalid") + require.Error(t, err) + + // Test invalid length + _, err = AssetIdFromString(hex.EncodeToString(make([]byte, 35))) + require.Error(t, err) +} + +func testAssetGroupEncodeDecodeWithSubDustKey(t *testing.T) { + subDustKey := 
deterministicPubKey(t, 0x55) + normalAsset := AssetGroup{ + AssetId: ptrAssetId(deterministicAssetId(0x12)), + Outputs: []AssetOutput{{Type: AssetTypeLocal, Amount: 10, Vout: 1}}, + ControlAsset: deterministicAssetRefId(0xaa), + } + + assetPacket := AssetPacket{ + Assets: []AssetGroup{normalAsset}, + } + + opReturnPacket := &ExtensionPacket{ + Asset: &assetPacket, + SubDust: &SubDustPacket{Key: &subDustKey, Amount: 220}, + } + + txOut, err := opReturnPacket.EncodeExtensionPacket() + require.NoError(t, err) + + tokenizer := txscript.MakeScriptTokenizer(0, txOut.PkScript) + require.True(t, tokenizer.Next()) + require.Equal(t, txscript.OP_RETURN, int(tokenizer.Opcode())) + require.True(t, tokenizer.Next()) + payload := tokenizer.Data() + require.NotEmpty(t, payload) + require.False(t, tokenizer.Next()) + require.NoError(t, tokenizer.Err()) + + require.True(t, bytes.HasPrefix(payload, ArkadeMagic)) + reader := bytes.NewReader(payload[len(ArkadeMagic):]) + var scratch [8]byte + + typ, err := reader.ReadByte() + require.NoError(t, err) + require.Equal(t, MarkerSubDustKey, typ) + + length, err := tlv.ReadVarInt(reader, &scratch) + require.NoError(t, err) + subDustValue := make([]byte, length) + _, err = io.ReadFull(reader, subDustValue) + require.NoError(t, err) + require.Equal(t, schnorr.SerializePubKey(&subDustKey), subDustValue) + + typ, err = reader.ReadByte() + require.NoError(t, err) + require.Equal(t, MarkerAssetPayload, typ) + + length, err = tlv.ReadVarInt(reader, &scratch) + require.NoError(t, err) + assetValue := make([]byte, length) + _, err = io.ReadFull(reader, assetValue) + require.NoError(t, err) + require.NotEmpty(t, assetValue) + // No version byte check as it is removed + + decodedExt, err := DecodeExtensionPacket(txOut) + require.NoError(t, err) + require.NotNil(t, decodedExt.SubDust) + require.True(t, subDustKey.IsEqual(decodedExt.SubDust.Key)) + require.NotNil(t, decodedExt.Asset) + require.Len(t, decodedExt.Asset.Assets, 1) + require.Equal(t, 
normalAsset, decodedExt.Asset.Assets[0]) +} + +func testAssetOutputListEncodeDecode(t *testing.T) { + outputs := []AssetOutput{ + { + Type: AssetTypeLocal, + Vout: 0, + Amount: 100, + }, + { + Type: AssetTypeTeleport, + Script: deterministicTxhash(0xEE), + Amount: 200, + }, + } + + var scratch [8]byte + var buf bytes.Buffer + require.NoError(t, encodeAssetOutputList(&buf, outputs, &scratch)) + + var decoded []AssetOutput + reader := bytes.NewReader(buf.Bytes()) + decoded, err := decodeAssetOutputList(reader, &scratch) + require.NoError(t, err) + require.Equal(t, outputs, decoded) +} + +func testAssetInputListEncodeDecode(t *testing.T) { + inputs := []AssetInput{ + { + Type: AssetTypeLocal, + Amount: 80, + Vin: 1, + }, + { + Type: AssetTypeTeleport, + Vin: 0, + Amount: 20, + Witness: TeleportWitness{ + Script: []byte{0xde, 0xad, 0xbe, 0xef}, + Txid: deterministicBytesArray(0x11), + Index: 456, + }, + }, + } + + var scratch [8]byte + var buf bytes.Buffer + require.NoError(t, encodeAssetInputList(&buf, inputs, &scratch)) + + var decoded []AssetInput + reader := bytes.NewReader(buf.Bytes()) + decoded, err := decodeAssetInputList(reader, &scratch) + require.NoError(t, err) + require.Equal(t, inputs, decoded) +} + +func deterministicPubKey(t *testing.T, seed byte) btcec.PublicKey { + t.Helper() + + keyBytes := bytes.Repeat([]byte{seed}, 32) + priv, pub := btcec.PrivKeyFromBytes(keyBytes) + require.NotNil(t, priv) + require.NotNil(t, pub) + + return *pub +} + +func deterministicTxhash(seed byte) []byte { + return bytes.Repeat([]byte{seed}, 32) +} + +func deterministicBytesArray(seed byte) [32]byte { + var arr [32]byte + copy(arr[:], deterministicTxhash(seed)) + return arr +} + +func deterministicAssetId(seed byte) AssetId { + return AssetId{ + Txid: deterministicBytesArray(seed), + Index: 0, + } +} + +func ptrAssetId(id AssetId) *AssetId { + return &id +} + +func deterministicAssetRefId(seed byte) *AssetRef { + return AssetRefFromId(deterministicAssetId(seed)) +} diff 
--git a/pkg/ark-lib/extension/subdust.go b/pkg/ark-lib/extension/subdust.go new file mode 100644 index 000000000..84ab8462e --- /dev/null +++ b/pkg/ark-lib/extension/subdust.go @@ -0,0 +1,26 @@ +package extension + +import ( + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/wire" +) + +type SubDustPacket struct { + Key *btcec.PublicKey + Amount uint64 +} + +func (packet *SubDustPacket) EncodeSubDustPacket() (wire.TxOut, error) { + opReturnPacket := &ExtensionPacket{ + SubDust: packet, + } + return opReturnPacket.EncodeExtensionPacket() +} + +func DecodeSubDustPacket(txOut wire.TxOut) (*SubDustPacket, error) { + packet, err := DecodeExtensionPacket(txOut) + if err != nil { + return nil, err + } + return packet.SubDust, nil +} diff --git a/pkg/ark-lib/go.mod b/pkg/ark-lib/go.mod index 90b8801a2..95b02f3e2 100644 --- a/pkg/ark-lib/go.mod +++ b/pkg/ark-lib/go.mod @@ -11,6 +11,7 @@ require ( github.com/btcsuite/btcd/btcutil/psbt v1.1.9 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/btcsuite/btcwallet v0.16.10-0.20240718224643-db3a4a2543bd + github.com/lightningnetwork/lnd/tlv v1.2.6 github.com/stretchr/testify v1.10.0 ) @@ -23,7 +24,6 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/lightninglabs/neutrino/cache v1.1.2 // indirect github.com/lightningnetwork/lnd/fn v1.2.1 // indirect - github.com/lightningnetwork/lnd/tlv v1.2.6 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect go.etcd.io/bbolt v1.3.10 // indirect diff --git a/pkg/ark-lib/intent/message.go b/pkg/ark-lib/intent/message.go index 74ed95052..696be5cf5 100644 --- a/pkg/ark-lib/intent/message.go +++ b/pkg/ark-lib/intent/message.go @@ -39,6 +39,12 @@ type RegisterMessage struct { CosignersPublicKeys []string `json:"cosigners_public_keys"` } +type AssetOutput struct { + AssetId string `json:"asset_id"` + TeleportHash string `json:"teleport_hash"` + Amount uint64 `json:"amount"` +} + func (m 
RegisterMessage) Encode() (string, error) { encoded, err := json.Marshal(m) if err != nil { diff --git a/pkg/ark-lib/intent/proof.go b/pkg/ark-lib/intent/proof.go index 918fab175..a31acbece 100644 --- a/pkg/ark-lib/intent/proof.go +++ b/pkg/ark-lib/intent/proof.go @@ -45,6 +45,7 @@ type Input struct { OutPoint *wire.OutPoint Sequence uint32 WitnessUtxo *wire.TxOut + IsExtended bool } // Verify takes an encoded b64 proof tx and a message to validate the proof @@ -191,6 +192,11 @@ func (p Proof) GetOutpoints() []wire.OutPoint { return outpoints } +type IntentOutpoint struct { + wire.OutPoint + IsSeal bool +} + // ContainsOutputs returns true if the proof specifies outputs to register in ark batches func (p Proof) ContainsOutputs() bool { if len(p.UnsignedTx.TxOut) == 0 { diff --git a/pkg/ark-lib/offchain/tx_test.go b/pkg/ark-lib/offchain/tx_test.go new file mode 100644 index 000000000..c3e6fe00b --- /dev/null +++ b/pkg/ark-lib/offchain/tx_test.go @@ -0,0 +1,256 @@ +package offchain + +import ( + "bytes" + "crypto/sha256" + "testing" + + arklib "github.com/arkade-os/arkd/pkg/ark-lib" + "github.com/arkade-os/arkd/pkg/ark-lib/extension" + "github.com/arkade-os/arkd/pkg/ark-lib/script" + "github.com/arkade-os/arkd/pkg/ark-lib/txutils" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcwallet/waddrmgr" + "github.com/stretchr/testify/require" +) + +func TestRebuildAssetTxs(t *testing.T) { + ownerKey := mustKey(t, "asset-rebuild-owner-ca") + signerKey := mustKey(t, "asset-rebuild-signer-ca") + + collaborativeClosure := &script.MultisigClosure{ + PubKeys: []*btcec.PublicKey{ownerKey.PubKey(), signerKey.PubKey()}, + } + + dustAmount := int64(1000) + controlVtxo, controlTapKey := buildVtxoInputWithSeed(t, dustAmount, collaborativeClosure, "control-vtxo") + normalVtxo, normalTapKey := buildVtxoInputWithSeed(t, dustAmount, collaborativeClosure, 
"normal-vtxo") + changeVtxo, changeTapKey := buildVtxoInputWithSeed(t, dustAmount, collaborativeClosure, "change-vtxo") + + // Create asset group with matching control asset + assetInputTxhash := sha256.Sum256([]byte("asset-input")) + var assetInTxId [32]byte + copy(assetInTxId[:], assetInputTxhash[:]) + + caInputTxId := sha256.Sum256([]byte("control-input")) + var caID [32]byte + copy(caID[:], caInputTxId[:]) + + controlAsset := extension.AssetGroup{ + AssetId: &extension.AssetId{Txid: caID, Index: 0}, + Inputs: []extension.AssetInput{{ + Type: extension.AssetTypeLocal, + Vin: 0, + Amount: 7, + }}, + Outputs: []extension.AssetOutput{ + { + Type: extension.AssetTypeLocal, + Amount: 1, + Vout: 0, + }, + }, + Metadata: []extension.Metadata{{Key: "type", Value: "control"}}, + } + + normalAsset := extension.AssetGroup{ + AssetId: &extension.AssetId{Txid: assetInTxId, Index: 0}, + ControlAsset: extension.AssetRefFromId(extension.AssetId{Txid: caID, Index: 0}), + Inputs: []extension.AssetInput{{ + Type: extension.AssetTypeLocal, + Vin: 1, + Amount: 5, + }}, + Outputs: []extension.AssetOutput{ + { + Type: extension.AssetTypeLocal, + Amount: 100, + Vout: 1, + }, + }, + Metadata: []extension.Metadata{{Key: "type", Value: "normal"}}, + } + + assetGroup := &extension.AssetPacket{ + Assets: []extension.AssetGroup{controlAsset, normalAsset}, + Version: extension.AssetVersion, + } + + opPacket := &extension.ExtensionPacket{ + Asset: assetGroup, + SubDust: &extension.SubDustPacket{ + Key: normalTapKey, + Amount: 220, + }, + } + opret, err := opPacket.EncodeExtensionPacket() + require.NoError(t, err) + + changeValue := changeVtxo.Amount - opret.Value + require.GreaterOrEqual(t, changeValue, int64(0)) + + outputs := []*wire.TxOut{ + {Value: controlVtxo.Amount, PkScript: mustP2TRScript(t, controlTapKey)}, + {Value: normalVtxo.Amount, PkScript: mustP2TRScript(t, normalTapKey)}, + &opret, + {Value: changeValue, PkScript: mustP2TRScript(t, changeTapKey)}, + } + + signerScript := 
mustClosureScript(t, &script.CSVMultisigClosure{ + MultisigClosure: script.MultisigClosure{ + PubKeys: []*btcec.PublicKey{signerKey.PubKey()}, + }, + Locktime: arklib.RelativeLocktime{Type: arklib.LocktimeTypeBlock, Value: 4}, + }) + + arkTx, checkpoints, err := BuildTxs( + []VtxoInput{controlVtxo, normalVtxo, changeVtxo}, outputs, signerScript, + ) + require.NoError(t, err) + require.Len(t, checkpoints, 3) + + // Build ins as SubmitOffchainTx does. + vtxoByHash := map[string]VtxoInput{ + controlVtxo.Outpoint.Hash.String(): controlVtxo, + normalVtxo.Outpoint.Hash.String(): normalVtxo, + changeVtxo.Outpoint.Hash.String(): changeVtxo, + } + ins := make([]VtxoInput, 0, len(checkpoints)) + for _, cp := range checkpoints { + prev := cp.UnsignedTx.TxIn[0].PreviousOutPoint + orig := vtxoByHash[prev.Hash.String()] + ins = append(ins, VtxoInput{ + Outpoint: &prev, + Tapscript: orig.Tapscript, + RevealedTapscripts: orig.RevealedTapscripts, + Amount: orig.Amount, + }) + } + + checkpointTxMap := make(map[string]string) + for _, cp := range checkpoints { + encoded, err := cp.B64Encode() + require.NoError(t, err) + checkpointTxMap[cp.UnsignedTx.TxHash().String()] = encoded + } + + outputsNoAnchor := make([]*wire.TxOut, 0, len(arkTx.UnsignedTx.TxOut)-1) + assetGroupIndex := -1 + for idx, out := range arkTx.UnsignedTx.TxOut { + if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) { + continue + } + outputsNoAnchor = append(outputsNoAnchor, out) + if extension.ContainsAssetPacket(out.PkScript) { + assetGroupIndex = idx + } + } + require.NotEqual(t, -1, assetGroupIndex) + + rebuiltArk, rebuiltCheckpoints, err := BuildTxs( + ins, outputsNoAnchor, signerScript, + ) + require.NoError(t, err) + require.Len(t, rebuiltCheckpoints, len(checkpoints)) + require.Equal(t, arkTx.UnsignedTx.TxID(), rebuiltArk.UnsignedTx.TxID()) + + // Verify asset group matches and points to rebuilt checkpoints. 
+ origPacket, err := extension.DecodeAssetPacket(*outputsNoAnchor[assetGroupIndex]) + require.NoError(t, err) + rebuiltPacket, err := extension.DecodeAssetPacket(*rebuiltArk.UnsignedTx.TxOut[assetGroupIndex]) + require.NoError(t, err) + + require.NotNil(t, rebuiltPacket) + require.Len(t, rebuiltPacket.Assets, 2) + require.Equal(t, len(origPacket.Assets[0].Inputs), len(rebuiltPacket.Assets[0].Inputs)) + require.Equal(t, len(origPacket.Assets[1].Inputs), len(rebuiltPacket.Assets[1].Inputs)) + + // Map rebuilt checkpoint txids for quick lookup. + rebuiltCheckpointIDs := make(map[string]struct{}) + for _, cp := range rebuiltCheckpoints { + rebuiltCheckpointIDs[cp.UnsignedTx.TxHash().String()] = struct{}{} + } + + for _, in := range rebuiltPacket.Assets[0].Inputs { + require.Equal(t, extension.AssetTypeLocal, in.Type) + require.Less(t, int(in.Vin), len(rebuiltArk.UnsignedTx.TxIn)) + } + for _, in := range rebuiltPacket.Assets[1].Inputs { + require.Equal(t, extension.AssetTypeLocal, in.Type) + require.Less(t, int(in.Vin), len(rebuiltArk.UnsignedTx.TxIn)) + } +} + +func mustKey(t *testing.T, seed string) *btcec.PrivateKey { + t.Helper() + + sum := sha256.Sum256([]byte(seed)) + key, _ := btcec.PrivKeyFromBytes(sum[:]) + return key +} + +func mustClosureScript(t *testing.T, closure script.Closure) []byte { + t.Helper() + + scriptBytes, err := closure.Script() + require.NoError(t, err) + return scriptBytes +} + +func mustP2TRScript(t *testing.T, key *btcec.PublicKey) []byte { + t.Helper() + + pkScript, err := script.P2TRScript(key) + require.NoError(t, err) + return pkScript +} + +func buildVtxoInput(t *testing.T, amount int64, closure script.Closure) (VtxoInput, *btcec.PublicKey) { + t.Helper() + + vtxoScript := &script.TapscriptsVtxoScript{ + Closures: []script.Closure{closure}, + } + tapKey, tapTree, err := vtxoScript.TapTree() + require.NoError(t, err) + + leafScript := mustClosureScript(t, closure) + tapLeafHash := txscript.NewBaseTapLeaf(leafScript).TapHash() + proof, 
err := tapTree.GetTaprootMerkleProof(tapLeafHash) + require.NoError(t, err) + + controlBlock, err := txscript.ParseControlBlock(proof.ControlBlock) + require.NoError(t, err) + + revealedTapscripts, err := vtxoScript.Encode() + require.NoError(t, err) + + outpoint := &wire.OutPoint{ + Hash: chainhash.DoubleHashH(leafScript), + Index: 0, + } + + return VtxoInput{ + Outpoint: outpoint, + Amount: amount, + Tapscript: &waddrmgr.Tapscript{ + ControlBlock: controlBlock, + RevealedScript: proof.Script, + }, + RevealedTapscripts: revealedTapscripts, + }, tapKey +} + +func buildVtxoInputWithSeed( + t *testing.T, amount int64, closure script.Closure, seed string, +) (VtxoInput, *btcec.PublicKey) { + vtxo, tapKey := buildVtxoInput(t, amount, closure) + vtxo.Outpoint = &wire.OutPoint{ + Hash: chainhash.DoubleHashH([]byte(seed)), + Index: 0, + } + return vtxo, tapKey +} diff --git a/pkg/ark-lib/script/script.go b/pkg/ark-lib/script/script.go index 449ba19ce..0f228b5eb 100644 --- a/pkg/ark-lib/script/script.go +++ b/pkg/ark-lib/script/script.go @@ -124,9 +124,17 @@ func SubDustScript(taprootKey *btcec.PublicKey) ([]byte, error) { } func IsSubDustScript(script []byte) bool { - return len(script) == 32+1+1 && - script[0] == txscript.OP_RETURN && - script[1] == 0x20 + tokenizer := txscript.MakeScriptTokenizer(0, script) + if !tokenizer.Next() || tokenizer.Opcode() != txscript.OP_RETURN { + return false + } + + if !tokenizer.Next() { + return false + } + + data := tokenizer.Data() + return data != nil && len(data) == schnorr.PubKeyBytesLen } func EncodeTaprootSignature(sig []byte, sigHashType txscript.SigHashType) []byte { diff --git a/pkg/ark-lib/script/vtxo_script.go b/pkg/ark-lib/script/vtxo_script.go index d2a6ee3a4..5cf2efade 100644 --- a/pkg/ark-lib/script/vtxo_script.go +++ b/pkg/ark-lib/script/vtxo_script.go @@ -34,6 +34,37 @@ func NewDefaultVtxoScript( } } +func NewTeleportVtxoScript( + owner, signer *btcec.PublicKey, teleportPreimageHash []byte, exitDelay 
arklib.RelativeLocktime, +) *TapscriptsVtxoScript { + + preimageCondition, _ := txscript.NewScriptBuilder(). + AddOp(txscript.OP_SHA256). + AddData(teleportPreimageHash). + AddOp(txscript.OP_EQUAL). + Script() + + claimConditionClosure := &ConditionMultisigClosure{ + Condition: preimageCondition, + MultisigClosure: MultisigClosure{ + PubKeys: []*btcec.PublicKey{owner, signer}, + }, + } + + unilateralDelayClosure := &CSVMultisigClosure{ + MultisigClosure: MultisigClosure{PubKeys: []*btcec.PublicKey{owner}}, + Locktime: exitDelay, + } + + return &TapscriptsVtxoScript{ + []Closure{ + claimConditionClosure, + unilateralDelayClosure, + }, + } + +} + func ParseVtxoScript(scripts []string) (VtxoScript, error) { if len(scripts) == 0 { return nil, fmt.Errorf("empty tapscripts array") diff --git a/pkg/ark-lib/tree/builder.go b/pkg/ark-lib/tree/builder.go index 049467bee..1c4d4e7e0 100644 --- a/pkg/ark-lib/tree/builder.go +++ b/pkg/ark-lib/tree/builder.go @@ -103,7 +103,7 @@ type node interface { } type leaf struct { - output *wire.TxOut + outputs []*wire.TxOut inputScript []byte cosigners []*btcec.PublicKey } @@ -121,11 +121,17 @@ func (l *leaf) getChildren() []node { } func (l *leaf) getAmount() int64 { - return l.output.Value + totalAmount := int64(0) + for _, output := range l.outputs { + totalAmount += output.Value + } + return totalAmount } func (l *leaf) getOutputs() ([]*wire.TxOut, error) { - return []*wire.TxOut{l.output, txutils.AnchorOutput()}, nil + outputs := append([]*wire.TxOut{}, l.outputs...) + outputs = append(outputs, txutils.AnchorOutput()) + return outputs, nil } func (l *leaf) tree( @@ -292,8 +298,11 @@ func createTxTree(receivers []Leaf, tapTreeRoot []byte, radix int) (root node, e return nil, fmt.Errorf("failed to create script pubkey: %w", err) } + var outputs []*wire.TxOut + outputs = []*wire.TxOut{{Value: int64(r.Amount), PkScript: pkScript}} + leafNode := &leaf{ - output: &wire.TxOut{Value: int64(r.Amount), PkScript: pkScript}, + outputs: outputs, inputScript: inputScript,
cosigners: cosigners, } diff --git a/pkg/ark-lib/tree/forfeit_tx.go b/pkg/ark-lib/tree/forfeit_tx.go index 50f663d21..e21145297 100644 --- a/pkg/ark-lib/tree/forfeit_tx.go +++ b/pkg/ark-lib/tree/forfeit_tx.go @@ -6,6 +6,60 @@ import ( "github.com/btcsuite/btcd/wire" ) +func BuildForfeitTxWithAnchor( + inputs []*wire.OutPoint, sequences []uint32, prevouts []*wire.TxOut, + extensionAnchor *wire.TxOut, + signerScript []byte, txLocktime uint32, +) (*psbt.Packet, error) { + + sumPrevout := int64(0) + for _, prevout := range prevouts { + sumPrevout += prevout.Value + } + sumPrevout -= txutils.ANCHOR_VALUE + + forfeitOut := wire.NewTxOut(sumPrevout, signerScript) + return BuildForfeitTxWithOutputAndAnchor(inputs, sequences, prevouts, forfeitOut, extensionAnchor, txLocktime) +} + +func BuildForfeitTxWithOutputAndAnchor( + inputs []*wire.OutPoint, sequences []uint32, prevouts []*wire.TxOut, + forfeitOutput *wire.TxOut, + extensionAnchor *wire.TxOut, + txLocktime uint32, +) (*psbt.Packet, error) { + version := int32(3) + outs := []*wire.TxOut{forfeitOutput} + if extensionAnchor != nil { + outs = append(outs, extensionAnchor) + } + outs = append(outs, txutils.AnchorOutput()) + + partialTx, err := psbt.New( + inputs, + outs, + version, + txLocktime, + sequences, + ) + if err != nil { + return nil, err + } + + updater, err := psbt.NewUpdater(partialTx) + if err != nil { + return nil, err + } + + for i, prevout := range prevouts { + if err := updater.AddInWitnessUtxo(prevout, i); err != nil { + return nil, err + } + } + + return partialTx, nil +} + func BuildForfeitTx( inputs []*wire.OutPoint, sequences []uint32, prevouts []*wire.TxOut, signerScript []byte, txLocktime uint32, @@ -27,7 +81,9 @@ func BuildForfeitTxWithOutput( txLocktime uint32, ) (*psbt.Packet, error) { version := int32(3) - outs := []*wire.TxOut{forfeitOutput, txutils.AnchorOutput()} + outs := []*wire.TxOut{forfeitOutput} + + outs = append(outs, txutils.AnchorOutput()) partialTx, err := psbt.New( inputs, diff 
--git a/pkg/errors/errors.go b/pkg/errors/errors.go index 9822a8927..ef05d7cb2 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -332,6 +332,112 @@ var OFFCHAIN_TX_INSUFFICIENT_FEE = Code[OffchainTxInsufficientFeeMetadata]{ grpccodes.InvalidArgument, } -var INTENT_FEE_EVALUATION_FAILED = Code[any]{33, "INTENT_FEE_EVALUATION_FAILED", grpccodes.Internal} +// Asset validation metadata types +type AssetValidationMetadata struct { + AssetID string `json:"asset_id,omitempty"` + Message string `json:"message,omitempty"` +} + +type AssetInputMetadata struct { + AssetID string `json:"asset_id,omitempty"` + InputIndex int `json:"input_index"` + Txid string `json:"txid,omitempty"` +} + +type AssetOutputMetadata struct { + AssetID string `json:"asset_id,omitempty"` + OutputIndex int `json:"output_index"` + OutputType int `json:"output_type,omitempty"` +} + +type ControlAssetMetadata struct { + AssetID string `json:"asset_id,omitempty"` + ControlAssetID string `json:"control_asset_id,omitempty"` + GroupIndex int `json:"group_index,omitempty"` +} + +type TeleportValidationMetadata struct { + AssetID string `json:"asset_id,omitempty"` + Script string `json:"script,omitempty"` + OutputIndex int `json:"output_index,omitempty"` +} + +type CheckpointValidationMetadata struct { + Txid string `json:"txid"` + InputIndex int `json:"input_index,omitempty"` +} + +type OffchainTxValidationMetadata struct { + Txid string `json:"txid"` + OutputIndex int `json:"output_index,omitempty"` +} + +var ASSET_VALIDATION_FAILED = Code[AssetValidationMetadata]{ + 33, + "ASSET_VALIDATION_FAILED", + grpccodes.InvalidArgument, +} + +var ASSET_NOT_FOUND = Code[AssetValidationMetadata]{ + 45, + "ASSET_NOT_FOUND", + grpccodes.NotFound, +} + +var ASSET_INPUT_INVALID = Code[AssetInputMetadata]{ + 35, + "ASSET_INPUT_INVALID", + grpccodes.InvalidArgument, +} + +var ASSET_OUTPUT_INVALID = Code[AssetOutputMetadata]{ + 36, + "ASSET_OUTPUT_INVALID", + grpccodes.InvalidArgument, +} + +var
CONTROL_ASSET_INVALID = Code[ControlAssetMetadata]{ + 37, + "CONTROL_ASSET_INVALID", + grpccodes.InvalidArgument, +} + +var CONTROL_ASSET_NOT_FOUND = Code[ControlAssetMetadata]{ + 38, + "CONTROL_ASSET_NOT_FOUND", + grpccodes.NotFound, +} + +var TELEPORT_VALIDATION_FAILED = Code[TeleportValidationMetadata]{ + 39, + "TELEPORT_VALIDATION_FAILED", + grpccodes.InvalidArgument, +} + +var CHECKPOINT_TX_INVALID = Code[CheckpointValidationMetadata]{ + 40, + "CHECKPOINT_TX_INVALID", + grpccodes.InvalidArgument, +} + +var CHECKPOINT_TX_NOT_FOUND = Code[CheckpointValidationMetadata]{ + 41, + "CHECKPOINT_TX_NOT_FOUND", + grpccodes.NotFound, +} + +var OFFCHAIN_TX_INVALID = Code[OffchainTxValidationMetadata]{ + 42, + "OFFCHAIN_TX_INVALID", + grpccodes.InvalidArgument, +} + +var ASSET_PACKET_INVALID = Code[AssetValidationMetadata]{ + 43, + "ASSET_PACKET_INVALID", + grpccodes.InvalidArgument, +} + +var INTENT_FEE_EVALUATION_FAILED = Code[any]{44, "INTENT_FEE_EVALUATION_FAILED", grpccodes.Internal} var INTENT_NOT_FOUND = Code[any]{34, "INTENT_NOT_FOUND", grpccodes.NotFound}