Commit 6b20e04

Refine print order and skip device contents (#908)

1 parent 4445f72 commit 6b20e04

File tree: 3 files changed, +27 -24 lines changed

docs_input/basics/sparse_tensor.rst

Lines changed: 2 additions & 2 deletions

@@ -57,13 +57,13 @@ its constituent buffers is constructed as follows::
 The result of the print statement is shown below::
 
   tensor_impl_2_f32: SparseTensor{float} Rank: 2, Sizes:[4, 8], Levels:[4, 8]
-  nse = 5
   format = ( d0, d1 ) -> ( d0 : compressed(non-unique), d1 : singleton )
+  space = CUDA managed memory
+  nse = 5
   pos[0] = ( 0 5 )
   crd[0] = ( 0 0 3 3 3 )
   crd[1] = ( 0 1 2 3 5 )
   values = ( 1.0000e+00 2.0000e+00 3.0000e+00 4.0000e+00 5.0000e+00 )
-  space = CUDA managed memory
 
 Note that, like dense tensors, sparse tensors provide ()-operations
 for indexing. However, users should **never** use the ()-operator
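
For readers decoding the printout above: crd[0] and crd[1] hold the row and column of each stored value, and pos[0] brackets the range of stored entries. A minimal standalone sketch (plain C++, independent of MatX) that expands the printed buffers back into the dense 4x8 matrix used throughout this commit:

// Standalone sketch, not MatX code: expand the pos/crd/values buffers
// from the printout above into the dense 4x8 matrix. The format
// ( d0 : compressed(non-unique), d1 : singleton ) is COO storage:
// crd[0] is the row and crd[1] the column of each stored value.
#include <cstdio>

int main() {
  const int pos0[] = {0, 5};                // pos[0]: entries [0, 5)
  const int crd0[] = {0, 0, 3, 3, 3};       // row of each stored value
  const int crd1[] = {0, 1, 2, 3, 5};       // column of each stored value
  const float values[] = {1.f, 2.f, 3.f, 4.f, 5.f};
  float dense[4][8] = {};                   // zero-initialized
  for (int i = pos0[0]; i < pos0[1]; i++) { // scatter the five nonzeros
    dense[crd0[i]][crd1[i]] = values[i];
  }
  for (int r = 0; r < 4; r++) {
    for (int c = 0; c < 8; c++) {
      printf("%g ", dense[r][c]);
    }
    printf("\n");
  }
  return 0;
}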

examples/sparse_tensor.cu

Lines changed: 2 additions & 2 deletions

@@ -71,12 +71,12 @@ int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
   // | 0, 0, 3, 4, 0, 5, 0, 0 |
   //
   // tensor_impl_2_f32: SparseTensor{float} Rank: 2, Sizes:[4, 8], Levels:[4, 8]
-  // nse = 5
   // format = ( d0, d1 ) -> ( d0 : compressed(non-unique), d1 : singleton )
+  // space = CUDA managed memory
+  // nse = 5
   // crd[0] = ( 0 0 3 3 3 )
   // crd[1] = ( 0 1 2 3 5 )
   // values = ( 1.0000e+00 2.0000e+00 3.0000e+00 4.0000e+00 5.0000e+00 )
-  // space = CUDA managed memory
   //
   auto vals = make_tensor<float>({5});
   auto idxi = make_tensor<int>({5});
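
For context, the vals/idxi buffers at the end of this hunk hold the five stored values and their row coordinates. A hedged sketch of how such an example typically continues; the experimental::make_tensor_coo factory and its exact signature are an assumption, not confirmed by this diff, while the literal values come from the expected-output comment above:

  // Sketch (assumption: MatX's experimental COO factory; signature not
  // confirmed by this diff). Builds the 4x8 matrix from the comment above
  // and prints it in the refined order: format, space, nse, then contents.
  auto idxj = make_tensor<int>({5});
  vals.SetVals({1, 2, 3, 4, 5});  // stored values
  idxi.SetVals({0, 0, 3, 3, 3});  // row coordinates    -> crd[0]
  idxj.SetVals({0, 1, 2, 3, 5});  // column coordinates -> crd[1]
  auto Acoo = experimental::make_tensor_coo(vals, idxi, idxj, {4, 8});
  print(Acoo);  // should emit the SparseTensor{float} header shown above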

include/matx/core/print.h

Lines changed: 23 additions & 20 deletions

@@ -565,31 +565,34 @@ namespace matx {
     cudaDeviceSynchronize();
     if constexpr (is_sparse_tensor_v<Op>) {
       using Format = typename Op::Format;
-      index_t nse = op.Nse();
-      fprintf(fp, "nse = %" MATX_INDEX_T_FMT "\n", nse);
       fprintf(fp, "format = ");
-      Format::print();
-      for (int lvlIdx = 0; lvlIdx < Format::LVL; lvlIdx++) {
-        if (const index_t pend = op.posSize(lvlIdx)) {
-          fprintf(fp, "pos[%d] = (", lvlIdx);
-          for (index_t i = 0; i < pend; i++) {
-            PrintVal(fp, op.POSData(lvlIdx)[i]);
+      Format::print();
+      const auto kind = GetPointerKind(op.Data());
+      fprintf(fp, ")\nspace = %s\n", SpaceString(kind).c_str());
+      const auto nse = op.Nse();
+      fprintf(fp, "nse = %" MATX_INDEX_T_FMT "\n", nse);
+      if (HostPrintable(kind)) {
+        for (int lvlIdx = 0; lvlIdx < Format::LVL; lvlIdx++) {
+          if (const index_t pend = op.posSize(lvlIdx)) {
+            fprintf(fp, "pos[%d] = (", lvlIdx);
+            for (index_t i = 0; i < pend; i++) {
+              PrintVal(fp, op.POSData(lvlIdx)[i]);
+            }
+            fprintf(fp, ")\n");
           }
-          fprintf(fp, ")\n");
-        }
-        if (const index_t cend = op.crdSize(lvlIdx)) {
-          fprintf(fp, "crd[%d] = (", lvlIdx);
-          for (index_t i = 0; i < cend; i++) {
-            PrintVal(fp, op.CRDData(lvlIdx)[i]);
+          if (const index_t cend = op.crdSize(lvlIdx)) {
+            fprintf(fp, "crd[%d] = (", lvlIdx);
+            for (index_t i = 0; i < cend; i++) {
+              PrintVal(fp, op.CRDData(lvlIdx)[i]);
+            }
+            fprintf(fp, ")\n");
           }
-          fprintf(fp, ")\n");
+        }
+        fprintf(fp, "values = (");
+        for (index_t i = 0; i < nse; i++) {
+          PrintVal(fp, op.Data()[i]);
         }
       }
-      fprintf(fp, "values = (");
-      for (index_t i = 0; i < nse; i++) {
-        PrintVal(fp, op.Data()[i]);
-      }
-      fprintf(fp, ")\nspace = %s\n", SpaceString(GetPointerKind(op.Data())).c_str());
     }
     else if constexpr (is_tensor_view_v<Op>) {
       // If the user is printing a tensor with a const pointer underlying the data, we need to do the lookup
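
The key change in this hunk is that the pos/crd/values dumps now run only when HostPrintable(kind) is true, so device-resident contents are skipped instead of being dereferenced from host code. A standalone sketch of that idea using only the CUDA runtime API; host_printable is a hypothetical stand-in for MatX's GetPointerKind/HostPrintable helpers, not the library implementation:

// Standalone sketch of the skip-device-contents guard (hypothetical
// helper, not MatX code): classify a pointer with the CUDA runtime and
// only dereference it on the host when that is actually legal.
#include <cstdio>
#include <cuda_runtime.h>

static bool host_printable(const void *ptr) {
  cudaPointerAttributes attr{};
  if (cudaPointerGetAttributes(&attr, ptr) != cudaSuccess) {
    return false;
  }
  // Host, managed, and unregistered (plain malloc) memory can be read
  // directly from the CPU; cudaMemoryTypeDevice cannot.
  return attr.type != cudaMemoryTypeDevice;
}

int main() {
  float *dev = nullptr;
  cudaMalloc(&dev, 5 * sizeof(float));
  float *managed = nullptr;
  cudaMallocManaged(&managed, 5 * sizeof(float));
  printf("device buffer printable: %d\n", host_printable(dev));      // 0
  printf("managed buffer printable: %d\n", host_printable(managed)); // 1
  cudaFree(dev);
  cudaFree(managed);
  return 0;
}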
