Skip to content

Lluo/lan fix torch compile gpt2 #3671

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 2 commits into
base: release/2.8
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 18 additions & 1 deletion core/runtime/execute_engine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,24 @@ void setup_input_tensors(
// Shape tensor inputs are cast to int64 explicitly.
// Refer to
// https://github.com/NVIDIA/TensorRT/blob/d2f4ef789a9a6ffdf37b55c3f81b486225f6b380/samples/common/sampleInference.cpp#L435
auto input_cpu = inputs[i].clone().contiguous().cpu().to(torch::kInt64);
at::Tensor cloned_input;

// Check if it's a scalar tensor (0-dimensional)
if (inputs[i].dim() == 0 && inputs[i].numel() == 1) {
// It's a scalar tensor; build a 1-element 1-D int64 tensor from its value
int64_t scalar_value = inputs[i].item<int64_t>();
LOG_DEBUG("Input " << i << " is a scalar tensor with value: " << scalar_value);
cloned_input = torch::tensor({scalar_value}, torch::kInt64);
LOG_DEBUG("cloned_input dim: " << cloned_input.dim() << " ; numel: " << cloned_input.numel());
} else {
// It's a regular tensor
LOG_DEBUG(
"Input " << i << " is a regular tensor"
<< " inputs[i]: " << inputs[i]);
cloned_input = inputs[i].clone();
}
auto input_cpu = cloned_input.contiguous().cpu().to(torch::kInt64);

std::vector<int64_t> inputs_cpu_vec(
input_cpu.data_ptr<int64_t>(), input_cpu.data_ptr<int64_t>() + input_cpu.numel());
inputShapeTensorValues.emplace_back(inputs_cpu_vec);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ def create_output_allocator(self) -> None:
def forward(self, *inputs: torch.Tensor) -> torch.Tensor | Tuple[torch.Tensor, ...]:

def run_standard_execution() -> torch.Tensor | Tuple[torch.Tensor, ...]:
shape_changed = self.validate_input_shapes(inputs)
shape_changed = self.validate_input_shapes(contiguous_inputs)
(
need_cudagraphs_record,
can_use_pre_allocated_outputs,
Expand Down
Loading