@@ -184,8 +184,7 @@ static void basicAutogradNotImplementedFallbackImpl(
         // users typically call .backward() and backprop through
         // the entire program).
         if (t.is_view() && is_mutable_output) {
-          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-          auto& base = const_cast<at::TensorBase&>(t._base());
+          const auto& base = t._base();
           if (base.requires_grad()) {
             // Can only register_hook on tensors that require grad.
             base.register_hook([op_name](const at::TensorBase& grad) {
@@ -210,8 +209,7 @@ static void basicAutogradNotImplementedFallbackImpl(
         // rebase_history assumes single Tensor(a!) return, and in general
         // custom ops don't have a good in-place story.
         if (!is_mutable_output) {
-          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-          set_history(const_cast<at::Tensor&>(t), grad_fn);
+          set_history(t, grad_fn);
         }
       },
       stack,
@@ -418,11 +416,9 @@ static void autogradNotImplementedFallbackImpl(
       [&](size_t idx_tensor, size_t idx_ret, const at::Tensor& t) {
         if (isDifferentiableType(t.scalar_type())) {
           if (is_inplace_output[idx_ret]) {
-            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-            rebase_history(const_cast<at::Tensor&>(t), grad_fn);
+            rebase_history(t, grad_fn);
           } else {
-            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-            set_history(const_cast<at::Tensor&>(t), grad_fn);
+            set_history(t, grad_fn);
           }
         }
       },