@dlibenzi
Created July 9, 2020 13:43
Traceback (most recent call last):
  File "/usr/local/google/home/dlibenzi/tmp/TRASH/grad_grad.py", line 17, in <module>
    (dw * dw).mean().backward()
  File "/usr/local/google/home/dlibenzi/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/tensor.py", line 185, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "/usr/local/google/home/dlibenzi/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/__init__.py", line 127, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: /usr/local/google/home/dlibenzi/google-git/pytorch/xla/third_party/tensorflow/bazel-tensorflow/tensorflow/compiler/xla/xla_client/debug_macros.h:27 : Check failed: status.status() == ::tensorflow::Status::OK() (Invalid argument: conv_backward_input: Size of out_backprop doesn't match computed: actual = 3, computed = 4 spatial_dim: 2 input: 32 filter: 16 output: 3 stride: 1 dilation: 2 vs. OK)
*** Begin stack trace ***
tensorflow::CurrentStackTrace[abi:cxx11]()
xla::XlaOp ConsumeValue<xla::XlaOp>(stream_executor::port::StatusOr<xla::XlaOp>&&)
torch_xla::BuildConvolutionOverrideable(xla::XlaOp, xla::XlaOp, absl::lts_2020_02_25::Span<long long const>, absl::lts_2020_02_25::Span<long long const>, absl::lts_2020_02_25::Span<long long const>, bool, absl::lts_2020_02_25::Span<long long const>, long long)
std::function<xla::XlaOp (absl::lts_2020_02_25::Span<xla::XlaOp const>)>::operator()(absl::lts_2020_02_25::Span<xla::XlaOp const>) const
torch_xla::ir::ops::InferOutputShape(absl::lts_2020_02_25::Span<xla::Shape const>, std::function<xla::XlaOp (absl::lts_2020_02_25::Span<xla::XlaOp const>)> const&)
std::function<xla::Shape ()>::operator()() const
torch_xla::ir::Node::GetOpShape(std::function<xla::Shape ()> const&) const
torch_xla::ir::Node::Node(torch_xla::ir::OpKind, absl::lts_2020_02_25::Span<torch_xla::ir::Value const>, std::function<xla::Shape ()> const&, unsigned long, absl::lts_2020_02_25::uint128)
torch_xla::ir::ops::ConvolutionOverrideable::ConvolutionOverrideable(torch_xla::ir::Value const&, torch_xla::ir::Value const&, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool, std::vector<long long, std::allocator<long long> >, long long)
void __gnu_cxx::new_allocator<torch_xla::ir::ops::ConvolutionOverrideable>::construct<torch_xla::ir::ops::ConvolutionOverrideable, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(torch_xla::ir::ops::ConvolutionOverrideable*, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
void std::allocator_traits<std::allocator<torch_xla::ir::ops::ConvolutionOverrideable> >::construct<torch_xla::ir::ops::ConvolutionOverrideable, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>&, torch_xla::ir::ops::ConvolutionOverrideable*, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::_Sp_counted_ptr_inplace<torch_xla::ir::ops::ConvolutionOverrideable, std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>, (__gnu_cxx::_Lock_policy)2>::_Sp_counted_ptr_inplace<torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::__shared_count<(__gnu_cxx::_Lock_policy)2>::__shared_count<torch_xla::ir::ops::ConvolutionOverrideable, std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(torch_xla::ir::ops::ConvolutionOverrideable*&, std::_Sp_alloc_shared_tag<std::allocator<torch_xla::ir::ops::ConvolutionOverrideable> >, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::__shared_ptr<torch_xla::ir::ops::ConvolutionOverrideable, (__gnu_cxx::_Lock_policy)2>::__shared_ptr<std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(std::_Sp_alloc_shared_tag<std::allocator<torch_xla::ir::ops::ConvolutionOverrideable> >, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::shared_ptr<torch_xla::ir::ops::ConvolutionOverrideable>::shared_ptr<std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(std::_Sp_alloc_shared_tag<std::allocator<torch_xla::ir::ops::ConvolutionOverrideable> >, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::shared_ptr<torch_xla::ir::ops::ConvolutionOverrideable> std::allocate_shared<torch_xla::ir::ops::ConvolutionOverrideable, std::allocator<torch_xla::ir::ops::ConvolutionOverrideable>, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(std::allocator<torch_xla::ir::ops::ConvolutionOverrideable> const&, torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::shared_ptr<torch_xla::ir::ops::ConvolutionOverrideable> std::make_shared<torch_xla::ir::ops::ConvolutionOverrideable, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
std::shared_ptr<torch_xla::ir::Node> torch_xla::ir::MakeNode<torch_xla::ir::ops::ConvolutionOverrideable, torch_xla::ir::Value, torch_xla::ir::Value, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool&, std::vector<long long, std::allocator<long long> >, long long&>(torch_xla::ir::Value&&, torch_xla::ir::Value&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, std::vector<long long, std::allocator<long long> >&&, bool&, std::vector<long long, std::allocator<long long> >&&, long long&)
torch_xla::XLATensor::convolution_overrideable(torch_xla::XLATensor const&, torch_xla::XLATensor const&, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, std::vector<long long, std::allocator<long long> >, bool, std::vector<long long, std::allocator<long long> >, long long)
torch_xla::AtenXlaType::convolution_overrideable(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long)
c10::impl::detail::WrapFunctionIntoRuntimeFunctor_<at::Tensor (*)(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long), at::Tensor, c10::guts::typelist::typelist<at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long> >::operator()(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long)
c10::impl::wrap_kernel_functor_unboxed_<c10::impl::detail::WrapFunctionIntoRuntimeFunctor_<at::Tensor (*)(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long), at::Tensor, c10::guts::typelist::typelist<at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long> >, at::Tensor (at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long)>::call(c10::OperatorKernel*, at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long)
at::convolution_overrideable(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long)
at::convolution_overrideable(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long)
at::native::_convolution(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long, bool, bool, bool)
at::_convolution(at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long, bool, bool, bool)
at::native::_convolution_double_backward(at::Tensor const&, at::Tensor const&, at::Tensor const&, at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long, bool, bool, bool, std::array<bool, 3ul>)
at::_convolution_double_backward(at::Tensor const&, at::Tensor const&, at::Tensor const&, at::Tensor const&, at::Tensor const&, at::Tensor const&, c10::ArrayRef<long>, c10::ArrayRef<long>, c10::ArrayRef<long>, bool, c10::ArrayRef<long>, long, bool, bool, bool, std::array<bool, 3ul>)
torch::autograd::generated::ConvolutionBackwardOverrideableBackward::apply(std::vector<at::Tensor, std::allocator<at::Tensor> >&&)
torch::autograd::Engine::evaluate_function(std::shared_ptr<torch::autograd::GraphTask>&, torch::autograd::Node*, torch::autograd::InputBuffer&, std::shared_ptr<torch::autograd::ReadyQueue> const&)
torch::autograd::Engine::thread_main(std::shared_ptr<torch::autograd::GraphTask> const&)
torch::autograd::Engine::thread_init(int, std::shared_ptr<torch::autograd::ReadyQueue> const&, bool)
torch::autograd::python::PythonEngine::thread_init(int, std::shared_ptr<torch::autograd::ReadyQueue> const&, bool)
clone
*** End stack trace ***
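
For context: the failing call is a double backward (gradient of a gradient) through a dilated convolution lowered by torch_xla. The gist does not include grad_grad.py itself, so the following is a hypothetical reconstruction, a minimal sketch with sizes guessed from the numbers in the error message (input 32, filter 16, stride 1, dilation 2; padding=1 is assumed so that the standard output-size formula floor((32 + 2*1 - 2*(16-1) - 1)/1) + 1 reproduces the "computed = 4" in the message). On an XLA device a script of this shape should exercise the same ConvolutionBackwardOverrideableBackward path shown in the stack trace.

# Hypothetical reconstruction of grad_grad.py; only line 17 of the original,
# (dw * dw).mean().backward(), is known from the traceback. All sizes are
# guesses based on the error message, not the original script.
import torch
import torch_xla.core.xla_model as xm

device = xm.xla_device()

conv = torch.nn.Conv2d(1, 1, kernel_size=16, stride=1, padding=1, dilation=2).to(device)
x = torch.randn(1, 1, 32, 32, device=device, requires_grad=True)

out = conv(x)
# First-order gradient w.r.t. the weight, kept differentiable via create_graph=True.
dw, = torch.autograd.grad(out.sum(), conv.weight, create_graph=True)
# Double backward through the convolution gradient; per the traceback this is
# the call that reaches _convolution_double_backward and then torch_xla's
# conv_backward_input shape check.
(dw * dw).mean().backward()

The check that fires compares the spatial size of the incoming out_backprop (3) against the size recomputed from the forward parameters (4) while building the ConvolutionOverrideable IR node, which suggests the mismatch arises in the double-backward path for dilated kernels.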