Commit c3bad0de authored by Jerry Zhang, committed by Soumith Chintala

at::view (#23452) (#23604)

Summary:
at::view accidentally calls clone, but what we want is to create an empty tensor and set its storage.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/23452
ghstack-source-id: 87438096

Differential Revision: D16442756

fbshipit-source-id: 6d5663f82c9bd4e9de8fc846c52992477843af6a
Parent 18cbf113
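
For context (an illustration, not part of the commit): the contract this change preserves, and that the new TestView test below locks in, can be sketched against the public ATen API. A minimal sketch, assuming a standalone build where <ATen/ATen.h> is available:

#include <ATen/ATen.h>
#include <cassert>

int main() {
  at::Tensor base = at::randn({3, 4});
  at::Tensor viewed = base.view({4, 3});
  // A view shares storage with its base. The old at::view reached the same
  // end state, but cloned the data first -- a full copy that the subsequent
  // set_() immediately discarded. The new alias-based path skips the copy.
  assert(viewed.data_ptr() == base.data_ptr());
  // Resizing the base must not disturb the view's own sizes (cf. TestView).
  base.resize_({6, 2});
  assert(viewed.sizes().equals({4, 3}));
  return 0;
}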
aten/src/ATen/Declarations.cwrap
@@ -194,22 +194,6 @@
   arguments:
     - THTensor* self
 ]]
-[[
-  name: _th_view
-  cname: newView
-  cpu_half: True
-  cpu_bool: True
-  cuda_bool: True
-  cpu_bfloat16: True
-  variants:
-    - function
-  device_guard: False
-  return: THTensor*
-  arguments:
-    - THTensor* self
-    - arg: IntArrayRefSize size
-      long_args: True
-]]
 [[
   name: _th_resize_as_
   cname: resizeAs
@@ -2698,23 +2682,6 @@
     - double p
 ]]
 # In theory, this could be a part of the above declaration. But in
 # practice this leads to all sorts of problems with ambiguous overloads.
 # So we add it here with a separate name.
-[[
-  name: _th_alias
-  return: THTensor*
-  cpu_half: True
-  cpu_bool: True
-  cuda_bool: True
-  cpu_bfloat16: True
-  variants:
-    - function
-  options:
-    - cname: newWithTensor
-      arguments:
-        - THTensor* self
-]]
 [[
   name: _th_copy_ignoring_overlaps_
   cname: copyIgnoringOverlaps
aten/src/ATen/native/TensorShape.cpp
@@ -898,20 +898,36 @@ Tensor numpy_T(const Tensor &self) {
 Tensor view(const Tensor& self, IntArrayRef size) {
   auto inferred_size = at::infer_size(size, self.numel());
   if (self.sizes() == inferred_size) {
     return self;
   }
   auto stride = at::detail::computeStride(self.sizes(),
                                           self.strides(),
                                           inferred_size);
   TORCH_CHECK(stride.has_value(), "view size is "
     "not compatible with input tensor's size and stride (at least one dimension"
     " spans across two contiguous subspaces). Use .reshape(...) instead.");
   auto stride_value = *stride;
-  auto self_ = self.clone();
-  self_.set_(self.storage(), self.storage_offset(), inferred_size,
-             stride_value);
+  auto self_ = self.alias();
+  self_.set_(
+      self.storage(), self.storage_offset(), inferred_size, stride_value);
   return self_;
 }

+Tensor alias(const Tensor& self) {
+  Tensor self_;
+  if (self.is_quantized()) {
+    auto impl = c10::make_intrusive<QTensorImpl>(
+        Storage(self.storage()),
+        self.type_id(),
+        get_qtensorimpl(self)->quantizer());
+    impl->set_storage_offset(self.storage_offset());
+    impl->set_sizes_and_strides(self.sizes(), self.strides());
+    self_ = Tensor(std::move(impl));
+  } else {
+    auto impl = c10::make_intrusive<TensorImpl>(Storage(self.storage()),
+                                                self.type_id());
+    impl->set_storage_offset(self.storage_offset());
+    impl->set_sizes_and_strides(self.sizes(), self.strides());
+    self_ = Tensor(std::move(impl));
+  }
+  return self_;
+}
aten/src/ATen/native/native_functions.yaml
@@ -2973,7 +2973,7 @@
   dispatch:
     CPU: legacy::cpu::_th_set_
     CUDA: legacy::cuda::_th_set_
-    QuantizedCPU: set_storage_cpu
+    QuantizedCPU: set_storage

 - func: set_(Tensor(a!) self, Tensor source) -> Tensor(a!)
   variants: method
@@ -4240,9 +4240,6 @@
 - func: alias(Tensor(a) self) -> Tensor(a)
   variants: method, function
-  dispatch:
-    CPU: legacy::cpu::_th_alias
-    CUDA: legacy::cuda::_th_alias

 - func: _addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
   dispatch:
aten/src/ATen/native/quantized/QTensor.cpp
@@ -97,7 +97,7 @@ Tensor per_tensor_affine_qtensor_cpu(const Tensor& self, double scale, int64_t zero_point) {
   return dst;
 }

-Tensor& set_storage_cpu(Tensor& self, Storage storage, int64_t storage_offset, IntArrayRef sizes, IntArrayRef strides) {
+Tensor& set_storage(Tensor& self, Storage storage, int64_t storage_offset, IntArrayRef sizes, IntArrayRef strides) {
   auto* self_ = self.unsafeGetTensorImpl();
   self_->set_storage(storage);
   self_->set_storage_offset(storage_offset);
aten/src/ATen/test/basic.cpp
@@ -283,6 +283,17 @@ void TestNegativeDim(DeprecatedTypeProperties& type) {
   ASSERT_ANY_THROW(tensor.reshape({-5, -5}));
 }

+void TestView(DeprecatedTypeProperties& type) {
+  // Testing the tensor view path, which is different from the Variable view
+  // path; see https://github.com/pytorch/pytorch/pull/23452 for details.
+  Tensor tensor = randn({3, 4}, type);
+  Tensor viewed = tensor.view({3, 4});
+  tensor.resize_({6, 2});
+  ASSERT_TRUE(tensor.sizes().equals({6, 2}));
+  ASSERT_TRUE(viewed.sizes().equals({3, 4}));
+}
+
 void test(DeprecatedTypeProperties& type) {
   TestResize(type);
   TestOnesAndDot(type);
@@ -310,6 +321,7 @@ void test(DeprecatedTypeProperties& type) {
   TestIndexingMixedDevice(type);
   TestDispatch();
   TestNegativeDim(type);
+  TestView(type);
 }

 TEST(BasicTest, BasicTestCPU) {
aten/src/TH/generic/THTensor.cpp
@@ -3,6 +3,7 @@
 #else

 #include <ATen/InferSize.h>
+#include <ATen/NativeFunctions.h>
 #include <new>

 #ifdef BUILD_NAMEDTENSOR
 #include <ATen/NamedTensorUtils.h>
@@ -66,17 +67,7 @@ THTensor *THTensor_(new)(void)
 /* Pointer-copy init */
 THTensor *THTensor_(newWithTensor)(THTensor *tensor)
 {
-  THTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
-    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
-    at::CPUTensorId()
-  ).release();
-  THTensor_(setStorageNd)(self,
-                          THTensor_getStoragePtr(tensor),
-                          tensor->storage_offset(),
-                          tensor->dim(),
-                          THTensor_getSizePtr(tensor),
-                          THTensor_getStridePtr(tensor));
-  return self;
+  return at::native::alias(THTensor_wrap(tensor)).unsafeReleaseTensorImpl();
 }

 /* Storage init */
@@ -193,22 +184,6 @@ THTensor *THTensor_(newTranspose)(THTensor *tensor, int dimension1_, int dimension2_)
   return self;
 }

-THTensor *THTensor_(newView)(THTensor *tensor, at::IntArrayRef size)
-{
-  ptrdiff_t numel = THTensor_(nElement)(tensor);
-  THTensor *self = THTensor_(new)();
-  auto inferred_size = at::infer_size(size, numel);
-  auto stride = THTensor_compute_stride(tensor->sizes(),
-                                        tensor->strides(),
-                                        inferred_size);
-  THArgCheck(stride.has_value(), 2, "view size is "
-    "not compatible with input tensor's size and stride (at least one dimension spans "
-    "across two contiguous subspaces). Use .reshape(...) instead.");
-  auto stride_value = *stride;
-  THTensor_setStorage(self, THTensor_getStoragePtr(tensor), tensor->storage_offset(), inferred_size, stride_value);
-  return self;
-}
-
 /* Resize */
 void THTensor_(resize)(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride)
 {
aten/src/TH/generic/THTensor.hpp
@@ -10,7 +10,6 @@
 TH_CPP_API void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                                       at::IntArrayRef size_, at::IntArrayRef stride_);
-TH_CPP_API THTensor *THTensor_(newView)(THTensor *tensor, at::IntArrayRef size);

 /* strides.data() might be NULL */
 TH_CPP_API THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset,
                                                at::IntArrayRef sizes, at::IntArrayRef strides);
aten/src/THC/generic/THCTensor.cpp
@@ -3,6 +3,7 @@
 #else

 #include <ATen/InferSize.h>
+#include <ATen/NativeFunctions.h>

 /**** access methods ****/
 THCStorage *THCTensor_(storage)(THCState *state, const THCTensor *self)
@@ -72,18 +73,7 @@ THCTensor *THCTensor_(new)(THCState *state)
 /* Pointer-copy init */
 THCTensor *THCTensor_(newWithTensor)(THCState *state, THCTensor *tensor)
 {
-  THCTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
-    c10::intrusive_ptr<at::StorageImpl>::reclaim(THCStorage_(new)(state)),
-    at::CUDATensorId()
-  ).release();
-  THCTensor_(setStorageNd)(state,
-                           self,
-                           THTensor_getStoragePtr(tensor),
-                           tensor->storage_offset(),
-                           tensor->dim(),
-                           THTensor_getSizePtr(tensor),
-                           THTensor_getStridePtr(tensor));
-  return self;
+  return at::native::alias(THTensor_wrap(tensor)).unsafeReleaseTensorImpl();
 }

 /* Storage init */
@@ -197,34 +187,6 @@ THCTensor *THCTensor_(newTranspose)(THCState *state, THCTensor *tensor, int dimension1_, int dimension2_)
   return self;
 }

-THCTensor *THCTensor_(newView)(THCState *state, THCTensor *tensor, at::IntArrayRef size)
-{
-  ptrdiff_t numel = THCTensor_(nElement)(state, tensor);
-  auto inferred_size = at::infer_size(size, numel);
-  auto stride = THTensor_compute_stride(tensor->sizes(),
-                                        tensor->strides(),
-                                        inferred_size);
-  THArgCheck(stride.has_value(), 2, "view size is "
-    "not compatible with input tensor's size and stride (at least one dimension spans "
-    "across two contiguous subspaces). Use .reshape(...) instead.");
-  auto stride_value = *stride;
-  // NOTE: This path of constructing the Tensor directly with the viewed Storage is necessary
-  // to allow `view` not to have a device_guard. Taking the common TH path of allocating a storage
-  // on the current device [via THCTensor_(new)] and then swapping out the storage later can change
-  // the device out from under the tensor. Having the device be consistent through a Tensor's lifetime
-  // is an invariant we wish to keep to support caching, simplicity, etc.
-  auto storage = tensor->storage();
-  THCTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
-    std::move(storage),
-    at::CUDATensorId()
-  ).release();
-  THCTensor_setStorage(state, self, THTensor_getStoragePtr(tensor), tensor->storage_offset(), inferred_size, stride_value);
-  return self;
-}
-
 // Collapses the first two dimensions of a tensor.
 // Assumes the input tensor is contiguous.
 THCTensor *THCTensor_(newFoldBatchDim)(THCState *state, THCTensor *input) {
@@ -237,7 +199,7 @@ THCTensor *THCTensor_(newFoldBatchDim)(THCState *state, THCTensor *input) {
   for (int i = 2; i < in_dims; i++) {
     new_size[i - 1] = THCTensor_(size)(state, input, i);
   }
-  THCTensor *output = THCTensor_(newView)(state, input, new_size);
+  THCTensor *output = at::native::view(THTensor_wrap(input), new_size).unsafeReleaseTensorImpl();
   return output;
 }