提交 e366af7d 编写于 作者: Hong Xu's avatar Hong Xu 提交者: Facebook Github Bot

Add TORCH_CHECK to disable sub for bool tensors (#23519)

Summary:
This resolves two issues in one shot:

- sub shouldn't be available for bool type.
- When sub is applied to an unsupported type, the current error message
  shows "add_cpu/add_cuda is not implemented for [type]". It should read
  "sub_cpu/sub_cuda" instead.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/23519

Differential Revision: D16548770

Pulled By: izdeby

fbshipit-source-id: fe404a2a97b8d11bd180ec41364bf8e68414fb15
上级 3c986dff
......@@ -100,7 +100,18 @@ Tensor& mul_(Tensor& self, const Tensor& other) {
return native::mul_out(self, self, other);
}
// Basic checking for all sub functions.
//
// Subtraction is deliberately unsupported for bool tensors; reject bool
// operands up front with an error message that points the user at the
// intended replacement operator (`^` for two masks, `~`/bitwise_not()
// for inverting a single mask).
static inline void sub_check(const Tensor& self, const Tensor& other) {
  // Both operands bool: suggest logical XOR (`^`).
  // NOTE: the original line was missing the terminating `;` after the
  // macro invocation; added here.
  TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool,
              "Subtraction, the `-` operator, with two bool tensors is not supported. "
              "Use the `^` operator instead.");
  // Exactly one operand bool (the both-bool case was caught above):
  // suggest mask inversion instead.
  TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool,
              "Subtraction, the `-` operator, with a bool tensor is not supported. "
              "If you are trying to invert a mask, use the `~` or `bitwise_not()` operator instead.");
}
Tensor& sub_out(Tensor& result, const Tensor& self, const Tensor& other, Scalar alpha) {
sub_check(self, other);
if (other.is_sparse()) {
if (!self.sizes().equals(other.sizes())) {
AT_ERROR("sizes do not match");
......@@ -121,6 +132,7 @@ Tensor& sub_out(Tensor& result, const Tensor& self, const Tensor& other, Scalar
}
Tensor sub(const Tensor& self, const Tensor& other, Scalar alpha) {
sub_check(self, other);
Tensor result;
if (other.is_sparse()) {
result = at::empty({0}, self.options());
......
......@@ -18,7 +18,7 @@ void add_kernel(TensorIterator& iter, Scalar alpha_scalar) {
auto alpha = alpha_scalar.to<bool>();
cpu_kernel(iter, [=](bool a, bool b) -> bool { return a + b * alpha; });
} else {
AT_DISPATCH_ALL_TYPES(iter.dtype(), "add_cpu", [&]() {
AT_DISPATCH_ALL_TYPES(iter.dtype(), "add_cpu/sub_cpu", [&]() {
auto alpha = alpha_scalar.to<scalar_t>();
auto alpha_vec = Vec256<scalar_t>(alpha);
cpu_kernel_vec(iter,
......
......@@ -13,7 +13,7 @@
namespace at { namespace native {
void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(), "add_cuda", [&]() {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(), "add_cuda/sub_cuda", [&]() {
auto alpha = alpha_scalar.to<scalar_t>();
gpu_kernel_with_scalars(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a + alpha * b;
......@@ -22,7 +22,7 @@ void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
}
// CUDA subtraction kernel: a - alpha * b == a + (-alpha) * b, so simply
// delegate to the addition kernel with alpha negated.
//
// The rendered diff had kept both the pre-change line
// (`return add_kernel_cuda(...)`) and the post-change line, leaving a
// duplicated call; only the single delegating call belongs here.
static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
  add_kernel_cuda(iter, -alpha_scalar);
}
void div_kernel_cuda(TensorIterator& iter) {
......
......@@ -1706,6 +1706,23 @@ class _TestTorchMixin(object):
expected = torch.zeros(2, 3, device=device).bool()
self.assertEqual(res, expected)
def test_bool_sub(self):
    """Subtracting bool tensors must raise a RuntimeError with a helpful message."""
    for device in torch.testing.get_all_device_types():
        mask_a = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
        mask_b = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
        # bool tensor - bool tensor: the error should steer users to `^`.
        with self.assertRaisesRegex(RuntimeError,
                                    r"Subtraction, the `\-` operator, with two bool tensors is not supported. "
                                    r"Use the `\^` operator instead."):
            mask_a - mask_b
        # scalar - bool tensor: the error should steer users to `~`/bitwise_not().
        with self.assertRaisesRegex(RuntimeError,
                                    r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
                                    r"If you are trying to invert a mask, use the `\~` or `bitwise_not\(\)` operator instead.",):
            1 - mask_a
        # bool tensor - scalar: same error as the case above.
        with self.assertRaisesRegex(RuntimeError,
                                    r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
                                    r"If you are trying to invert a mask, use the `\~` or `bitwise_not\(\)` operator instead.",):
            mask_b - 1
def test_csub(self):
# with a tensor
a = torch.randn(100, 90)
......
Markdown 格式
0% or
您添加了 0 到此讨论。请谨慎行事。
先完成此消息的编辑!
想要评论请 注册