[TensorExpr] Remove 'Placeholder' class. (#64887)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64887
BufHandle provides exactly the same functionality and should be used instead of Placeholder.
Differential Revision: D30889483
Test Plan: Imported from OSS
Reviewed By: navahgar
Pulled By: ZolotukhinM
fbshipit-source-id: 365fe8e396731b88920535a3de96bd3301aaa3f3
diff --git a/benchmarks/cpp/tensorexpr/bench_approx.cpp b/benchmarks/cpp/tensorexpr/bench_approx.cpp
index 425d19f..f2b3975 100644
--- a/benchmarks/cpp/tensorexpr/bench_approx.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_approx.cpp
@@ -30,7 +30,7 @@
static void relu_nnc(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
auto clamp = 0;
torch::jit::tensorexpr::Tensor B = Compute("B", {N}, [&](const VarHandle& i){
auto A_elem = [&]() {
@@ -64,7 +64,7 @@
static void log_nnc_sleef(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
torch::jit::tensorexpr::Tensor B =
Compute("B", {N}, [&](const VarHandle& i) {
return log(A.load(i));
@@ -93,7 +93,7 @@
static void log_nnc_fast(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
torch::jit::tensorexpr::Tensor B =
Compute("B", {N}, [&](const VarHandle& i) {
return fast_log(A.load(i));
@@ -122,7 +122,7 @@
static void log_nnc_vml(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
torch::jit::tensorexpr::Tensor B =
Compute("B", {N}, [&](const VarHandle& i) {
return log_vml(A.load(i));
@@ -161,7 +161,7 @@
static void logit_nnc_sleef(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
auto clamp = 1e-6f;
tensorexpr::Tensor B = Compute("B", {N}, [&](const VarHandle& i) {
auto A_elem = [&]() {
@@ -197,7 +197,7 @@
static void logit_nnc_fast(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
auto clamp = 1e-6f;
tensorexpr::Tensor B = Compute("B", {N}, [&](const VarHandle& i) {
auto A_elem = [&]() {
@@ -233,7 +233,7 @@
static void logit_nnc_vml(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
auto clamp = 1e-6f;
tensorexpr::Tensor B = Compute("B", {N}, [&](const VarHandle& i) {
auto A_elem = [&]() {
@@ -310,7 +310,7 @@
static void tanh_nnc_fast(benchmark::State& state) {
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
torch::jit::tensorexpr::Tensor B =
Compute("B", {N}, [&](const VarHandle& i) {
return fast_tanh(A.load(i));
diff --git a/benchmarks/cpp/tensorexpr/bench_batchnorm.cpp b/benchmarks/cpp/tensorexpr/bench_batchnorm.cpp
index 702ed1c..4753ca9 100644
--- a/benchmarks/cpp/tensorexpr/bench_batchnorm.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_batchnorm.cpp
@@ -75,11 +75,11 @@
BENCHMARK_DEFINE_F(BatchNorm, NNC)(benchmark::State& state) {
- Placeholder input("input", kFloat, {N_, C_, H_, W_});
- Placeholder weight("weight", kFloat, {C_});
- Placeholder bias("bias", kFloat, {C_});
- Placeholder mean("mean", kFloat, {C_});
- Placeholder var("var", kFloat, {C_});
+ BufHandle input("input", {N_, C_, H_, W_}, kFloat);
+ BufHandle weight("weight", {C_}, kFloat);
+ BufHandle bias("bias", {C_}, kFloat);
+ BufHandle mean("mean", {C_}, kFloat);
+ BufHandle var("var", {C_}, kFloat);
VarHandle eps("eps", kFloat);
using axis = const VarHandle&;
@@ -137,11 +137,11 @@
BENCHMARK_DEFINE_F(BatchNorm, NNCRelu)(benchmark::State& state) {
- Placeholder input("input", kFloat, {N_, C_, H_, W_});
- Placeholder weight("weight", kFloat, {C_});
- Placeholder bias("bias", kFloat, {C_});
- Placeholder mean("mean", kFloat, {C_});
- Placeholder var("var", kFloat, {C_});
+ BufHandle input("input", {N_, C_, H_, W_}, kFloat);
+ BufHandle weight("weight", {C_}, kFloat);
+ BufHandle bias("bias", {C_}, kFloat);
+ BufHandle mean("mean", {C_}, kFloat);
+ BufHandle var("var", {C_}, kFloat);
VarHandle eps("eps", kFloat);
using axis = const VarHandle&;
diff --git a/benchmarks/cpp/tensorexpr/bench_compile.cpp b/benchmarks/cpp/tensorexpr/bench_compile.cpp
index f204377..7856c1d 100644
--- a/benchmarks/cpp/tensorexpr/bench_compile.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_compile.cpp
@@ -11,7 +11,7 @@
for (auto _ : state) {
constexpr int N = 512;
te::VarHandle n("n", te::kInt);
- te::Placeholder A(te::BufHandle("A", {N}, te::kFloat));
+ te::BufHandle A("A", {N}, te::kFloat);
te::Tensor relu = te::Compute("relu", {{n, "n"}}, [&](const te::VarHandle& i) {
return te::Max::make(A.load(i), 0.f, false);
});
@@ -40,7 +40,7 @@
static void BM_CompileSwishLLVMOnly(benchmark::State& state) {
constexpr int N = 512;
te::VarHandle n("n", te::kInt);
- te::Placeholder A(te::BufHandle("A", {N}, te::kFloat));
+ te::BufHandle A("A", {N}, te::kFloat);
te::Tensor relu = te::Compute("relu", {{n, "n"}}, [&](const te::VarHandle& i) {
return te::Max::make(A.load(i), 0.f, false);
});
diff --git a/benchmarks/cpp/tensorexpr/bench_concat.cpp b/benchmarks/cpp/tensorexpr/bench_concat.cpp
index c108c86..70bfb42 100644
--- a/benchmarks/cpp/tensorexpr/bench_concat.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_concat.cpp
@@ -51,12 +51,12 @@
size_t num_inputs = inputs_.size();
size_t num_dims = 2;
- std::vector<Placeholder> inputs;
+ std::vector<BufHandle> inputs;
for (size_t i = 0; i < num_inputs; ++i) {
- inputs.emplace_back(Placeholder(
+ inputs.emplace_back(BufHandle(
"input" + std::to_string(i),
- kFloat,
- {input_sizes_[i][0], input_sizes_[i][1]}));
+ {input_sizes_[i][0], input_sizes_[i][1]},
+ kFloat));
}
Tensor output = Compute(
@@ -112,14 +112,14 @@
{alloc<IntImm>(output_size_[0]), alloc<IntImm>(output_size_[1])}),
kFloat);
- std::vector<Placeholder> inputs;
+ std::vector<BufHandle> inputs;
std::vector<StmtPtr> for_stmts(num_inputs);
int cumulative_input_sizes = 0;
for (size_t i = 0; i < num_inputs; ++i) {
- inputs.emplace_back(Placeholder(
+ inputs.emplace_back(BufHandle(
"input" + std::to_string(i),
- kFloat,
- {input_sizes_[i][0], input_sizes_[i][1]}));
+ {input_sizes_[i][0], input_sizes_[i][1]},
+ kFloat));
std::vector<VarPtr> for_vars(num_inputs);
for (size_t d = 0; d < num_dims; ++d) {
for_vars[d] =
@@ -131,7 +131,7 @@
{for_vars[0],
alloc<Add>(for_vars[1], alloc<IntImm>(cumulative_input_sizes))}),
alloc<Load>(
- inputs[i].data(),
+ inputs[i].node(),
std::vector<ExprPtr>({for_vars[0], for_vars[1]})));
auto for_st = alloc<For>(
for_vars[0],
diff --git a/benchmarks/cpp/tensorexpr/bench_gemm.cpp b/benchmarks/cpp/tensorexpr/bench_gemm.cpp
index ec13b09..568d40d 100644
--- a/benchmarks/cpp/tensorexpr/bench_gemm.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_gemm.cpp
@@ -41,8 +41,8 @@
BENCHMARK_DEFINE_F(Gemm, TensorExprNoopt)(benchmark::State& state) {
- te::Placeholder AP(te::BufHandle("A", {M, K}, te::kFloat));
- te::Placeholder BP(te::BufHandle("B", {K, N}, te::kFloat));
+ te::BufHandle AP("A", {M, K}, te::kFloat);
+ te::BufHandle BP("B", {K, N}, te::kFloat);
te::Tensor CT = te::Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
@@ -64,8 +64,8 @@
BENCHMARK_DEFINE_F(Gemm, TensorExprTile32x32)(benchmark::State& state) {
- te::Placeholder AP(te::BufHandle("A", {M, K}, te::kFloat));
- te::Placeholder BP(te::BufHandle("B", {K, N}, te::kFloat));
+ te::BufHandle AP("A", {M, K}, te::kFloat);
+ te::BufHandle BP("B", {K, N}, te::kFloat);
te::Tensor CT = te::Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
@@ -123,8 +123,8 @@
BENCHMARK_DEFINE_F(Gemm, TensorExprTile4x16)(benchmark::State& state) {
- te::Placeholder AP(te::BufHandle("A", {M, K}, te::kFloat));
- te::Placeholder BP(te::BufHandle("B", {K, N}, te::kFloat));
+ te::BufHandle AP("A", {M, K}, te::kFloat);
+ te::BufHandle BP("B", {K, N}, te::kFloat);
te::Tensor CT = te::Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
@@ -182,8 +182,8 @@
BENCHMARK_DEFINE_F(Gemm, TensorExprTile4x16VecUnroll)(benchmark::State& state) {
- te::Placeholder AP(te::BufHandle("A", {M, K}, te::kFloat));
- te::Placeholder BP(te::BufHandle("B", {K, N}, te::kFloat));
+ te::BufHandle AP("A", {M, K}, te::kFloat);
+ te::BufHandle BP("B", {K, N}, te::kFloat);
te::Tensor CT = te::Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
@@ -249,8 +249,8 @@
BENCHMARK_DEFINE_F(Gemm, TensorExprTile4x16Cache)(benchmark::State& state) {
- te::Placeholder AP(te::BufHandle("A", {M, K}, te::kFloat));
- te::Placeholder BP(te::BufHandle("B", {K, N}, te::kFloat));
+ te::BufHandle AP("A", {M, K}, te::kFloat);
+ te::BufHandle BP("B", {K, N}, te::kFloat);
te::Tensor CT = te::Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
diff --git a/benchmarks/cpp/tensorexpr/bench_parallel.cpp b/benchmarks/cpp/tensorexpr/bench_parallel.cpp
index 178a879..8f98c98 100644
--- a/benchmarks/cpp/tensorexpr/bench_parallel.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_parallel.cpp
@@ -35,8 +35,8 @@
};
BENCHMARK_DEFINE_F(ParallelAdd, Simple)(benchmark::State& state) {
- Placeholder a_buf("a", kFloat, {M});
- Placeholder b_buf("b", kFloat, {M});
+ BufHandle a_buf("a", {M}, kFloat);
+ BufHandle b_buf("b", {M}, kFloat);
Tensor c_tensor = Compute(
"c", {{M, "m"}}, [&](const VarHandle& m) {
return a_buf.load(m) + b_buf.load(m);
diff --git a/benchmarks/cpp/tensorexpr/bench_reduce.cpp b/benchmarks/cpp/tensorexpr/bench_reduce.cpp
index e053317..cb1c2f9 100644
--- a/benchmarks/cpp/tensorexpr/bench_reduce.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_reduce.cpp
@@ -220,7 +220,7 @@
int M = A.numel();
- te::Placeholder AP(te::BufHandle("A", {M}, te::kFloat));
+ te::BufHandle AP("A", {M}, te::kFloat);
te::Tensor BT = te::Reduce(
"reduce_full",
{{1, "N"}},
@@ -252,7 +252,7 @@
int M = A.numel();
- te::Placeholder AP(te::BufHandle("A", {M}, te::kFloat));
+ te::BufHandle AP("A", {M}, te::kFloat);
te::Tensor BT = te::Reduce(
"reduce_full",
{{1, "N"}},
@@ -292,7 +292,7 @@
int M = A.numel();
- te::Placeholder AP(te::BufHandle("A", {M}, te::kFloat));
+ te::BufHandle AP("A", {M}, te::kFloat);
te::Tensor BT = te::Reduce(
"reduce_full",
{{1, "N"}},
@@ -334,7 +334,7 @@
const int kChunkSize = 8;
TORCH_CHECK(M % kChunkSize == 0);
- te::Placeholder AP(te::BufHandle("A", {M}, te::kFloat));
+ te::BufHandle AP("A", {M}, te::kFloat);
te::Tensor BT = te::Reduce(
"reduce_full",
{},
@@ -384,8 +384,8 @@
const int M = A.numel();
const int kChunkSize = 8;
- te::Placeholder a("A", te::kFloat, {M});
- te::Tensor b = te::computeSum({a.handle(), te::IntList({0}), false}, at::kFloat);
+ te::BufHandle a("A", {M}, te::kFloat);
+ te::Tensor b = te::computeSum({a, te::IntList({0}), false}, at::kFloat);
te::LoopNest nest({b});
auto loops = nest.getLoopStmtsFor(b);
@@ -446,8 +446,8 @@
BENCHMARK_DEFINE_F(Reduce2DCol, OpSchedule)(benchmark::State& state) {
constexpr int kCacheSize = 1 << 12;
- te::Placeholder a("A", te::kFloat, {M, N});
- te::Tensor b = te::computeSum({a.handle(), te::IntList({0}), false}, at::kFloat);
+ te::BufHandle a("A", {M, N}, te::kFloat);
+ te::Tensor b = te::computeSum({a, te::IntList({0}), false}, at::kFloat);
te::LoopNest nest({b});
auto sch = state.range(2);
@@ -552,8 +552,8 @@
BENCHMARK_DEFINE_F(Reduce2DRow, OpSchedule)(benchmark::State& state) {
constexpr int kChunkSize = 8;
- te::Placeholder a("A", te::kFloat, {M, N});
- te::Tensor b = te::computeSum({a.handle(), te::IntList({1}), false}, at::kFloat);
+ te::BufHandle a("A", {M, N}, te::kFloat);
+ te::Tensor b = te::computeSum({a, te::IntList({1}), false}, at::kFloat);
te::LoopNest nest({b});
auto sch = state.range(2);
diff --git a/benchmarks/cpp/tensorexpr/bench_signed_log1p.cpp b/benchmarks/cpp/tensorexpr/bench_signed_log1p.cpp
index 155b408..0454530 100644
--- a/benchmarks/cpp/tensorexpr/bench_signed_log1p.cpp
+++ b/benchmarks/cpp/tensorexpr/bench_signed_log1p.cpp
@@ -42,8 +42,8 @@
}
void runNNC(benchmark::State& state) {
- Placeholder input_ph(
- "input", kFloat, {input_size_int_[0], input_size_int_[1]});
+ BufHandle input_ph(
+ "input", {input_size_int_[0], input_size_int_[1]}, kFloat);
Tensor abs_result = Compute(
"aten_abs",
{{input_size_int_[0], "M"}, {input_size_int_[1], "N"}},
@@ -56,8 +56,8 @@
[&](const VarHandle& m, const VarHandle& n) {
return log1p(abs_result.load(m, n));
});
- Tensor sign_result = computeSign(
- {input_ph.handle()}, {input_size_int_[0], input_size_int_[1]});
+ Tensor sign_result =
+ computeSign({input_ph}, {input_size_int_[0], input_size_int_[1]});
Tensor output = Compute(
"aten_mul",
{{input_size_int_[0], "M"}, {input_size_int_[1], "N"}},
@@ -90,8 +90,8 @@
}
void runNNCLogVml(benchmark::State& state) {
- Placeholder input_ph(
- "input", kFloat, {input_size_int_[0], input_size_int_[1]});
+ BufHandle input_ph(
+ "input", {input_size_int_[0], input_size_int_[1]}, kFloat);
Tensor abs_result = Compute(
"aten_abs",
{{input_size_int_[0], "M"}, {input_size_int_[1], "N"}},
@@ -104,8 +104,8 @@
[&](const VarHandle& m, const VarHandle& n) {
return log_vml(abs_result.load(m, n) + ExprHandle(1));
});
- Tensor sign_result = computeSign(
- {input_ph.handle()}, {input_size_int_[0], input_size_int_[1]});
+ Tensor sign_result =
+ computeSign({input_ph}, {input_size_int_[0], input_size_int_[1]});
Tensor output = Compute(
"aten_mul",
{{input_size_int_[0], "M"}, {input_size_int_[1], "N"}},
diff --git a/test/cpp/tensorexpr/test_approx.cpp b/test/cpp/tensorexpr/test_approx.cpp
index 8de395f..e1a576a 100644
--- a/test/cpp/tensorexpr/test_approx.cpp
+++ b/test/cpp/tensorexpr/test_approx.cpp
@@ -31,7 +31,7 @@
TEST(Approx, log_vml) {
te::VarHandle N("N", te::kInt);
- te::Placeholder A("A", te::kFloat, {N});
+ te::BufHandle A("A", {N}, te::kFloat);
te::Tensor B = te::Compute(
"B", {N}, [&](const te::VarHandle& i) { return log_vml(A.load(i)); });
diff --git a/test/cpp/tensorexpr/test_aten.cpp b/test/cpp/tensorexpr/test_aten.cpp
index 040b7b0..ecc6365 100644
--- a/test/cpp/tensorexpr/test_aten.cpp
+++ b/test/cpp/tensorexpr/test_aten.cpp
@@ -16,8 +16,8 @@
TEST(ATen, _cast_Float) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -43,8 +43,8 @@
TEST(ATen, negInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -70,8 +70,8 @@
TEST(ATen, negFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -97,10 +97,10 @@
TEST(ATen, addInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -133,10 +133,10 @@
TEST(ATen, addFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -169,10 +169,10 @@
TEST(ATen, subInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -205,10 +205,10 @@
TEST(ATen, subFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -241,10 +241,10 @@
TEST(ATen, lerp) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -277,11 +277,11 @@
TEST(ATen, addcmulInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kInt));
- Placeholder e_buf(BufHandle("E", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle e_buf("E", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -318,11 +318,11 @@
TEST(ATen, addcmulFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder d_buf(BufHandle("D", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder e_buf(BufHandle("E", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle d_buf("D", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle e_buf("E", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -359,9 +359,9 @@
TEST(ATen, mulInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -390,9 +390,9 @@
TEST(ATen, mulFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -421,9 +421,9 @@
TEST(ATen, divInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -452,9 +452,9 @@
TEST(ATen, divFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -483,9 +483,9 @@
TEST(ATen, maxInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -514,9 +514,9 @@
TEST(ATen, maxFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -545,9 +545,9 @@
TEST(ATen, minInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -576,9 +576,9 @@
TEST(ATen, minFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c_buf("C", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -607,8 +607,8 @@
void __ubsan_ignore_float_divide_by_zero__ testATenreciprocal() {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -633,8 +633,8 @@
TEST(ATen, reluInt) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kInt));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kInt));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -659,8 +659,8 @@
TEST(ATen, reluFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -687,8 +687,8 @@
TEST(ATen, logFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -713,8 +713,8 @@
TEST(ATen, fastLogFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -744,8 +744,8 @@
TEST(ATen, fastTanhFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -775,8 +775,8 @@
TEST(ATen, fastSigmoidFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -807,8 +807,8 @@
TEST(ATen, log10Float) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -833,8 +833,8 @@
TEST(ATen, log2Float) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -859,8 +859,8 @@
TEST(ATen, expFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -886,8 +886,8 @@
TEST(ATen, erfFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -913,8 +913,8 @@
TEST(ATen, cosFloat) {
const int kTotalSize = 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -940,9 +940,9 @@
TEST(ATen, eqInt) {
constexpr int N = 128;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 1);
std::vector<int> b_buffer(N, 1);
std::vector<int> c_buffer(N, 0);
@@ -965,9 +965,9 @@
TEST(ATen, geInt) {
constexpr int N = 128;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 5);
std::vector<int> b_buffer(N, 5);
std::vector<int> c_buffer(N, 0);
@@ -990,9 +990,9 @@
TEST(ATen, gtInt) {
constexpr int N = 128;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 6);
std::vector<int> b_buffer(N, 3);
std::vector<int> c_buffer(N, 0);
@@ -1015,9 +1015,9 @@
TEST(ATen, leInt) {
constexpr int N = 128;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 5);
std::vector<int> b_buffer(N, 5);
std::vector<int> c_buffer(N, 0);
@@ -1040,9 +1040,9 @@
TEST(ATen, ltInt) {
constexpr int N = 128;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 5);
std::vector<int> b_buffer(N, 5);
std::vector<int> c_buffer(N, 1);
diff --git a/test/cpp/tensorexpr/test_boundsinference.cpp b/test/cpp/tensorexpr/test_boundsinference.cpp
index 2eb0dfb..3a16451 100644
--- a/test/cpp/tensorexpr/test_boundsinference.cpp
+++ b/test/cpp/tensorexpr/test_boundsinference.cpp
@@ -47,7 +47,7 @@
// For this loop bounds inference should yield the following:
// {{b, kStore, 0, 99}, {a, kLoad, 0, 99}}
ExprHandle n(100);
- Placeholder a(BufHandle("a", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
Tensor b =
Compute("b", {{n, "i"}}, [&](const VarHandle& i) { return a.load(i); });
LoopNest l({b});
@@ -55,9 +55,9 @@
// We should have two entries: one for 'b' and one for 'a'.
ASSERT_EQ(bounds_info.size(), 2);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, 99}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, 99}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -71,7 +71,7 @@
// For this loop bounds inference should yield the following:
// {{b, kStore, 0, n-1}, {a, kLoad, 0, n-1}}
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
Tensor b =
Compute("b", {{n, "i"}}, [&](const VarHandle& i) { return a.load(i); });
LoopNest l({b});
@@ -79,9 +79,9 @@
// We should have two entries: one for 'b' and one for 'a'.
ASSERT_EQ(bounds_info.size(), 2);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, -1}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, -1}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -95,7 +95,7 @@
// For this loop bounds inference should yield the following:
// {{b, kStore, 0, 99}, {a, kLoad, 0, 109}}
ExprHandle n(100);
- Placeholder a(BufHandle("a", {n + 10}, kFloat));
+ BufHandle a("a", {n + 10}, kFloat);
Tensor b = Compute("b", {{n, "i"}}, [&](const VarHandle& i) {
return a.load(i) * a.load(i + 10);
});
@@ -104,9 +104,9 @@
// We should have two entries: one for 'b' and one for 'a'.
ASSERT_EQ(bounds_info.size(), 2);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, 109}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, 109}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -124,7 +124,7 @@
// c[y,x] = a[y,x] * b[y,x]
ExprHandle W(320);
ExprHandle H(200);
- Placeholder a(BufHandle("a", {H, W}, kFloat));
+ BufHandle a("a", {H, W}, kFloat);
Tensor b = Compute(
"b", {{H, "y"}, {W, "x"}}, [&](const VarHandle& y, const VarHandle& x) {
return x * y;
@@ -141,9 +141,9 @@
auto bounds_info = inferBounds(loops[0]);
ASSERT_EQ(bounds_info.size(), 3);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, 199}, {0, 319}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, 199}, {0, 319}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kLoad);
@@ -158,9 +158,9 @@
auto bounds_info = inferBounds(loops[1]);
ASSERT_EQ(bounds_info.size(), 3);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{-1, -1}, {0, 319}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{-1, -1}, {0, 319}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kLoad);
@@ -175,9 +175,9 @@
auto bounds_info = inferBounds(body);
ASSERT_EQ(bounds_info.size(), 3);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{-1, -1}, {-1, -1}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{-1, -1}, {-1, -1}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kLoad);
@@ -202,7 +202,7 @@
// for i_tail in 0..100%16:
// b[i_tail + (100/16)*16] = a[i_tail + (100/16)*16];
ExprHandle n(100);
- Placeholder a(BufHandle("a", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
Tensor b =
Compute("b", {{n, "i"}}, [&](const VarHandle& i) { return a.load(i); });
LoopNest l({b});
@@ -220,9 +220,9 @@
auto bounds_info = inferBounds(outer);
ASSERT_EQ(bounds_info.size(), 2);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, 95}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, 95}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -233,9 +233,9 @@
auto bounds_info = inferBounds(tail);
ASSERT_EQ(bounds_info.size(), 2);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{96, 99}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{96, 99}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -256,7 +256,7 @@
ExprHandle H(200);
ExprHandle CW(32);
ExprHandle CH(20);
- Placeholder a(BufHandle("a", {H, W}, kFloat));
+ BufHandle a("a", {H, W}, kFloat);
Tensor b = Compute(
"b", {{H, "y"}, {W, "x"}}, [&](const VarHandle& y, const VarHandle& x) {
return x * y;
@@ -273,9 +273,9 @@
auto bounds_info = inferBounds(loops[0]);
ASSERT_EQ(bounds_info.size(), 3);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{100, 119}, {100, 131}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{100, 119}, {100, 131}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kLoad);
@@ -290,9 +290,9 @@
auto bounds_info = inferBounds(loops[1]);
ASSERT_EQ(bounds_info.size(), 3);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{-1, -1}, {100, 131}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{-1, -1}, {100, 131}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kLoad);
@@ -307,9 +307,9 @@
auto bounds_info = inferBounds(body);
ASSERT_EQ(bounds_info.size(), 3);
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{-1, -1}, {-1, -1}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{-1, -1}, {-1, -1}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kLoad);
@@ -323,7 +323,7 @@
TEST(BoundsInference, Adjacent) {
ExprHandle H(6);
- Placeholder a(BufHandle("a", {20}, kFloat));
+ BufHandle a("a", {20}, kFloat);
Tensor b =
Compute("b", {{H, "x"}}, [&](const VarHandle& x) { return a.load(x); });
Tensor c = Compute(
@@ -337,9 +337,9 @@
ASSERT_EQ(bounds_info.size(), 2);
// reads from a[0:5], writes to b[0:5]
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, 5}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, 5}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -351,9 +351,9 @@
ASSERT_EQ(bounds_info.size(), 2);
// reads from a[0+6:5+6], writes to c[0:5]
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{6, 11}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{6, 11}});
ASSERT_EQ(bounds_info.at(c.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(c.buf())[0].kind, kStore);
@@ -366,9 +366,9 @@
// Should be union of above 2 bounds, but this time the bounds of A can be
// merged.
- ASSERT_EQ(bounds_info.at(a.data()).size(), 1);
- ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad);
- verifyConstBounds(bounds_info.at(a.data())[0], {{0, 11}});
+ ASSERT_EQ(bounds_info.at(a.node()).size(), 1);
+ ASSERT_EQ(bounds_info.at(a.node())[0].kind, kLoad);
+ verifyConstBounds(bounds_info.at(a.node())[0], {{0, 11}});
ASSERT_EQ(bounds_info.at(b.buf()).size(), 1);
ASSERT_EQ(bounds_info.at(b.buf())[0].kind, kStore);
@@ -381,7 +381,7 @@
}
TEST(BoundsInference, MultipleTopLoopLoad) {
- Placeholder a(BufHandle("a", {100}, kFloat));
+ BufHandle a("a", {100}, kFloat);
Tensor b =
Compute("b", {{64, "x"}}, [&](const VarHandle& x) { return a.load(x); });
Tensor c = Compute(
@@ -396,7 +396,7 @@
// a only read.
{
- auto bounds = bounds_info[a.data()];
+ auto bounds = bounds_info[a.node()];
ASSERT_EQ(bounds.size(), 1);
// One dimension.
auto bound = bounds[0];
diff --git a/test/cpp/tensorexpr/test_conv.cpp b/test/cpp/tensorexpr/test_conv.cpp
index 1937277..4f43e4f 100644
--- a/test/cpp/tensorexpr/test_conv.cpp
+++ b/test/cpp/tensorexpr/test_conv.cpp
@@ -26,11 +26,11 @@
constexpr int kPad = 1, kStride = 2, kGroups = C;
constexpr int CperG = C / kGroups;
- te::Placeholder input("input", te::kFloat, {N, C, H, W});
- te::Placeholder weight("weight", te::kFloat, {K, CperG, R, S});
- te::Placeholder bias("bias", te::kFloat, {K});
- te::Tensor output = te::conv2d_depthwise(
- input.handle(), weight.handle(), bias.handle(), kStride, kPad, kGroups);
+ te::BufHandle input("input", {N, C, H, W}, te::kFloat);
+ te::BufHandle weight("weight", {K, CperG, R, S}, te::kFloat);
+ te::BufHandle bias("bias", {K}, te::kFloat);
+ te::Tensor output =
+ te::conv2d_depthwise(input, weight, bias, kStride, kPad, kGroups);
te::LoopNest loop({output});
loop.simplify();
@@ -57,10 +57,10 @@
constexpr int kPad = 1, kStride = 2, kGroups = C;
constexpr int CperG = C / kGroups;
- te::Placeholder input("input", te::kFloat, {N, C, H, W});
- te::Placeholder weight("weight", te::kFloat, {K, CperG, R, S});
- te::Tensor output = te::conv2d_depthwise(
- input.handle(), weight.handle(), kStride, kPad, kGroups);
+ te::BufHandle input("input", {N, C, H, W}, te::kFloat);
+ te::BufHandle weight("weight", {K, CperG, R, S}, te::kFloat);
+ te::Tensor output =
+ te::conv2d_depthwise(input, weight, kStride, kPad, kGroups);
te::LoopNest loop({output});
loop.simplify();
@@ -90,12 +90,11 @@
te::VarHandle kStride_var("kStride", te::kInt);
te::VarHandle kGroups_var("kGroups", te::kInt);
- te::Placeholder input("input", te::kFloat, {N_var, C_var, H_var, W_var});
- te::Placeholder weight(
- "weight", te::kFloat, {K_var, CperG_var, R_var, S_var});
+ te::BufHandle input("input", {N_var, C_var, H_var, W_var}, te::kFloat);
+ te::BufHandle weight("weight", {K_var, CperG_var, R_var, S_var}, te::kFloat);
te::Tensor output = te::conv2d_depthwise(
- input.handle(),
- weight.handle(),
+ input,
+ weight,
N_var,
C_var,
H_var,
@@ -187,8 +186,8 @@
ASSERT_EQ(ref.size(2), OH);
ASSERT_EQ(ref.size(3), OW);
- te::Placeholder inputB(te::BufHandle("input", {N, C, H, W}, te::kFloat));
- te::Placeholder filterB(te::BufHandle("filter", {K, C, R, S}, te::kFloat));
+ te::BufHandle inputB("input", {N, C, H, W}, te::kFloat);
+ te::BufHandle filterB("filter", {K, C, R, S}, te::kFloat);
te::Tensor conv = te::Reduce(
"conv",
diff --git a/test/cpp/tensorexpr/test_cpp_codegen.cpp b/test/cpp/tensorexpr/test_cpp_codegen.cpp
index d40caa1..2603611 100644
--- a/test/cpp/tensorexpr/test_cpp_codegen.cpp
+++ b/test/cpp/tensorexpr/test_cpp_codegen.cpp
@@ -145,8 +145,8 @@
}
TEST(CppPrinter, LoadStore) {
- Placeholder a(BufHandle("A", {2, 3}, kInt));
- Placeholder b(BufHandle("B", {3, 4}, kInt));
+ BufHandle a("A", {2, 3}, kInt);
+ BufHandle b("B", {3, 4}, kInt);
auto store = b.store({2, 2}, a.load(1, 1));
STR_CHECK(
store, "B[(0 + 2 * (1 * 4)) + 2 * 1] = A[(0 + 1 * (1 * 3)) + 1 * 1];\n");
@@ -176,9 +176,9 @@
TEST(CppPrinter, For) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
VarHandle i("i", kInt);
auto f = For::make(i, 0, N, c.store({i}, Add::make(a.load(i), b.load(i))));
const std::string pattern = R"(
@@ -190,7 +190,7 @@
}
TEST(CppPrinter, Cond) {
- Placeholder x(BufHandle("X", {1}, kInt));
+ BufHandle x("X", {1}, kInt);
auto cmp = CompareSelect::make(x.load(0), 10, CompareSelectOperation::kLT);
auto cond =
Cond::make(cmp, x.store({0}, x.load(0) + 1), x.store({0}, x.load(0) - 1));
diff --git a/test/cpp/tensorexpr/test_cuda.cpp b/test/cpp/tensorexpr/test_cuda.cpp
index 164ff77..6c1a11a 100644
--- a/test/cpp/tensorexpr/test_cuda.cpp
+++ b/test/cpp/tensorexpr/test_cuda.cpp
@@ -31,8 +31,8 @@
const int block_count = 16;
const int block_size = 128;
Dtype dtype = ToDtype<ctype>();
- Placeholder a_buf("a", dtype, {num_iter, block_count, block_size});
- Placeholder b_buf("b", dtype, {num_iter, block_count, block_size});
+ BufHandle a_buf("a", {num_iter, block_count, block_size}, dtype);
+ BufHandle b_buf("b", {num_iter, block_count, block_size}, dtype);
Tensor c = Compute(
"c",
{
@@ -96,7 +96,7 @@
const int block_count = 16;
const int block_size = 128;
Dtype dtype = ToDtype<float>();
- Placeholder a_buf("a", dtype, {num_iter, block_count, block_size});
+ BufHandle a_buf("a", {num_iter, block_count, block_size}, dtype);
Tensor c = Compute(
"c",
{
@@ -160,8 +160,8 @@
}
static void testCudaTestVectorAdd02_impl(int N, int block_size) {
- Placeholder a_buf("a", kFloat, {N});
- Placeholder b_buf("b", kFloat, {N});
+ BufHandle a_buf("a", {N}, kFloat);
+ BufHandle b_buf("b", {N}, kFloat);
Tensor c = Compute(
"c",
{
@@ -220,7 +220,7 @@
TEST(Cuda, HalfCast_CUDA) {
auto half = ToDtype<at::Half>();
- Placeholder a("a", half, {4});
+ BufHandle a("a", {4}, half);
Tensor b = Compute("b", {{4, "n"}}, [&](const VarHandle& i) {
return Cast::make(kFloat, a.load(i));
});
@@ -260,8 +260,8 @@
auto testWithSize = [](int32_t M, int32_t N) {
VarHandle m("m", kInt);
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {m, n}, kFloat));
- Placeholder b(BufHandle("b", {m, n}, kFloat));
+ BufHandle a("a", {m, n}, kFloat);
+ BufHandle b("b", {m, n}, kFloat);
Tensor c = Compute(
"c", {{m, "m"}, {n, "n"}}, [&](const VarHandle& i, const VarHandle& j) {
return a.load(i, j) + b.load(i, j);
@@ -379,7 +379,7 @@
TEST(Cuda, DynamicShapeSplit_CUDA) {
constexpr int N = 4096;
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
Tensor b = Compute(
"b", {{n, "n"}}, [&](const VarHandle& i) { return a.load(i) * 2.0f; });
LoopNest l({b});
@@ -427,8 +427,8 @@
TEST(Cuda, OneBlockOneThreadGlobalReduce1_CUDA) {
const static int N = 1024;
- Placeholder data_buf("data", kFloat, {N});
- Placeholder output_buf("output", kFloat, {1});
+ BufHandle data_buf("data", {N}, kFloat);
+ BufHandle output_buf("output", {1}, kFloat);
// The test adds the following code for trivial reduction:
// for (int bidx = 0; bidx < 1; bidx++) { // blockIdx.x
@@ -442,8 +442,8 @@
StorePtr init_store = output_buf.store({0}, 0.f);
VarHandle i1("i1", kInt);
- ExprHandle load_data = Load::make(BufHandle(data_buf.data()), {i1});
- ExprHandle load_output = Load::make(BufHandle(output_buf.data()), {0});
+ ExprHandle load_data = Load::make(data_buf, {i1});
+ ExprHandle load_output = Load::make(output_buf, {0});
ExprHandle add_value = load_output + load_data;
StorePtr store_output = output_buf.store({0}, add_value);
ForPtr for_output = For::make(i1, 0, N, store_output);
@@ -505,8 +505,8 @@
// b[0] = b[0] + a[t] // implied atomic
// clang-format on
- Placeholder a_buf("a", kFloat, {N});
- Placeholder b_buf("b", kFloat, {1});
+ BufHandle a_buf("a", {N}, kFloat);
+ BufHandle b_buf("b", {1}, kFloat);
StorePtr init_store = b_buf.store({0}, 0.f);
VarHandle t("t", kInt);
@@ -524,8 +524,8 @@
// for t in 0..1024: // thread-idx
// b[0] = b[0] + a[t] // implied atomic
- ExprHandle load_a = Load::make(BufHandle(a_buf.data()), {t});
- ExprHandle load_b = Load::make(BufHandle(b_buf.data()), {0});
+ ExprHandle load_a = Load::make(a_buf, {t});
+ ExprHandle load_b = Load::make(b_buf, {0});
ExprHandle add_value = load_b + load_a;
StorePtr store_b = b_buf.store({0}, add_value);
ForPtr for_b = For::make(t, 0, N, store_b, thread_idx_options);
@@ -585,8 +585,8 @@
// covered by its own thread-idx
const static int N = 1024;
- Placeholder a_buf("a", kFloat, {2});
- Placeholder b_buf("b", kFloat, {N});
+ BufHandle a_buf("a", {2}, kFloat);
+ BufHandle b_buf("b", {N}, kFloat);
VarHandle k("k", kInt);
VarHandle l("l", kInt);
@@ -597,7 +597,7 @@
// for n in 0..2:
// a[0] = a[0] + n
StorePtr store_a0_0 = a_buf.store({0}, 0.f);
- ExprHandle load_a0 = Load::make(BufHandle(a_buf.data()), {0});
+ ExprHandle load_a0 = Load::make(a_buf, {0});
ExprHandle v1 = load_a0 + n;
StorePtr store_a0_v1 = a_buf.store({0}, v1);
ForPtr loop_a_0 = For::make(n, 0, 2, store_a0_v1);
@@ -686,8 +686,8 @@
LoopOptions block_idx_opt;
block_idx_opt.set_gpu_block_index(0);
- Placeholder a("a", kFloat, {1, M, N});
- Placeholder b("b", kFloat, {1});
+ BufHandle a("a", {1, M, N}, kFloat);
+ BufHandle b("b", {1}, kFloat);
VarHandle k("k", kInt);
VarHandle m("m", kInt);
VarHandle n("n", kInt);
@@ -715,8 +715,7 @@
// for n in 0..64: // thread_idx
// c(n) = c(n) + a(k, m, n)
ExprHandle load_cn = Load::make(kFloat, c, {n});
- ExprHandle a_kmn =
- Load::make(BufHandle(a.data()), {k * (M * N) + m * N + n});
+ ExprHandle a_kmn = Load::make(a, {k * (M * N) + m * N + n});
ExprHandle v_add = load_cn + a_kmn;
StorePtr store_cn_v = Store::make(c, {n}, v_add);
ForPtr loop_n2 = For::make(n, 0, N, store_cn_v, thread_idx_opt);
@@ -821,8 +820,8 @@
LoopOptions block_idx_opt;
block_idx_opt.set_gpu_block_index(0);
- Placeholder a("a", kFloat, {1, M, N});
- Placeholder b("b", kFloat, {1});
+ BufHandle a("a", {1, M, N}, kFloat);
+ BufHandle b("b", {1}, kFloat);
VarHandle k("k", kInt);
VarHandle m("m", kInt);
VarHandle n("n", kInt);
@@ -913,7 +912,7 @@
TEST(Cuda, HalfSupport_CUDA) {
auto half = ToDtype<at::Half>();
- Placeholder a("a", half, {4});
+ BufHandle a("a", {4}, half);
Tensor b = Compute("b", {{4, "n"}}, [&](const VarHandle& i) {
return Cast::make(half, ExprHandle(2.0f) * a.load(i));
});
@@ -970,7 +969,7 @@
TEST(Cuda, HalfPropagation_CUDA) {
auto half = ToDtype<at::Half>();
- Placeholder a("a", half, {4});
+ BufHandle a("a", {4}, half);
Tensor relu = Compute("relu", {{4, "n"}}, [&](const VarHandle& i) {
return Max::make(a.load(i), ExprHandle(alloc<HalfImm>(0)), true);
});
@@ -1017,9 +1016,9 @@
}
TEST(Cuda, UnusedHalfArgument_CUDA) {
- Placeholder a("a", kFloat, {4});
+ BufHandle a("a", {4}, kFloat);
auto half = ToDtype<at::Half>();
- Placeholder b("b", half, {4});
+ BufHandle b("b", {4}, half);
Tensor relu = Compute("relu", {{4, "n"}}, [&](const VarHandle& i) {
return Max::make(a.load(i), ExprHandle(alloc<FloatImm>(0)), true);
});
@@ -1073,9 +1072,9 @@
}
TEST(Cuda, PrioritizeDependents_CUDA) {
- Placeholder a("a", kFloat, {10});
- Placeholder b("b", kFloat, {12});
- Placeholder c("c", kFloat, {12});
+ BufHandle a("a", {10}, kFloat);
+ BufHandle b("b", {12}, kFloat);
+ BufHandle c("c", {12}, kFloat);
LoopOptions block_idx_opt;
block_idx_opt.set_gpu_block_index(0);
@@ -1088,8 +1087,8 @@
* c[i] = (i < 10 ? a[i] + b[i] : b[i]);
* }
*/
- ExprHandle load_a = Load::make(BufHandle(a.data()), {i});
- ExprHandle load_b = Load::make(BufHandle(b.data()), {i});
+ ExprHandle load_a = a.load({i});
+ ExprHandle load_b = b.load({i});
ExprHandle cmp = CompareSelect::make(i, 10, CompareSelectOperation::kLT);
ExprHandle ite = IfThenElse::make(cmp, Add::make(load_a, load_b), load_b);
@@ -1148,8 +1147,8 @@
TEST(Cuda, MaskBlockDim_CUDA) {
int A_SIZE = 100;
int B_SIZE = 50;
- Placeholder a_buf("a", kFloat, {A_SIZE});
- Placeholder b_buf("b", kFloat, {B_SIZE});
+ BufHandle a_buf("a", {A_SIZE}, kFloat);
+ BufHandle b_buf("b", {B_SIZE}, kFloat);
Tensor c = Compute("c", {{A_SIZE, "i"}}, [&](const VarHandle& i) {
return a_buf.load(i) + 10;
});
@@ -1240,8 +1239,8 @@
TEST(Cuda, MaskThreadDim_CUDA) {
int A_SIZE = 50;
int B_SIZE = 100;
- Placeholder a_buf("a", kFloat, {A_SIZE});
- Placeholder b_buf("b", kFloat, {B_SIZE});
+ BufHandle a_buf("a", {A_SIZE}, kFloat);
+ BufHandle b_buf("b", {B_SIZE}, kFloat);
Tensor c = Compute("c", {{A_SIZE, "i"}}, [&](const VarHandle& i) {
return a_buf.load(i) + 10;
});
@@ -1334,8 +1333,8 @@
TEST(Cuda, MaskMultiBlockDim_CUDA) {
int A_SIZE = 100;
int B_SIZE = 50;
- Placeholder a_buf("a", kFloat, {A_SIZE});
- Placeholder b_buf("b", kFloat, {B_SIZE});
+ BufHandle a_buf("a", {A_SIZE}, kFloat);
+ BufHandle b_buf("b", {B_SIZE}, kFloat);
Tensor c = Compute("c", {{A_SIZE, "i"}}, [&](const VarHandle& i) {
return a_buf.load(i) + 10;
});
@@ -1427,8 +1426,8 @@
TEST(Cuda, MaskBlockAndThreadDim_CUDA) {
int A_SIZE = 100;
int B_SIZE = 50;
- Placeholder a_buf("a", kFloat, {A_SIZE});
- Placeholder b_buf("b", kFloat, {B_SIZE});
+ BufHandle a_buf("a", {A_SIZE}, kFloat);
+ BufHandle b_buf("b", {B_SIZE}, kFloat);
Tensor c = Compute("c", {{A_SIZE, "i"}}, [&](const VarHandle& i) {
return a_buf.load(i) + 10;
});
@@ -1519,8 +1518,8 @@
int OUTER_SIZE = 10;
int A_SIZE = 100;
int B_SIZE = 50;
- Placeholder a_buf("a", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder b_buf("b", kFloat, {OUTER_SIZE, B_SIZE});
+ BufHandle a_buf("a", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle b_buf("b", {OUTER_SIZE, B_SIZE}, kFloat);
Tensor c = Compute(
"C",
{{OUTER_SIZE, "i"}, {A_SIZE, "j"}},
@@ -1648,8 +1647,8 @@
VarHandle OUTER_SIZE("OUTER_SIZE", kInt);
VarHandle A_SIZE("A_SIZE", kInt);
VarHandle B_SIZE("B_SIZE", kInt);
- Placeholder a_buf("a", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder b_buf("b", kFloat, {OUTER_SIZE, B_SIZE});
+ BufHandle a_buf("a", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle b_buf("b", {OUTER_SIZE, B_SIZE}, kFloat);
Tensor c = Compute(
"C",
{{OUTER_SIZE, "i"}, {A_SIZE, "j"}},
@@ -1783,10 +1782,10 @@
int OUTER_SIZE = 10;
int A_SIZE = 100;
int B_SIZE = 50;
- Placeholder a_buf("a", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder b_buf("b", kFloat, {OUTER_SIZE, B_SIZE});
- Placeholder c_buf("c", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder d_buf("d", kFloat, {OUTER_SIZE, B_SIZE});
+ BufHandle a_buf("a", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle b_buf("b", {OUTER_SIZE, B_SIZE}, kFloat);
+ BufHandle c_buf("c", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle d_buf("d", {OUTER_SIZE, B_SIZE}, kFloat);
// Can't build this using Compute and transforms yet.
LoopOptions blockBound;
@@ -1921,10 +1920,10 @@
int OUTER_SIZE = 10;
int A_SIZE = 100;
int B_SIZE = 50;
- Placeholder a_buf("a", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder b_buf("b", kFloat, {OUTER_SIZE, B_SIZE});
- Placeholder c_buf("c", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder d_buf("d", kFloat, {OUTER_SIZE, B_SIZE});
+ BufHandle a_buf("a", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle b_buf("b", {OUTER_SIZE, B_SIZE}, kFloat);
+ BufHandle c_buf("c", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle d_buf("d", {OUTER_SIZE, B_SIZE}, kFloat);
// Can't build this using Compute and transforms yet.
LoopOptions blockBound;
@@ -2059,8 +2058,8 @@
int OUTER_SIZE = 10;
int A_SIZE = 30;
int B_SIZE = 15;
- Placeholder a_buf("a", kFloat, {OUTER_SIZE, A_SIZE});
- Placeholder b_buf("b", kFloat, {OUTER_SIZE, B_SIZE});
+ BufHandle a_buf("a", {OUTER_SIZE, A_SIZE}, kFloat);
+ BufHandle b_buf("b", {OUTER_SIZE, B_SIZE}, kFloat);
Tensor c = Compute(
"C",
{{OUTER_SIZE, "i"}, {A_SIZE, "j"}},
@@ -2189,8 +2188,8 @@
int OUTER_B_SIZE = 5;
int A_SIZE = 30;
int B_SIZE = 15;
- Placeholder a_buf("a", kFloat, {OUTER_A_SIZE, A_SIZE});
- Placeholder b_buf("b", kFloat, {OUTER_B_SIZE, B_SIZE});
+ BufHandle a_buf("a", {OUTER_A_SIZE, A_SIZE}, kFloat);
+ BufHandle b_buf("b", {OUTER_B_SIZE, B_SIZE}, kFloat);
Tensor c = Compute(
"C",
{{OUTER_A_SIZE, "i"}, {A_SIZE, "j"}},
diff --git a/test/cpp/tensorexpr/test_expr.cpp b/test/cpp/tensorexpr/test_expr.cpp
index d240535..b738978 100644
--- a/test/cpp/tensorexpr/test_expr.cpp
+++ b/test/cpp/tensorexpr/test_expr.cpp
@@ -60,8 +60,8 @@
}
TEST(Expr, LetStmtTest01) {
- Placeholder a_buf("a", kFloat, {1});
- Placeholder b_buf("b", kFloat, {1});
+ BufHandle a_buf("a", {1}, kFloat);
+ BufHandle b_buf("b", {1}, kFloat);
ExprHandle load_a = a_buf.load(0);
VarHandle var = VarHandle("v", kFloat);
@@ -157,9 +157,9 @@
const int kVectorCount = 128;
const int kTotalSize = kVectorSize * kVectorCount;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c_buf(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {kTotalSize}, kFloat);
+ BufHandle b_buf("B", {kTotalSize}, kFloat);
+ BufHandle c_buf("C", {kTotalSize}, kFloat);
/*
Build the following:
@@ -199,9 +199,9 @@
TEST(Expr, CompareSelectEQ) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 1);
std::vector<int> b_buffer(N, 1);
std::vector<int> c_buffer(N, 0);
@@ -237,9 +237,9 @@
// different from the output dtype and verifies that it works correctly:
// result = ((int)lhs == (int)rhs) ? (float)retval1 : (float)retval2
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kFloat));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kFloat);
std::vector<int> a_buffer(N, 1);
std::vector<int> b_buffer(N, 1);
std::vector<float> c_buffer(N, 0.0f);
@@ -275,8 +275,8 @@
TEST(Expr, IntrinsicsDtypes) {
constexpr int N = 256;
- Placeholder a(BufHandle("A", {N}, kDouble));
- Placeholder b(BufHandle("B", {N}, kDouble));
+ BufHandle a("A", {N}, kDouble);
+ BufHandle b("B", {N}, kDouble);
std::vector<double> a_buffer(N, -10.0);
std::vector<double> b_buffer(N, 0.0);
std::vector<double> b_ref(N, 10.0);
@@ -539,9 +539,9 @@
TEST(Expr, DynamicShapeAdd) {
auto testWithSize = [](int32_t size) {
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {n}, kFloat));
- Placeholder b(BufHandle("b", {n}, kFloat));
- Placeholder c(BufHandle("c", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
+ BufHandle b("b", {n}, kFloat);
+ BufHandle c("c", {n}, kFloat);
VarHandle i("i", kInt);
StmtPtr s = For::make(i, 0, n, c.store({i}, a.load(i) + b.load(i)));
std::vector<float> aData(size, 1.0f);
@@ -558,7 +558,7 @@
void testCond01() {
const int N = 16;
PaddedBuffer<float> a_v(N);
- Placeholder a_buf("a", kFloat, {N});
+ BufHandle a_buf("a", {N}, kFloat);
VarHandle index = VarHandle("index", kInt);
StmtPtr assign_x2 = a_buf.store({index}, cast<float>(index) * 2);
StmtPtr assign_x3 = a_buf.store({index}, cast<float>(index) * 3);
@@ -615,7 +615,7 @@
void testStmtClone() {
const int N = 16;
- Placeholder a_buf("a", kInt, {N});
+ BufHandle a_buf("a", {N}, kInt);
VarHandle index = VarHandle("index", kInt);
StmtPtr body = a_buf.store({index}, 5);
StmtPtr loop = For::make(index, 0, N, body);
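test_expr.cpp adds the second common shape of the change: tests that wrapped an
explicit BufHandle in a Placeholder now declare the handle directly, collapsing
the wrapper and wrappee into a single declaration:

  // before: Placeholder a(BufHandle("A", {N}, kInt));
  BufHandle a("A", {N}, kInt);

The ExprHandle(kTotalSize) wrapper on integer dims is also dropped where the
implicit int-to-ExprHandle conversion suffices, though some sites below keep it.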
diff --git a/test/cpp/tensorexpr/test_external_calls.cpp b/test/cpp/tensorexpr/test_external_calls.cpp
index 176158e..4dd89dc 100644
--- a/test/cpp/tensorexpr/test_external_calls.cpp
+++ b/test/cpp/tensorexpr/test_external_calls.cpp
@@ -20,9 +20,9 @@
using namespace torch::jit::tensorexpr;
TEST(ExternalCall, Conv2d_float) {
- Placeholder Input("Input", kFloat, {1, 3, 224, 224});
- Placeholder Weight("Weight", kFloat, {16, 3, 3, 3});
- Placeholder Bias("Bias", kFloat, {16});
+ BufHandle Input("Input", {1, 3, 224, 224}, kFloat);
+ BufHandle Weight("Weight", {16, 3, 3, 3}, kFloat);
+ BufHandle Bias("Bias", {16}, kFloat);
BufHandle ResultBuf("Result", {1, 16, 112, 112}, kFloat);
int64_t stride = 2;
int64_t pad = 1;
@@ -34,9 +34,7 @@
ExternalCall::make(
ResultBuf,
"nnc_aten_conv2d",
- {BufHandle(Input.data()),
- BufHandle(Weight.data()),
- BufHandle(Bias.data())},
+ {Input, Weight, Bias},
{stride, stride, pad, pad, dilation, dilation, groups}));
LoopNest l({Result});
l.prepareForCodegen();
@@ -83,9 +81,9 @@
TEST(ExternalCall, Conv2d_int) {
// A similar test, but now using kInt tensors
- Placeholder Input("Input", kInt, {1, 3, 224, 224});
- Placeholder Weight("Weight", kInt, {16, 3, 3, 3});
- Placeholder Bias("Bias", kInt, {16});
+ BufHandle Input("Input", {1, 3, 224, 224}, kInt);
+ BufHandle Weight("Weight", {16, 3, 3, 3}, kInt);
+ BufHandle Bias("Bias", {16}, kInt);
BufHandle ResultBuf("Result", {1, 16, 112, 112}, kInt);
int64_t stride = 2;
int64_t pad = 1;
@@ -97,9 +95,7 @@
ExternalCall::make(
ResultBuf,
"nnc_aten_conv2d",
- {BufHandle(Input.data()),
- BufHandle(Weight.data()),
- BufHandle(Bias.data())},
+ {Input, Weight, Bias},
{stride, stride, pad, pad, dilation, dilation, groups}));
LoopNest l({Result});
l.prepareForCodegen();
@@ -144,17 +140,13 @@
}
TEST(ExternalCall, Conv2d_nobias_noargs) {
- Placeholder Input("Input", kFloat, {1, 16, 112, 112});
- Placeholder Weight("Weight", kFloat, {16, 16, 1, 1});
+ BufHandle Input("Input", {1, 16, 112, 112}, kFloat);
+ BufHandle Weight("Weight", {16, 16, 1, 1}, kFloat);
BufHandle ResultBuf("Result", {1, 16, 112, 112}, kFloat);
Tensor Result = Tensor(
ResultBuf.node(),
- ExternalCall::make(
- ResultBuf,
- "nnc_aten_conv2d",
- {BufHandle(Input.data()), BufHandle(Weight.data())},
- {}));
+ ExternalCall::make(ResultBuf, "nnc_aten_conv2d", {Input, Weight}, {}));
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
@@ -189,9 +181,9 @@
}
TEST(ExternalCall, Addmm_float) {
- Placeholder Input("Input", kFloat, {100, 300});
- Placeholder Mat1("Mat1", kFloat, {100, 200});
- Placeholder Mat2("Mat2", kFloat, {200, 300});
+ BufHandle Input("Input", {100, 300}, kFloat);
+ BufHandle Mat1("Mat1", {100, 200}, kFloat);
+ BufHandle Mat2("Mat2", {200, 300}, kFloat);
BufHandle ResultBuf("Result", {100, 300}, kFloat);
int64_t beta = 2;
int64_t alpha = 2;
@@ -199,12 +191,7 @@
Tensor Result = Tensor(
ResultBuf.node(),
ExternalCall::make(
- ResultBuf,
- "nnc_aten_addmm",
- {BufHandle(Input.data()),
- BufHandle(Mat1.data()),
- BufHandle(Mat2.data())},
- {beta, alpha}));
+ ResultBuf, "nnc_aten_addmm", {Input, Mat1, Mat2}, {beta, alpha}));
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
@@ -245,7 +232,7 @@
TEST(ExternalCall, Prepacked_Linear_float) {
using namespace at::native::xnnpack;
- Placeholder Input("Input", kFloat, {100, 200});
+ BufHandle Input("Input", {100, 200}, kFloat);
BufHandle ResultBuf("Result", {100, 300}, kFloat);
// Calculate reference result using at::linear.
@@ -273,13 +260,13 @@
auto prepacked = linear_clamp_prepack_op.call(
weight, bias, c10::optional<at::Scalar>(), c10::optional<at::Scalar>());
- Placeholder DummyPrepacked("DummyPrepacked", kFloat, {1});
+ BufHandle DummyPrepacked("DummyPrepacked", {1}, kFloat);
Tensor Result = Tensor(
ResultBuf.node(),
ExternalCall::make(
ResultBuf,
"nnc_prepacked_linear_clamp_run",
- {BufHandle(Input.data()), BufHandle(DummyPrepacked.data())},
+ {Input, DummyPrepacked},
{}));
LoopNest l({Result});
l.prepareForCodegen();
@@ -308,7 +295,7 @@
TEST(ExternalCall, Prepacked_Conv2d_float) {
using namespace at::native::xnnpack;
- Placeholder Input("Input", kFloat, {1, 3, 224, 224});
+ BufHandle Input("Input", {1, 3, 224, 224}, kFloat);
BufHandle ResultBuf("Result", {1, 16, 112, 112}, kFloat);
int64_t stride = 2;
int64_t pad = 1;
@@ -358,13 +345,13 @@
c10::optional<at::Scalar>(),
c10::optional<at::Scalar>());
- Placeholder DummyPrepacked("DummyPrepacked", kFloat, {1});
+ BufHandle DummyPrepacked("DummyPrepacked", {1}, kFloat);
Tensor Result = Tensor(
ResultBuf.node(),
ExternalCall::make(
ResultBuf,
"nnc_prepacked_conv2d_clamp_run",
- {BufHandle(Input.data()), BufHandle(DummyPrepacked.data())},
+ {Input, DummyPrepacked},
{}));
LoopNest l({Result});
l.prepareForCodegen();
@@ -415,17 +402,13 @@
auto intV = std::vector<int>(v.begin(), v.end());
return std::vector<ExprHandle>(intV.begin(), intV.end());
};
- Placeholder A("A", kFloat, toExprHandleVec(aShape));
- Placeholder B("", kFloat, toExprHandleVec(bShape));
+ BufHandle A("A", toExprHandleVec(aShape), kFloat);
+ BufHandle B("B", toExprHandleVec(bShape), kFloat);
BufHandle ResultBuf("Result", toExprHandleVec(resShape), kFloat);
Tensor Result = Tensor(
ResultBuf.node(),
- ExternalCall::make(
- ResultBuf,
- externCallName,
- {BufHandle(A.data()), BufHandle(B.data())},
- {}));
+ ExternalCall::make(ResultBuf, externCallName, {A, B}, {}));
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
@@ -500,13 +483,12 @@
std::vector<ExprHandle> externCallArgs;
std::tie(aShape, resShape, torchFunc, externCallName, externCallArgs) =
curTest;
- Placeholder A("A", kFloat, toExprHandleVec(aShape));
+ BufHandle A("A", toExprHandleVec(aShape), kFloat);
BufHandle ResultBuf("Result", toExprHandleVec(resShape), kFloat);
Tensor Result = Tensor(
ResultBuf.node(),
- ExternalCall::make(
- ResultBuf, externCallName, {BufHandle(A.data())}, externCallArgs));
+ ExternalCall::make(ResultBuf, externCallName, {A}, externCallArgs));
LoopNest l({Result});
l.prepareForCodegen();
l.simplify();
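The external-call hunks remove the one place Placeholder still needed an escape
hatch: argument lists that unwrapped each input via BufHandle(Input.data()) now
pass the handles as-is, since ExternalCall::make takes its buffer arguments as
BufHandles. Using the identifiers from the Conv2d_float test above:

  ExternalCall::make(
      ResultBuf,
      "nnc_aten_conv2d",
      {Input, Weight, Bias},
      {stride, stride, pad, pad, dilation, dilation, groups});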
diff --git a/test/cpp/tensorexpr/test_llvm.cpp b/test/cpp/tensorexpr/test_llvm.cpp
index 92a7766..61748b0 100644
--- a/test/cpp/tensorexpr/test_llvm.cpp
+++ b/test/cpp/tensorexpr/test_llvm.cpp
@@ -231,8 +231,8 @@
TEST(LLVM, fastLogFloat) {
const int kTotalSize = 128 * 128;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b_buf(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kFloat);
VarHandle index = VarHandle("index", kInt);
ExprHandle load_a = a_buf.load(index);
@@ -261,7 +261,7 @@
}
TEST(LLVM, LetTest01) {
- Placeholder a(BufHandle("A", {1}, kFloat));
+ BufHandle a("A", {1}, kFloat);
std::vector<float> v = {1, 0};
std::vector<void*> args({v.data()});
VarHandle x("x", kFloat);
@@ -276,7 +276,7 @@
}
TEST(LLVM, LetTest02) {
- Placeholder a(BufHandle("A", {1}, kFloat));
+ BufHandle a("A", {1}, kFloat);
std::vector<float> v = {1, 0};
std::vector<void*> args({v.data()});
VarHandle x("x", kFloat);
@@ -294,7 +294,7 @@
}
TEST(LLVM, LetTestMultitype) {
- Placeholder a(BufHandle("A", {1}, kDouble));
+ BufHandle a("A", {1}, kDouble);
std::vector<double> v = {1, 0};
std::vector<void*> args({v.data()});
VarHandle x("x", kByte);
@@ -315,7 +315,7 @@
}
TEST(LLVM, BufferTest) {
- Placeholder a(BufHandle("A", {32}, kFloat));
+ BufHandle a("A", {32}, kFloat);
std::vector<int32_t> v(5);
std::vector<void*> args({v.data()});
auto rv = IntImm::make(0);
@@ -324,7 +324,7 @@
}
TEST(LLVM, BlockTest) {
- Placeholder a(BufHandle("A", {32}, kInt));
+ BufHandle a("A", {32}, kInt);
std::vector<int32_t> v = {1, 2};
std::vector<void*> args({v.data()});
@@ -341,8 +341,8 @@
}
TEST(LLVM, LoadStoreTest) {
- Placeholder a(BufHandle("A", {1}, kInt));
- Placeholder b(BufHandle("B", {1}, kInt));
+ BufHandle a("A", {1}, kInt);
+ BufHandle b("B", {1}, kInt);
std::vector<int32_t> a_buffer = {42};
std::vector<int32_t> b_buffer = {-11};
@@ -355,9 +355,9 @@
}
TEST(LLVM, IfThenElseTest) {
- Placeholder a(BufHandle("A", {1}, kInt));
- Placeholder b(BufHandle("B", {1}, kInt));
- Placeholder c(BufHandle("C", {1}, kInt));
+ BufHandle a("A", {1}, kInt);
+ BufHandle b("B", {1}, kInt);
+ BufHandle c("C", {1}, kInt);
std::vector<int32_t> a_buffer = {42};
std::vector<int32_t> b_buffer = {-11};
std::vector<int32_t> c_buffer = {1};
@@ -372,7 +372,7 @@
// if (x < 10) x = x + 1
TEST(LLVM, CondNoFalseBlockTest) {
- Placeholder x(BufHandle("X", {1}, kInt));
+ BufHandle x("X", {1}, kInt);
auto cmp = CompareSelect::make(x.load(0), 10, CompareSelectOperation::kLT);
auto cond = Cond::make(cmp, x.store({0}, x.load(0) + 1), nullptr);
@@ -395,7 +395,7 @@
// x = x - 1;
// }
TEST(LLVM, CondTest) {
- Placeholder x(BufHandle("X", {1}, kInt));
+ BufHandle x("X", {1}, kInt);
auto cmp = CompareSelect::make(x.load(0), 10, CompareSelectOperation::kLT);
auto cond =
Cond::make(cmp, x.store({0}, x.load(0) + 1), x.store({0}, x.load(0) - 1));
@@ -431,7 +431,7 @@
// }
// }
TEST(LLVM, CondNestedTest) {
- Placeholder x(BufHandle("X", {1}, kInt));
+ BufHandle x("X", {1}, kInt);
auto true_cmp =
CompareSelect::make(x.load(0), 5, CompareSelectOperation::kGT);
auto true_cond = Cond::make(
@@ -485,8 +485,8 @@
}
TEST(LLVM, VecLoadStoreTest) {
- Placeholder a(BufHandle("A", {1}, kInt));
- Placeholder b(BufHandle("B", {1}, kInt));
+ BufHandle a("A", {1}, kInt);
+ BufHandle b("B", {1}, kInt);
std::vector<int32_t> a_buffer = {1, 1, 1, 1};
std::vector<int32_t> b_buffer = {2, 2, 2, 2};
@@ -506,8 +506,8 @@
#define FLOAT_INTRINSICS_TEST(Name, Lanes) \
TEST(LLVM, VecFloat_##Name##Lane##Lanes##Test) { \
- Placeholder a(BufHandle("A", {1}, kFloat)); \
- Placeholder b(BufHandle("B", {1}, kFloat)); \
+ BufHandle a("A", {1}, kFloat); \
+ BufHandle b("B", {1}, kFloat); \
float val = 0.5f; \
std::vector<float> a_buffer(Lanes, val); \
std::vector<float> b_buffer(Lanes, val); \
@@ -544,8 +544,8 @@
#define DOUBLE_INTRINSICS_TEST(Name, Lanes) \
TEST(LLVM, VecDouble_##Name##Lane##Lanes##Test) { \
- Placeholder a(BufHandle("A", {1}, kDouble)); \
- Placeholder b(BufHandle("B", {1}, kDouble)); \
+ BufHandle a("A", {1}, kDouble); \
+ BufHandle b("B", {1}, kDouble); \
float val = 0.5f; \
std::vector<double> a_buffer(Lanes, val); \
std::vector<double> b_buffer(Lanes, val); \
@@ -581,12 +581,12 @@
#undef DOUBLE_INTRINSICS_TEST
TEST(LLVM, VectorizerLoadStoreTest) {
- Placeholder a(BufHandle("A", {1}, kInt));
+ BufHandle a("A", {1}, kInt);
Tensor c =
Compute("c", {{4, "i"}}, [&](const VarHandle& i) { return a.load(i); });
- Placeholder c_buf(BufHandle(c.buf()));
+ BufHandle c_buf(c.buf());
LoopNest l({c});
StmtPtr s = l.root_stmt();
ASSERT_TRUE(LoopNest::vectorize(to<For>(to<Block>(s)->front())));
@@ -603,13 +603,13 @@
}
TEST(LLVM, VectorizeBitCast) {
- Placeholder a(BufHandle("A", {128}, kInt));
+ BufHandle a("A", {128}, kInt);
Tensor c = Compute("c", {{128, "i"}}, [&](const VarHandle& i) {
return bitcast<float>(a.load(i));
});
- Placeholder c_buf(BufHandle(c.buf()));
+ BufHandle c_buf(c.buf());
LoopNest l({c});
StmtPtr s = l.root_stmt();
ASSERT_TRUE(LoopNest::vectorize(to<For>(to<Block>(s)->front())));
@@ -629,8 +629,8 @@
TEST(LLVM, MemcpyTest) {
constexpr int N = 32;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
std::vector<int32_t> a_buffer(N, 42);
std::vector<int32_t> b_buffer(N, 0);
@@ -650,7 +650,7 @@
TEST(LLVM, BzeroTest) {
constexpr int N = 32;
- Placeholder b(BufHandle("B", {N}, kInt));
+ BufHandle b("B", {N}, kInt);
std::vector<int32_t> b_buffer(N, 11);
VarHandle i("i", kInt);
@@ -667,9 +667,9 @@
TEST(LLVM, ElemwiseAdd) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int32_t> a_buffer(N, 41);
std::vector<int32_t> b_buffer(N, 1);
std::vector<int32_t> c_buffer(N, 1);
@@ -692,9 +692,9 @@
TEST(LLVM, ElemwiseAddFloat) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
- Placeholder c(BufHandle("C", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
+ BufHandle c("C", {N}, kFloat);
std::vector<float> a_buffer(N, 41);
std::vector<float> b_buffer(N, 1);
std::vector<float> c_buffer(N, 1);
@@ -717,8 +717,8 @@
TEST(LLVM, ElemwiseLog10Float) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
std::vector<float> a_buffer(N, 10.0f);
std::vector<float> b_buffer(N, 2.0f);
@@ -743,8 +743,8 @@
TEST(LLVM, ElemwiseLog1pFloat) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
std::vector<float> a_buffer(N, expf(3.0f) - 1);
std::vector<float> b_buffer(N, 42.0f);
@@ -769,9 +769,9 @@
TEST(LLVM, ElemwiseMaxInt) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 41);
std::vector<int> b_buffer(N, 1);
std::vector<int> c_buffer(N, 1);
@@ -795,9 +795,9 @@
TEST(LLVM, ElemwiseMinInt) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 41);
std::vector<int> b_buffer(N, 1);
std::vector<int> c_buffer(N, 1);
@@ -821,9 +821,9 @@
TEST(LLVM, ElemwiseMaxFloat) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
- Placeholder c(BufHandle("C", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
+ BufHandle c("C", {N}, kFloat);
std::vector<float> a_buffer(N, 41);
std::vector<float> b_buffer(N, 1);
std::vector<float> c_buffer(N, 1);
@@ -847,9 +847,9 @@
TEST(LLVM, ElemwiseMaxNaNFloat) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
- Placeholder c(BufHandle("C", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
+ BufHandle c("C", {N}, kFloat);
std::vector<float> a_buffer(N, NAN);
std::vector<float> b_buffer(N, 1);
std::vector<float> c_buffer(N, 1);
@@ -874,9 +874,9 @@
TEST(LLVM, ElemwiseMinFloat) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
- Placeholder c(BufHandle("C", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
+ BufHandle c("C", {N}, kFloat);
std::vector<float> a_buffer(N, 41);
std::vector<float> b_buffer(N, 1);
std::vector<float> c_buffer(N, 1);
@@ -900,9 +900,9 @@
TEST(LLVM, ElemwiseMinNaNFloat) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
- Placeholder c(BufHandle("C", {N}, kFloat));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
+ BufHandle c("C", {N}, kFloat);
std::vector<float> a_buffer(N, NAN);
std::vector<float> b_buffer(N, 1);
std::vector<float> c_buffer(N, 1);
@@ -927,9 +927,9 @@
TEST(LLVM, ElemwiseMod) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int32_t> a_buffer(N, 41);
std::vector<int32_t> b_buffer(N, 23);
std::vector<int32_t> c_buffer(N, 18);
@@ -952,9 +952,9 @@
TEST(LLVM, CompareSelectIntEQ) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kInt));
- Placeholder b(BufHandle("B", {N}, kInt));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kInt);
+ BufHandle b("B", {N}, kInt);
+ BufHandle c("C", {N}, kInt);
std::vector<int> a_buffer(N, 1);
std::vector<int> b_buffer(N, 1);
std::vector<int> c_buffer(N, 0);
@@ -992,9 +992,9 @@
TEST(LLVM, CompareSelectFloatEQ) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kFloat));
- Placeholder b(BufHandle("B", {N}, kFloat));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kFloat);
+ BufHandle b("B", {N}, kFloat);
+ BufHandle c("C", {N}, kInt);
std::vector<float> a_buffer(N, 1.0f);
std::vector<float> b_buffer(N, 1.0f);
std::vector<int> c_buffer(N, 0);
@@ -1025,9 +1025,9 @@
TEST(LLVM, CompareSelectByteGT) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kByte));
- Placeholder b(BufHandle("B", {N}, kByte));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kByte);
+ BufHandle b("B", {N}, kByte);
+ BufHandle c("C", {N}, kInt);
std::vector<uint8_t> a_buffer(N, 0);
std::vector<uint8_t> b_buffer(N, 0);
std::vector<int> c_buffer(N, 0);
@@ -1065,9 +1065,9 @@
TEST(LLVM, CompareSelectByteGE) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kByte));
- Placeholder b(BufHandle("B", {N}, kByte));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kByte);
+ BufHandle b("B", {N}, kByte);
+ BufHandle c("C", {N}, kInt);
std::vector<uint8_t> a_buffer(N, 0);
std::vector<uint8_t> b_buffer(N, 0);
std::vector<int> c_buffer(N, 0);
@@ -1100,9 +1100,9 @@
TEST(LLVM, CompareSelectByteLT) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kByte));
- Placeholder b(BufHandle("B", {N}, kByte));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kByte);
+ BufHandle b("B", {N}, kByte);
+ BufHandle c("C", {N}, kInt);
std::vector<uint8_t> a_buffer(N, 0);
std::vector<uint8_t> b_buffer(N, 128);
std::vector<int> c_buffer(N, 0);
@@ -1140,9 +1140,9 @@
TEST(LLVM, CompareSelectByteLE) {
constexpr int N = 1024;
- Placeholder a(BufHandle("A", {N}, kByte));
- Placeholder b(BufHandle("B", {N}, kByte));
- Placeholder c(BufHandle("C", {N}, kInt));
+ BufHandle a("A", {N}, kByte);
+ BufHandle b("B", {N}, kByte);
+ BufHandle c("C", {N}, kInt);
std::vector<uint8_t> a_buffer(N, 0);
std::vector<uint8_t> b_buffer(N, 128);
std::vector<int> c_buffer(N, 0);
@@ -1174,7 +1174,7 @@
}
TEST(LLVM, StoreFloat) {
- Placeholder result(BufHandle("result", {1}, kFloat));
+ BufHandle result("result", {1}, kFloat);
std::vector<float> result_buffer = {0.0f};
auto expr = result.store({0}, FloatImm::make(3.14f));
LLVMCodeGen cg(expr, {result});
@@ -1190,7 +1190,7 @@
});
LoopNest l({tensor});
StmtPtr stmt = l.root_stmt();
- Placeholder f_buf(BufHandle(tensor.buf()));
+ BufHandle f_buf(tensor.buf());
LLVMCodeGen cg(stmt, {f_buf});
PaddedBuffer<float> f_v(N, "f_v");
@@ -1206,13 +1206,13 @@
TEST(LLVM, ComputeMul) {
const int N = 1024;
- Placeholder a(BufHandle("a", {N}, kFloat));
- Placeholder b(BufHandle("b", {N}, kFloat));
+ BufHandle a("a", {N}, kFloat);
+ BufHandle b("b", {N}, kFloat);
Tensor c = Compute("c", {{N, "i"}}, [&](const VarHandle& i) {
return a.load(i) * b.load(i);
});
- Placeholder c_buf(BufHandle(c.buf()));
+ BufHandle c_buf(c.buf());
LoopNest l({c});
StmtPtr s = l.root_stmt();
@@ -1229,14 +1229,14 @@
TEST(LLVM, BroadcastAdd) {
const int M = 32;
const int N = 1024;
- Placeholder a(BufHandle("a", {M, N}, kFloat));
- Placeholder b(BufHandle("b", {N}, kFloat));
+ BufHandle a("a", {M, N}, kFloat);
+ BufHandle b("b", {N}, kFloat);
Tensor c = Compute(
"c", {{M, "i"}, {N, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return a.load(i, j) + b.load(j);
});
- Placeholder c_buf(BufHandle(c.buf()));
+ BufHandle c_buf(c.buf());
LoopNest l({c});
l.prepareForCodegen();
StmtPtr s = l.root_stmt();
@@ -1289,9 +1289,9 @@
TEST(LLVM, DynamicShapeAdd) {
auto testWithSize = [](int32_t size) {
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {n}, kFloat));
- Placeholder b(BufHandle("b", {n}, kFloat));
- Placeholder c(BufHandle("c", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
+ BufHandle b("b", {n}, kFloat);
+ BufHandle c("c", {n}, kFloat);
VarHandle i("i", kInt);
StmtPtr s = For::make(i, 0, n, c.store({i}, a.load(i) + b.load(i)));
std::vector<float> aData(size, 1.0f);
@@ -1310,9 +1310,9 @@
TEST(LLVM, BindDynamicShapeAdd) {
auto testWithSize = [](int32_t size) {
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {n}, kFloat));
- Placeholder b(BufHandle("b", {n}, kFloat));
- Placeholder c(BufHandle("c", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
+ BufHandle b("b", {n}, kFloat);
+ BufHandle c("c", {n}, kFloat);
VarHandle i("i", kInt);
StmtPtr s = For::make(i, 0, n, c.store({i}, a.load(i) + b.load(i)));
std::vector<float> aData(size, 1.0f);
@@ -1330,8 +1330,8 @@
TEST(LLVM, TensorDynamicShapeAdd) {
auto testWithSize = [](int32_t size) {
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {n}, kFloat));
- Placeholder b(BufHandle("b", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
+ BufHandle b("b", {n}, kFloat);
Tensor c = Compute("c", {{n, "n"}}, [&](const VarHandle& i) {
return a.load(i) + b.load(i);
});
@@ -1353,8 +1353,8 @@
auto testWithSize = [](int32_t M, int32_t N) {
VarHandle m("m", kInt);
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {m, n}, kFloat));
- Placeholder b(BufHandle("b", {m, n}, kFloat));
+ BufHandle a("a", {m, n}, kFloat);
+ BufHandle b("b", {m, n}, kFloat);
Tensor c = Compute(
"c", {{m, "m"}, {n, "n"}}, [&](const VarHandle& i, const VarHandle& j) {
return a.load(i, j) + b.load(i, j);
@@ -1383,7 +1383,7 @@
}
TEST(LLVM, EliminatedStmt) {
- Placeholder a(BufHandle("a", {1}, kFloat));
+ BufHandle a("a", {1}, kFloat);
Tensor c = Compute("c", {{0, "m"}}, [&](const VarHandle& m) { return m; });
@@ -1402,7 +1402,7 @@
int N = 64;
const int kTotalSize = M * N;
- Placeholder a("a", kFloat, {1, M, N});
+ BufHandle a("a", {1, M, N}, kFloat);
// TODO: why doesn't implicit vector<DimArg> work?
std::vector<DimArg> axis = {DimArg(1)};
@@ -1439,7 +1439,7 @@
int N = 64;
const int kTotalSize = M * N;
- Placeholder a("a", kFloat, {1, M, N});
+ BufHandle a("a", {1, M, N}, kFloat);
// TODO: why doesn't implicit vector<DimArg> work?
std::vector<DimArg> axis = {DimArg(1)};
@@ -1487,7 +1487,7 @@
int N = 64;
const int kTotalSize = M * N;
- Placeholder a("a", kFloat, {1, M, N});
+ BufHandle a("a", {1, M, N}, kFloat);
Tensor b = Reduce("sum", {{1, "K"}}, Sum(), a, {{M, "M"}, {N, "N"}});
LoopNest loopnest({b});
@@ -1652,8 +1652,8 @@
int N = 32;
int K = 48;
- Placeholder AP(BufHandle("A", {M, K}, kFloat));
- Placeholder BP(BufHandle("B", {K, N}, kFloat));
+ BufHandle AP("A", {M, K}, kFloat);
+ BufHandle BP("B", {K, N}, kFloat);
Tensor CT = Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
@@ -1732,8 +1732,8 @@
TEST(LLVM, CallRaw) {
const int M = 32;
VarHandle N("N", kInt);
- Placeholder a(BufHandle("a", {M, N}, kFloat));
- Placeholder b(BufHandle("b", {N}, kFloat));
+ BufHandle a("a", {M, N}, kFloat);
+ BufHandle b("b", {N}, kFloat);
Tensor c = Compute(
"c", {{M, "i"}, {N, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return a.load(i, j) + b.load(j);
@@ -1772,9 +1772,9 @@
TEST(LLVM, CustomTarget) {
constexpr int M = 16;
- Placeholder a("a", kFloat, {M});
- Placeholder b("b", kFloat, {M});
- Placeholder c("c", kFloat, {M});
+ BufHandle a("a", {M}, kFloat);
+ BufHandle b("b", {M}, kFloat);
+ BufHandle c("c", {M}, kFloat);
Tensor d = Compute("d", {{M, "m"}}, [&](const VarHandle& m) {
return a.load(m) * b.load(m) + c.load(m);
});
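Throughout the LLVM tests the codegen calls themselves are untouched: a CodeGen
buffer-argument list such as LLVMCodeGen cg(stmt, {f_buf}) accepts a BufHandle
exactly as it accepted a Placeholder. Where a test needs a handle to a Tensor's
output buffer, the former double wrap collapses to a single constructor call (a
sketch reusing the ComputeMul names above):

  Tensor c = Compute("c", {{N, "i"}}, [&](const VarHandle& i) {
    return a.load(i) * b.load(i);
  });
  BufHandle c_buf(c.buf()); // was: Placeholder c_buf(BufHandle(c.buf()));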
diff --git a/test/cpp/tensorexpr/test_loopnest.cpp b/test/cpp/tensorexpr/test_loopnest.cpp
index feae1ca..d55ee3e 100644
--- a/test/cpp/tensorexpr/test_loopnest.cpp
+++ b/test/cpp/tensorexpr/test_loopnest.cpp
@@ -577,8 +577,8 @@
TEST(LoopNest, ExprSplitWithMask01) {
const int M = 26;
const int N = 5;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {M, N});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {M, N}, kFloat);
Tensor tensor = Compute(
"f", {{M, "m"}, {N, "n"}}, [&](const ExprHandle& m, const ExprHandle& n) {
return a_buf.load(m, n) + b_buf.load(m, n) + 1.0f;
@@ -611,8 +611,8 @@
// insert any masks.
TEST(LoopNest, ExprSplitWithMaskRepeatedNoMask) {
const int M = 64;
- Placeholder a_buf("a", kFloat, {M});
- Placeholder b_buf("b", kFloat, {M});
+ BufHandle a_buf("a", {M}, kFloat);
+ BufHandle b_buf("b", {M}, kFloat);
Tensor tensor = Compute("f", {{M, "m"}}, [&](const ExprHandle& m) {
return a_buf.load(m) + b_buf.load(m) + 1.0f;
});
@@ -695,8 +695,8 @@
TEST(LoopNest, TileSimple) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
const int M = 64, N = 64;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {M, N});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {M, N}, kFloat);
Tensor tensor = Compute(
"f", {{M, "m"}, {N, "n"}}, [&](const ExprHandle& m, const ExprHandle& n) {
return a_buf.load({m, n}) + b_buf.load({m, n}) + 1.0f;
@@ -740,8 +740,8 @@
TEST(LoopNest, TileWithTails) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
const int M = 64, N = 64;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {M, N});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {M, N}, kFloat);
Tensor tensor = Compute(
"f", {{M, "m"}, {N, "n"}}, [&](const ExprHandle& m, const ExprHandle& n) {
return a_buf.load({m, n}) + b_buf.load({m, n}) + 1.0f;
@@ -786,8 +786,8 @@
TEST(LoopNest, TileInMiddle) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
const int M = 8, N = 8, L = 8, K = 8;
- Placeholder a_buf("a", kFloat, {M, N, L, K});
- Placeholder b_buf("b", kFloat, {M, N, L, K});
+ BufHandle a_buf("a", {M, N, L, K}, kFloat);
+ BufHandle b_buf("b", {M, N, L, K}, kFloat);
Tensor tensor = Compute(
"f",
{{M, "m"}, {N, "n"}, {L, "l"}, {K, "k"}},
@@ -845,8 +845,8 @@
TEST(LoopNest, SplitWithTailWithLoopOptions) {
const int M = 21;
- Placeholder a_buf("a", kFloat, {M});
- Placeholder b_buf("b", kFloat, {M});
+ BufHandle a_buf("a", {M}, kFloat);
+ BufHandle b_buf("b", {M}, kFloat);
Tensor tensor = Compute("f", {{M, "m"}}, [&](const ExprHandle& m) {
return a_buf.load(m) + b_buf.load(m) + 1.0f;
});
@@ -875,8 +875,8 @@
TEST(LoopNest, SplitWithMaskWithLoopOptions) {
const int M = 21;
- Placeholder a_buf("a", kFloat, {M});
- Placeholder b_buf("b", kFloat, {M});
+ BufHandle a_buf("a", {M}, kFloat);
+ BufHandle b_buf("b", {M}, kFloat);
Tensor tensor = Compute("f", {{M, "m"}}, [&](const ExprHandle& m) {
return a_buf.load(m) + b_buf.load(m) + 1.0f;
});
@@ -901,8 +901,8 @@
const int M = 4;
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {N, K});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {N, K}, kFloat);
Tensor c = Compute(
"broadcast_add",
{{M, "m"}, {N, "n"}, {K, "k"}},
@@ -949,8 +949,8 @@
const int M = 4;
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {N, K});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {N, K}, kFloat);
Tensor c = Compute(
"broadcast_add",
{{M, "m"}, {N, "n"}, {K, "k"}},
@@ -1005,10 +1005,10 @@
const int M = 4;
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {N, K});
- Placeholder c_buf("c", kFloat, {M, N});
- Placeholder d_buf("d", kFloat, {M, K});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {N, K}, kFloat);
+ BufHandle c_buf("c", {M, N}, kFloat);
+ BufHandle d_buf("d", {M, K}, kFloat);
Tensor x = Compute(
"x",
@@ -1085,10 +1085,10 @@
const int M = 4;
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {N, K});
- Placeholder c_buf("c", kFloat, {M, N});
- Placeholder d_buf("d", kFloat, {M, K});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {N, K}, kFloat);
+ BufHandle c_buf("c", {M, N}, kFloat);
+ BufHandle d_buf("d", {M, K}, kFloat);
Tensor x = Compute(
"x",
@@ -1306,8 +1306,8 @@
const int M = 4;
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {N, K});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {N, K}, kFloat);
Tensor x = Compute(
"x",
@@ -1709,7 +1709,7 @@
const int kVectorCount = 128;
const int kTotalSize = kVectorSize * kVectorCount;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
Tensor b = Compute(
"f", {{kTotalSize, "i"}}, [&](const std::vector<VarHandle>& axes) {
@@ -1741,10 +1741,10 @@
const int kVectorCount = 128;
const int kTotalSize = kVectorSize * kVectorCount;
- Placeholder a(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder b(BufHandle("B", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder c(BufHandle("C", {ExprHandle(kTotalSize)}, kFloat));
- Placeholder d(BufHandle("D", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a("A", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle b("B", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle c("C", {ExprHandle(kTotalSize)}, kFloat);
+ BufHandle d("D", {ExprHandle(kTotalSize)}, kFloat);
Tensor e = Compute("e", {{kTotalSize, "i"}}, [&](const VarHandle& i) {
return a.load(i) + b.load(i);
@@ -1778,8 +1778,8 @@
auto testWithSize = [](int32_t M, int32_t N) {
VarHandle m("m", kInt);
VarHandle n("n", kInt);
- Placeholder a(BufHandle("a", {m, n}, kFloat));
- Placeholder b(BufHandle("b", {m, n}, kFloat));
+ BufHandle a("a", {m, n}, kFloat);
+ BufHandle b("b", {m, n}, kFloat);
Tensor c = Compute(
"c", {{m, "m"}, {n, "n"}}, [&](const VarHandle& i, const VarHandle& j) {
return a.load(i, j) + b.load(i, j);
@@ -2168,7 +2168,7 @@
int H = 256;
int R = 3;
int Pad = 1;
- Placeholder IP("input", kFloat, {H});
+ BufHandle IP("input", {H}, kFloat);
Tensor A =
Compute("A", {{N, "np"}, {H + 2 * Pad, "hp"}}, [&](Axis n, Axis h) {
@@ -2459,19 +2459,16 @@
});
LoopNest l({tensor});
- Placeholder extra(BufHandle("res", {6, 3}, kFloat));
+ BufHandle extra("res", {6, 3}, kFloat);
auto loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
VarHandle i = VarHandle(loops[0]->var());
- StmtPtr store_1 =
- Store::make(BufHandle(extra.data()), {i, 0}, ExprHandle(1.f));
- StmtPtr store_2 =
- Store::make(BufHandle(extra.data()), {i, 1}, ExprHandle(2.f));
+ StmtPtr store_1 = Store::make(extra, {i, 0}, ExprHandle(1.f));
+ StmtPtr store_2 = Store::make(extra, {i, 1}, ExprHandle(2.f));
// stmt 3 is the Function body.
- StmtPtr store_3 =
- Store::make(BufHandle(extra.data()), {i, 2}, ExprHandle(4.f));
+ StmtPtr store_3 = Store::make(extra, {i, 2}, ExprHandle(4.f));
loops[0]->body()->prepend_stmt(store_1);
loops[1]->body()->prepend_stmt(store_2);
@@ -2590,7 +2587,7 @@
[](const std::vector<VarHandle>&) { return -1; });
LoopNest l({c});
- Placeholder extra(BufHandle("extra", {5}, kInt));
+ BufHandle extra("extra", {5}, kInt);
auto loops = l.getAllLoopNestsWritingToBuf(c.buf()).at(0);
int j = 0;
@@ -2598,10 +2595,10 @@
// Add an increment at each layer of the loop which counts the number of
// times the loop executes.
LoadPtr load =
- alloc<Load>(extra.data(), std::vector<ExprPtr>({alloc<IntImm>(j)}));
+ alloc<Load>(extra.node(), std::vector<ExprPtr>({alloc<IntImm>(j)}));
AddPtr add = alloc<Add>(load, alloc<IntImm>(1));
StmtPtr store = alloc<Store>(
- extra.data(), std::vector<ExprPtr>({alloc<IntImm>(j)}), add);
+ extra.node(), std::vector<ExprPtr>({alloc<IntImm>(j)}), add);
if (prepend) {
l->body()->prepend_stmt(store);
}
@@ -2702,10 +2699,10 @@
const int M = 4;
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N});
- Placeholder b_buf("b", kFloat, {N, K});
- Placeholder c_buf("c", kFloat, {M, N});
- Placeholder d_buf("d", kFloat, {M, K});
+ BufHandle a_buf("a", {M, N}, kFloat);
+ BufHandle b_buf("b", {N, K}, kFloat);
+ BufHandle c_buf("c", {M, N}, kFloat);
+ BufHandle d_buf("d", {M, K}, kFloat);
Tensor x = Compute(
"x",
@@ -3253,7 +3250,7 @@
TEST(LoopNest, NormalizeAndSplitWithTail) {
// Create a dummy tensor to construct LoopNest.
ExprHandle n(100);
- Placeholder a(BufHandle("a", {n}, kFloat));
+ BufHandle a("a", {n}, kFloat);
Tensor b =
Compute("b", {{n, "i"}}, [&](const VarHandle& i) { return a.load(i); });
LoopNest l({b});
@@ -3529,7 +3526,7 @@
const int N = 7;
VarHandle m("m", kInt);
VarHandle n("n", kInt);
- Placeholder b(BufHandle("b", {m, n}, kFloat));
+ BufHandle b("b", {m, n}, kFloat);
Tensor c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}});
LoopNest loop({c});
HashProvider hasher;
@@ -3584,7 +3581,7 @@
TEST(LoopNest, DetectInlineRankMismatch) {
const int kTotalSize = 8;
- Placeholder a_buf(BufHandle("A", {ExprHandle(kTotalSize)}, kFloat));
+ BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kFloat);
Tensor a = Compute("a", {{kTotalSize, "i"}}, [&](const VarHandle& i) {
return a_buf.load(i);
});
@@ -3976,7 +3973,7 @@
TEST(LoopNest, InlineConstantIndex) {
const int N = 10;
- Placeholder x_buf("a", kFloat, {1, N, 1});
+ BufHandle x_buf("a", {1, N, 1}, kFloat);
Tensor y = Compute(
"f",
{{1, "m"}, {N, "n"}, {1, "o"}},
@@ -4576,16 +4573,15 @@
ASSERT_EQ(hash_before, hash_after);
}
-static std::pair<std::unique_ptr<Placeholder>, Tensor> colReduce(int M, int N) {
- auto a =
- std::make_unique<Placeholder>("a", kFloat, std::vector<ExprHandle>{M, N});
+static std::pair<BufHandle, Tensor> colReduce(int M, int N) {
+ BufHandle a("a", {M, N}, kFloat);
Tensor t = Reduce(
"b",
{{N, "n"}},
Sum(),
- [&](const VarHandle& n, const VarHandle& m) { return a->load(m, n); },
+ [&](const VarHandle& n, const VarHandle& m) { return a.load(m, n); },
{{M, "m"}});
- return {std::move(a), t};
+ return {a, t};
}
static StmtPtr splitTailReorder(Tensor b) {
@@ -4629,7 +4625,7 @@
return nest.root_stmt();
}
-static void checkColReduce(StmtPtr s, Placeholder& p, Tensor t) {
+static void checkColReduce(StmtPtr s, BufHandle p, Tensor t) {
int M = immediateAs<int>(p.dim(0));
int N = immediateAs<int>(p.dim(1));
PaddedBuffer<float> a(M, N);
@@ -4669,7 +4665,7 @@
)IR";
torch::jit::testing::FileCheck().run(verification_pattern, oss.str());
- checkColReduce(s, *p.first, p.second);
+ checkColReduce(s, p.first, p.second);
}
TEST(LoopNest, ColReduceSplitTailUnevenReorder) {
@@ -4694,21 +4690,21 @@
)IR";
torch::jit::testing::FileCheck().run(verification_pattern, oss.str());
- checkColReduce(s, *p.first, p.second);
+ checkColReduce(s, p.first, p.second);
}
TEST(LoopNest, ColReduceSplitMaskEvenReorder) {
constexpr int M = 76, N = 128;
auto p = colReduce(M, N);
StmtPtr s = splitMaskReorder(p.second);
- checkColReduce(s, *p.first, p.second);
+ checkColReduce(s, p.first, p.second);
}
TEST(LoopNest, ColReduceSplitMaskUnevenReorder) {
constexpr int M = 76, N = 100;
auto p = colReduce(M, N);
StmtPtr s = splitMaskReorder(p.second);
- checkColReduce(s, *p.first, p.second);
+ checkColReduce(s, p.first, p.second);
}
TEST(LoopNest, ReorderAxisWithMultipleConds) {
@@ -4752,7 +4748,7 @@
TEST(LoopNest, VectorizeUse) {
constexpr int N = 8;
- Placeholder a("a", kFloat, {N});
+ BufHandle a("a", {N}, kFloat);
Tensor b = Compute(
"b", {{N, "n"}}, [&](const VarHandle& n) { return a.load(n) + 1.0f; });
Tensor c = Compute(
@@ -4782,8 +4778,8 @@
TEST(LoopNest, Int64Direct) {
constexpr int64_t N = 12;
- Placeholder a("a", kLong, {N});
- Placeholder b("b", kLong, {N});
+ BufHandle a("a", {N}, kLong);
+ BufHandle b("b", {N}, kLong);
VarHandle n("n", kLong);
StmtPtr s = For::make(
n, LongImm::make(0l), N, b.store({n}, a.load({n}) + LongImm::make(1l)));
@@ -4795,7 +4791,7 @@
TEST(LoopNest, Int64Compute) {
constexpr int64_t N = 12;
- Placeholder a("a", kLong, {N});
+ BufHandle a("a", {N}, kLong);
Tensor b = Compute("b", {{N, "n"}}, [&](const VarHandle& n) {
return a.load(n) + LongImm::make(1l);
});
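Two variants appear in the loop-nest tests. First, IR constructors that took a
BufHandle rebuilt from the raw buffer now take the handle itself, e.g.
Store::make(extra, {i, 0}, ExprHandle(1.f)); where a raw BufPtr genuinely is
required, as in alloc<Load> and alloc<Store>, the accessor is extra.node() in
place of extra.data(). Second, helpers no longer heap-allocate to keep a buffer
alive: BufHandle is a small value type referring to the underlying Buf node, so
colReduce returns it by value and callers drop the *p.first dereference:

  static std::pair<BufHandle, Tensor> colReduce(int M, int N) {
    BufHandle a("a", {M, N}, kFloat);
    Tensor t = Reduce(
        "b", {{N, "n"}}, Sum(),
        [&](const VarHandle& n, const VarHandle& m) { return a.load(m, n); },
        {{M, "m"}});
    return {a, t};
  }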
diff --git a/test/cpp/tensorexpr/test_memdependency.cpp b/test/cpp/tensorexpr/test_memdependency.cpp
index c9990dc..d3ac6f4 100644
--- a/test/cpp/tensorexpr/test_memdependency.cpp
+++ b/test/cpp/tensorexpr/test_memdependency.cpp
@@ -2693,8 +2693,8 @@
*/
// Can determine if 2 loops created by Compute are dependent.
- Placeholder a_buf("a", kFloat, {4, 5});
- Placeholder b_buf("b", kFloat, {5, 6});
+ BufHandle a_buf("a", {4, 5}, kFloat);
+ BufHandle b_buf("b", {5, 6}, kFloat);
Tensor c = Compute(
"broadcast_add",
{{4, "m"}, {5, "n"}, {6, "k"}},
@@ -2710,13 +2710,13 @@
LoopNest l({d}, {c, d});
- MemDependencyChecker analyzer({a_buf.data(), b_buf.data()}, {d.buf()});
+ MemDependencyChecker analyzer({a_buf.node(), b_buf.node()}, {d.buf()});
l.root_stmt()->accept(&analyzer);
// Sanity test: Output depends on input.
- ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a_buf.data()));
- ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), b_buf.data()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a_buf.node()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), b_buf.node()));
// Second loop depends on first loop.
auto c_loop = l.getLoopStmtsFor(c)[0];
@@ -2738,8 +2738,8 @@
// Check inlining affects the number of accesses returned.
- Placeholder a_buf("a", kFloat, {4, 5});
- Placeholder b_buf("b", kFloat, {5, 6});
+ BufHandle a_buf("a", {4, 5}, kFloat);
+ BufHandle b_buf("b", {5, 6}, kFloat);
Tensor c = Compute(
"broadcast_add",
{{4, "m"}, {5, "n"}, {6, "k"}},
@@ -2756,12 +2756,12 @@
LoopNest l({d}, {c, d});
l.computeInline(c.buf());
- MemDependencyChecker analyzer({a_buf.data(), b_buf.data()}, {d.buf()});
+ MemDependencyChecker analyzer({a_buf.node(), b_buf.node()}, {d.buf()});
l.root_stmt()->accept(&analyzer);
// Sanity test: Output depends on input.
- ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a_buf.data()));
- ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), b_buf.data()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a_buf.node()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), b_buf.node()));
// broadcast_add tensor should not appear in trace at all.
for (auto& wi : analyzer.getHistory()) {
@@ -2773,8 +2773,8 @@
using namespace analysis;
// Split an axis, so the number of loops != the number of dimensions.
- Placeholder a_buf("a", kFloat, {4, 5});
- Placeholder b_buf("b", kFloat, {5, 6});
+ BufHandle a_buf("a", {4, 5}, kFloat);
+ BufHandle b_buf("b", {5, 6}, kFloat);
Tensor c = Compute(
"broadcast_add",
{{4, "m"}, {5, "n"}, {6, "k"}},
@@ -2784,12 +2784,12 @@
LoopNest l({c});
- MemDependencyChecker analyzer_before({a_buf.data(), b_buf.data()}, {c.buf()});
+ MemDependencyChecker analyzer_before({a_buf.node(), b_buf.node()}, {c.buf()});
l.root_stmt()->accept(&analyzer_before);
l.splitWithTail(l.getLoopStmtsFor(c)[0], 2);
- MemDependencyChecker analyzer_after({a_buf.data(), b_buf.data()}, {c.buf()});
+ MemDependencyChecker analyzer_after({a_buf.node(), b_buf.node()}, {c.buf()});
StmtPtr stmt = IRSimplifier::simplify(l.root_stmt());
stmt->accept(&analyzer_after);
@@ -2819,8 +2819,8 @@
using namespace analysis;
// Reorder an axis, so the loop order doesn't match the indexing order.
- Placeholder a_buf("a", kFloat, {4, 5});
- Placeholder b_buf("b", kFloat, {5, 6});
+ BufHandle a_buf("a", {4, 5}, kFloat);
+ BufHandle b_buf("b", {5, 6}, kFloat);
Tensor c = Compute(
"broadcast_add",
{{4, "m"}, {5, "n"}, {6, "k"}},
@@ -2830,13 +2830,13 @@
LoopNest l({c});
- MemDependencyChecker analyzer_before({a_buf.data(), b_buf.data()}, {c.buf()});
+ MemDependencyChecker analyzer_before({a_buf.node(), b_buf.node()}, {c.buf()});
l.root_stmt()->accept(&analyzer_before);
auto loops = l.getLoopStmtsFor(c);
l.reorderAxis(loops[0], loops[1]);
- MemDependencyChecker analyzer_after({a_buf.data(), b_buf.data()}, {c.buf()});
+ MemDependencyChecker analyzer_after({a_buf.node(), b_buf.node()}, {c.buf()});
StmtPtr stmt = IRSimplifier::simplify(l.root_stmt());
stmt->accept(&analyzer_after);
@@ -2884,8 +2884,8 @@
// Can determine dependencies of a Reduction.
- Placeholder a(BufHandle("a", {2, 3, 6}, kFloat));
- Placeholder b(BufHandle("b", {2, 3, 6}, kFloat));
+ BufHandle a("a", {2, 3, 6}, kFloat);
+ BufHandle b("b", {2, 3, 6}, kFloat);
Tensor c = Compute(
"scale",
@@ -2896,13 +2896,13 @@
Tensor d = Reduce("sum", {{2, "l1"}}, Sum(), c, {{3, "n1"}, {6, "m1"}});
LoopNest l({d}, {c, d});
- MemDependencyChecker analyzer({a.data(), b.data()}, {d.buf()});
+ MemDependencyChecker analyzer({a.node(), b.node()}, {d.buf()});
l.root_stmt()->accept(&analyzer);
// Sanity test: Output depends on input.
- ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a.data()));
- ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), b.data()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a.node()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), b.node()));
// Second loop depends on first loop.
auto c_loop = l.getLoopStmtsFor(c)[0];
@@ -2911,8 +2911,8 @@
// Reduction depends on both inputs.
auto reduces = NodeFinder<ReduceOp>::find(l.root_stmt());
- ASSERT_TRUE(analyzer.dependsIndirectly(reduces[0], a.data()));
- ASSERT_TRUE(analyzer.dependsIndirectly(reduces[0], b.data()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(reduces[0], a.node()));
+ ASSERT_TRUE(analyzer.dependsIndirectly(reduces[0], b.node()));
}
TEST(MemDependency, MemDependencyCheckerComputeGEMM) {
@@ -2921,8 +2921,8 @@
int K = 2048;
using namespace analysis;
- Placeholder AP(BufHandle("A", {M, K}, kFloat));
- Placeholder BP(BufHandle("B", {K, N}, kFloat));
+ BufHandle AP("A", {M, K}, kFloat);
+ BufHandle BP("B", {K, N}, kFloat);
Tensor CT = Reduce(
"gemm",
{{M, "M"}, {N, "N"}},
@@ -2984,8 +2984,8 @@
stmt->accept(&analyzer_unlowered);
// Outputs depend on inputs.
- ASSERT_TRUE(analyzer_unlowered.dependsIndirectly(CT.buf(), AP.data()));
- ASSERT_TRUE(analyzer_unlowered.dependsIndirectly(CT.buf(), BP.data()));
+ ASSERT_TRUE(analyzer_unlowered.dependsIndirectly(CT.buf(), AP.node()));
+ ASSERT_TRUE(analyzer_unlowered.dependsIndirectly(CT.buf(), BP.node()));
// The last write to gemm should cover the total bound of the output.
std::shared_ptr<AccessInfo> outputAccess =
@@ -3003,8 +3003,8 @@
// Likewise the first read from each input cover the entire range of the
// input.
- auto aInput = analyzer_unlowered.input(AP.data());
- auto bInput = analyzer_unlowered.input(BP.data());
+ auto aInput = analyzer_unlowered.input(AP.node());
+ auto bInput = analyzer_unlowered.input(BP.node());
// A single dependent each.
ASSERT_EQ(aInput->dependents().size(), 1);
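The memory-dependency analyzer traffics in raw buffer pointers, so these hunks
are a pure accessor rename, Placeholder::data() to BufHandle::node(), with no
change to what the analyzer observes:

  MemDependencyChecker analyzer({a_buf.node(), b_buf.node()}, {d.buf()});
  ASSERT_TRUE(analyzer.dependsIndirectly(d.buf(), a_buf.node()));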
diff --git a/test/cpp/tensorexpr/test_ops.cpp b/test/cpp/tensorexpr/test_ops.cpp
index 586c093..d4eae6a 100644
--- a/test/cpp/tensorexpr/test_ops.cpp
+++ b/test/cpp/tensorexpr/test_ops.cpp
@@ -25,8 +25,8 @@
constexpr int M = 8;
constexpr int N = 16;
- Placeholder a("a", kFloat, {M, N});
- Tensor b = computeSum({a.handle(), dims, false}, c10::kFloat);
+ BufHandle a("a", {M, N}, kFloat);
+ Tensor b = computeSum({a, dims, false}, c10::kFloat);
auto cg = compile({a}, {b});
auto at = at::arange(M * N, at::kFloat).view({M, N});
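computeSum receives its inputs as ArgValues, and a BufHandle converts to one
directly, so the old a.handle() hop that turned the Placeholder into a
BufHandle first is no longer needed; the handle is passed straight through, as
in computeSum({a, dims, false}, c10::kFloat) above.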
diff --git a/test/cpp/tensorexpr/test_reductions.cpp b/test/cpp/tensorexpr/test_reductions.cpp
index 3d2c0ec..d86f046 100644
--- a/test/cpp/tensorexpr/test_reductions.cpp
+++ b/test/cpp/tensorexpr/test_reductions.cpp
@@ -26,7 +26,7 @@
TEST(Reductions, ReduceSum0D_1) {
const int M = 10;
- Placeholder b(BufHandle("b", {M}, kFloat));
+ BufHandle b("b", {M}, kFloat);
std::vector<float> in(M);
for (int j = 0; j < M; ++j) {
in[j] = j;
@@ -51,7 +51,7 @@
TEST(Reductions, ReduceSum0D_2) {
const int M = 10;
- Placeholder b(BufHandle("b", {}, kFloat));
+ BufHandle b("b", {}, kFloat);
std::vector<float> in(1);
in[0] = 77.7;
@@ -71,7 +71,7 @@
// Sum an array to a single value.
TEST(Reductions, ReduceSum1D) {
- Placeholder b(BufHandle("b", {10}, kFloat));
+ BufHandle b("b", {10}, kFloat);
std::vector<float> in(10);
for (int j = 0; j < 10; ++j) {
in[j] = j;
@@ -98,7 +98,7 @@
VarHandle m("m", kInt);
VarHandle n("n", kInt);
- Placeholder b(BufHandle("b", {m, n}, kFloat));
+ BufHandle b("b", {m, n}, kFloat);
std::vector<float> in(M * N);
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
@@ -135,7 +135,7 @@
const int M = 10;
VarHandle m("m", kInt);
- Placeholder b(BufHandle("b", {2, 3, m}, kFloat));
+ BufHandle b("b", {2, 3, m}, kFloat);
Tensor c = Reduce("sum", {{2, "l"}, {3, "n"}}, Sum(), b, {{m, "m"}});
LoopNest loop({c});
@@ -184,7 +184,7 @@
}
// This is the same as just reducing the original result across that axis.
- Placeholder c_buf(BufHandle(c.buf()));
+ BufHandle c_buf(c.buf());
Tensor e = Reduce("sum3", {{2, "l"}}, Sum(), c_buf, {{3, "m"}});
LoopNest loop3({e});
loop3.prepareForCodegen();
@@ -201,9 +201,9 @@
// Sum a large (10 D) Tensor 5 dimensions in.
TEST(Reductions, ReduceSum10D) {
- Placeholder in_(BufHandle("in_", {2, 3, 2, 3, 2, 3, 2, 3, 2, 3}, kFloat));
+ BufHandle in_("in_", {2, 3, 2, 3, 2, 3, 2, 3, 2, 3}, kFloat);
const int InputSize = 2 * 3 * 2 * 3 * 2 * 3 * 2 * 3 * 2 * 3;
- Placeholder out_(BufHandle("out_", {2, 3, 2, 3, 2}, kFloat));
+ BufHandle out_("out_", {2, 3, 2, 3, 2}, kFloat);
const int OutputSize = 2 * 3 * 2 * 3 * 2;
std::vector<float> in(InputSize, 1.f);
@@ -236,7 +236,7 @@
const int M = 4;
const int N = 4;
- Placeholder b(BufHandle("b", {M, N}, kFloat));
+ BufHandle b("b", {M, N}, kFloat);
std::vector<float> in(M * N);
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
@@ -272,7 +272,7 @@
// Maximum reductions.
TEST(Reductions, ReduceMax) {
- Placeholder in_(BufHandle("b", {10}, kFloat));
+ BufHandle in_("b", {10}, kFloat);
std::vector<float> in(10);
std::vector<float> out(1, -1.f);
@@ -292,7 +292,7 @@
ASSERT_EQ(out[0], 9);
- Placeholder in2_(BufHandle("b", {2, 5}, kFloat));
+ BufHandle in2_("b", {2, 5}, kFloat);
std::vector<float> out2(2, -1.f);
Tensor m2d = Reduce("max", {{2, "n"}}, Maximum(kFloat), in2_, {{5, "m"}});
@@ -312,7 +312,7 @@
// Minimum reduction, with custom initialization.
TEST(Reductions, ReduceMinCustomInitializer) {
VarHandle minInit("minInit", kFloat);
- Placeholder in_(BufHandle("b", {10}, kFloat));
+ BufHandle in_("b", {10}, kFloat);
std::vector<float> in(10);
std::vector<float> out(1, -1.f);
@@ -348,7 +348,7 @@
// TODO: this is very awkward without logical And/Or operators.
TEST(Reductions, ReduceAnyAll) {
VarHandle searchValue("searchValue", kInt);
- Placeholder b(BufHandle("b", {4, 10}, kInt));
+ BufHandle b("b", {4, 10}, kInt);
Reducer anyEqSV(ExprHandle(0), [](ExprHandle a, ExprHandle b) {
return CompareSelect::make(a, 1, 1, b, kEQ);
@@ -431,8 +431,8 @@
}
TEST(Reductions, ReduceMatmul2D) {
- Placeholder tA(BufHandle("tA", {3, 2}, kFloat));
- Placeholder tB(BufHandle("tB", {2, 3}, kFloat));
+ BufHandle tA("tA", {3, 2}, kFloat);
+ BufHandle tB("tB", {2, 3}, kFloat);
std::vector<float> tA_(6);
std::vector<float> tB_(6);
@@ -471,7 +471,7 @@
}
TEST(Reductions, ReduceRfactorLike) {
- Placeholder in(BufHandle("in", {10, 10}, kFloat));
+ BufHandle in("in", {10, 10}, kFloat);
std::vector<float> in_(100);
for (int i = 0; i < 100; ++i) {
in_[i] = i;
@@ -480,7 +480,7 @@
std::vector<float> out(1, -1.f);
Tensor l1 = Reduce("l1", {{10, "i"}}, Sum(), in, {{10, "j"}});
- Placeholder in_rf(BufHandle(l1.buf()));
+ BufHandle in_rf(l1.buf());
Tensor l2 = Reduce("l2", {}, Sum(), in_rf, {{10, "i"}});
@@ -499,8 +499,8 @@
const int M = 10;
VarHandle m("m", kInt);
- Placeholder a(BufHandle("a", {2, 3}, kFloat));
- Placeholder b(BufHandle("b", {2, 3, m}, kFloat));
+ BufHandle a("a", {2, 3}, kFloat);
+ BufHandle b("b", {2, 3, m}, kFloat);
Tensor c = Reduce("sum", {{2, "l1"}, {3, "n1"}}, Sum(), b, {{m, "m1"}});
Tensor d = Compute(
@@ -542,8 +542,8 @@
const int M = 10;
VarHandle m("m", kInt);
- Placeholder a(BufHandle("a", {2, 3, m}, kFloat));
- Placeholder b(BufHandle("b", {2, 3, m}, kFloat));
+ BufHandle a("a", {2, 3, m}, kFloat);
+ BufHandle b("b", {2, 3, m}, kFloat);
Tensor c = Compute(
"scale",
@@ -588,7 +588,7 @@
}
TEST(Reductions, SplitReduceAxis) {
- Placeholder in(BufHandle("in", {16, 8}, kFloat));
+ BufHandle in("in", {16, 8}, kFloat);
std::vector<float> in_(16 * 8);
for (int i = 0; i < 16; ++i) {
@@ -617,7 +617,7 @@
}
TEST(Reductions, SplitNonReduceAxis) {
- Placeholder in(BufHandle("in", {16, 8}, kFloat));
+ BufHandle in("in", {16, 8}, kFloat);
std::vector<float> in_(16 * 8);
for (int i = 0; i < 16; ++i) {
@@ -653,7 +653,7 @@
SumOp(c(k, n), 0, a(k, m, n), {m})
*/
- Placeholder in(BufHandle("in", {1, 12, 6}, kFloat));
+ BufHandle in("in", {1, 12, 6}, kFloat);
std::vector<float> in_(12 * 6, 1.f);
Tensor tensor_ = Reduce("sum", {{1, "k"}, {12, "n"}}, Sum(), in, {{6, "m"}});
@@ -700,7 +700,7 @@
VarHandle m("m", kInt);
VarHandle n("n", kInt);
- Placeholder b(BufHandle("b", {m, n}, kFloat));
+ BufHandle b("b", {m, n}, kFloat);
std::vector<float> in(M * N);
for (int j = 0; j < M * N; ++j) {
in[j] = j;
@@ -733,7 +733,7 @@
VarHandle n("n", kInt);
VarHandle k("k", kInt);
- Placeholder b(BufHandle("b", {m, n, k}, kFloat));
+ BufHandle b("b", {m, n, k}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -766,7 +766,7 @@
VarHandle n("n", kInt);
VarHandle k("k", kInt);
- Placeholder b(BufHandle("b", {m, n, k}, kFloat));
+ BufHandle b("b", {m, n, k}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -791,7 +791,7 @@
}
TEST(Reductions, ReduceRepeatedInternalRfactor) {
- Placeholder in_(BufHandle("in_", {2, 3, 4, 5, 6}, kFloat));
+ BufHandle in_("in_", {2, 3, 4, 5, 6}, kFloat);
const int InputSize = 2 * 3 * 4 * 5 * 6;
std::vector<float> in(InputSize, 1.f);
@@ -840,7 +840,7 @@
const int N = 10;
const int K = 10;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -870,7 +870,7 @@
const int M = 10;
const int N = 10;
const int K = 10;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -902,7 +902,7 @@
const int N = 10;
const int K = 10;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -933,7 +933,7 @@
const int N = 10;
const int K = 10;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -963,7 +963,7 @@
const int M = 10;
const int N = 10;
const int K = 10;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -994,7 +994,7 @@
const int N = 10;
const int K = 10;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -1027,7 +1027,7 @@
const int K = 10;
const int SPLIT_FACTOR = 4;
- Placeholder b(BufHandle("b", {M, N, K}, kFloat));
+ BufHandle b("b", {M, N, K}, kFloat);
std::vector<float> in(M * N * K);
for (int m = 0; m < M; ++m) {
for (int j = 0; j < N * K; ++j) {
@@ -1068,7 +1068,7 @@
const int K = 10;
const int SPLIT_FACTOR = 16;
- Placeholder b(BufHandle("b", {N, K}, kFloat));
+ BufHandle b("b", {N, K}, kFloat);
std::vector<float> in(N * K);
for (int j = 0; j < N * K; ++j) {
in[j] = j;
@@ -1123,8 +1123,8 @@
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M});
- Placeholder b_buf("b", kFloat, {M, N, K});
+ BufHandle a_buf("a", {M}, kFloat);
+ BufHandle b_buf("b", {M, N, K}, kFloat);
Tensor x = Reduce("x", {{M, "m1"}}, Sum(), b_buf, {{N, "n1"}, {K, "k1"}});
Tensor y = Compute("y", {{M, "m2"}}, [&](const VarHandle& m) {
@@ -1155,8 +1155,8 @@
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N, K});
- Placeholder b_buf("b", kFloat, {M, N, K});
+ BufHandle a_buf("a", {M, N, K}, kFloat);
+ BufHandle b_buf("b", {M, N, K}, kFloat);
Tensor x = Compute(
"x",
@@ -1208,8 +1208,8 @@
const int N = 5;
const int K = 6;
- Placeholder a_buf("a", kFloat, {M, N, K});
- Placeholder b_buf("b", kFloat, {M, N, K});
+ BufHandle a_buf("a", {M, N, K}, kFloat);
+ BufHandle b_buf("b", {M, N, K}, kFloat);
Tensor x = Compute(
"x",
@@ -1265,8 +1265,8 @@
int N = 3;
int M = 2;
- Placeholder a(BufHandle("a", {L, N, M}, kFloat));
- Placeholder b(BufHandle("b", {L, N, M}, kFloat));
+ BufHandle a("a", {L, N, M}, kFloat);
+ BufHandle b("b", {L, N, M}, kFloat);
Tensor c = Compute(
"scale",
@@ -1340,8 +1340,8 @@
int N = 3;
int M = 2;
- Placeholder a(BufHandle("a", {L, N, M}, kFloat));
- Placeholder b(BufHandle("b", {L, N, M}, kFloat));
+ BufHandle a("a", {L, N, M}, kFloat);
+ BufHandle b("b", {L, N, M}, kFloat);
Tensor c = Compute(
"scale",
@@ -1413,8 +1413,8 @@
int N = 3;
int M = 2;
- Placeholder a(BufHandle("a", {L, N, M}, kFloat));
- Placeholder b(BufHandle("b", {L, N, M}, kFloat));
+ BufHandle a("a", {L, N, M}, kFloat);
+ BufHandle b("b", {L, N, M}, kFloat);
Tensor c = Compute(
"scale",
@@ -1482,8 +1482,8 @@
}
TEST(Reductions, ReductionCacheBodyAccess) {
- Placeholder a(BufHandle("a", {24, 32, 12}, kFloat));
- Placeholder b(BufHandle("b", {24, 32, 12}, kFloat));
+ BufHandle a("a", {24, 32, 12}, kFloat);
+ BufHandle b("b", {24, 32, 12}, kFloat);
Tensor c = Compute(
"scale",
@@ -1521,8 +1521,8 @@
}
TEST(Reductions, ReductionCacheConsumerAccess) {
- Placeholder a(BufHandle("a", {24, 32, 12}, kFloat));
- Placeholder b(BufHandle("b", {24, 32, 12}, kFloat));
+ BufHandle a("a", {24, 32, 12}, kFloat);
+ BufHandle b("b", {24, 32, 12}, kFloat);
Tensor c = Compute(
"scale",
@@ -1560,8 +1560,8 @@
}
TEST(Reductions, ReductionSplitCacheConsumerAccess) {
- Placeholder a(BufHandle("a", {24, 32, 12}, kFloat));
- Placeholder b(BufHandle("b", {24, 32, 12}, kFloat));
+ BufHandle a("a", {24, 32, 12}, kFloat);
+ BufHandle b("b", {24, 32, 12}, kFloat);
Tensor c = Compute(
"scale",
@@ -1606,8 +1606,8 @@
}
TEST(Reductions, ReductionReorderCacheConsumerAccess) {
- Placeholder a(BufHandle("a", {24, 32, 12}, kFloat));
- Placeholder b(BufHandle("b", {24, 32, 12}, kFloat));
+ BufHandle a("a", {24, 32, 12}, kFloat);
+ BufHandle b("b", {24, 32, 12}, kFloat);
Tensor c = Compute(
"scale",
@@ -1660,7 +1660,7 @@
VarHandle n("n", kInt);
VarHandle k("k", kInt);
- Placeholder b(BufHandle("B", {m, n, k}, kFloat));
+ BufHandle b("B", {m, n, k}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -1726,7 +1726,7 @@
VarHandle n("n", kInt);
VarHandle k("k", kInt);
- Placeholder b(BufHandle("B", {m, n, k}, kFloat));
+ BufHandle b("B", {m, n, k}, kFloat);
std::vector<float> in(M * N * K);
for (int j = 0; j < M * N * K; ++j) {
in[j] = j;
@@ -1790,7 +1790,7 @@
std::vector<float> out_before(8, -1.f);
std::vector<float> out_after(8, -1.f);
- Placeholder in(BufHandle("in", {8, 8}, kFloat));
+ BufHandle in("in", {8, 8}, kFloat);
Tensor tensor = Reduce("sum", {{8, "m"}}, Sum(), in, {{8, "n"}});
LoopNest l_before({tensor});
@@ -1826,7 +1826,7 @@
}
TEST(Reductions, ReductionVectorizeInner) {
- Placeholder in(BufHandle("in", {8, 8}, kFloat));
+ BufHandle in("in", {8, 8}, kFloat);
Tensor tensor = Reduce("sum", {{8, "m"}}, Sum(), in, {{8, "n"}});
LoopNest l({tensor});
@@ -1844,7 +1844,7 @@
std::vector<float> out_before(1, -1.f);
std::vector<float> out_after(1, -1.f);
- Placeholder in(BufHandle("in", {8, 8}, kFloat));
+ BufHandle in("in", {8, 8}, kFloat);
Tensor tensor = Reduce("sum", {}, Sum(), in, {{8, "m"}, {8, "n"}});
@@ -1902,8 +1902,8 @@
TEST(Reductions, InitFunction) {
constexpr int M = 32;
constexpr int N = 16;
- Placeholder A("A", kFloat, {M, N});
- Placeholder B("B", kFloat, {N});
+ BufHandle A("A", {M, N}, kFloat);
+ BufHandle B("B", {N}, kFloat);
Tensor C = Reduce(
"C",
{{N, "n"}},
diff --git a/test/cpp/tensorexpr/test_simplify.cpp b/test/cpp/tensorexpr/test_simplify.cpp
index 9de5713..f71f907 100644
--- a/test/cpp/tensorexpr/test_simplify.cpp
+++ b/test/cpp/tensorexpr/test_simplify.cpp
@@ -3854,7 +3854,7 @@
TEST(Simplify, SimplifyForCleansUp) {
{
- Placeholder a("a", kFloat, {1, 12, 1});
+ BufHandle a("a", {1, 12, 1}, kFloat);
VarHandle x("x", kInt);
Tensor b = Compute(
// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
@@ -4832,7 +4832,7 @@
// b[n] = 1.f;
// }
constexpr int N = 8;
- Placeholder b("b", kFloat, {N});
+ BufHandle b("b", {N}, kFloat);
VarHandle n("n", kInt);
StmtPtr s = For::make(
n, 1, N, b.store({n}, CompareSelect::make(n, 1, 0.f, 1.0f, kLT)));
@@ -4856,7 +4856,7 @@
// b[n] = 1.f;
// }
constexpr int N = 8;
- Placeholder b("b", kFloat, {N});
+ BufHandle b("b", {N}, kFloat);
VarHandle n("n", kInt);
StmtPtr s =
For::make(n, 1, N, b.store({n}, IfThenElse::make(n < 1, 0.f, 1.0f)));
@@ -4884,7 +4884,7 @@
// for (int j = 1; j < 7; j++) {
// b[i, j] = 1.f;
constexpr int N = 8;
- Placeholder b("b", kFloat, {N, N});
+ BufHandle b("b", {N, N}, kFloat);
VarHandle i("i", kInt);
VarHandle j("j", kInt);
auto csel = CompareSelect::make(i, 1, kLT);
@@ -4920,8 +4920,8 @@
// b[i, j] = (b[i, j]) + 1.f;
constexpr int N = 8;
constexpr int K = 3;
- Placeholder a("a", kFloat, {N, N});
- Placeholder b("b", kFloat, {N, N});
+ BufHandle a("a", {N, N}, kFloat);
+ BufHandle b("b", {N, N}, kFloat);
VarHandle i("i", kInt);
VarHandle j("j", kInt);
auto csel = CompareSelect::make(i, 1, kLT);
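Only the declarations change in test_simplify.cpp; the b.store({...}, ...) call sites compile unchanged because BufHandle gains an equivalent store member in this patch (see expr.h and ir.cpp below). The pattern used by these tests, in isolation (sketch):

    constexpr int N = 8;
    BufHandle b("b", {N}, kFloat);
    VarHandle n("n", kInt);
    // Resolves to the new BufHandle::store, same call shape as before.
    StmtPtr s =
        For::make(n, 1, N, b.store({n}, IfThenElse::make(n < 1, 0.f, 1.0f)));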
diff --git a/torch/csrc/jit/runtime/static/te_wrapper.cpp b/torch/csrc/jit/runtime/static/te_wrapper.cpp
index acd1fb7..bfb7d9e 100644
--- a/torch/csrc/jit/runtime/static/te_wrapper.cpp
+++ b/torch/csrc/jit/runtime/static/te_wrapper.cpp
@@ -109,7 +109,7 @@
wrap = std::make_shared<TEWrapper>();
auto N = VarHandle("N", kInt);
auto C = VarHandle("C", kFloat);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
Tensor B = Compute("B", {N}, [&](const VarHandle& i) {
auto A_elem = [&]() {
auto elem = A.load(i);
@@ -133,7 +133,7 @@
}
wrap = std::make_shared<TEWrapper>();
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
Tensor B = Compute("B", {N}, [&](const VarHandle& i) {
auto zero = FloatImm::make(0.f);
auto a = A.load(i);
@@ -151,7 +151,7 @@
}
wrap = std::make_shared<TEWrapper>();
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
Tensor B = Compute("B", {N}, [&](const VarHandle& i) {
auto a = A.load(i);
return fast_tanh(a);
@@ -168,7 +168,7 @@
}
wrap = std::make_shared<TEWrapper>();
auto N = VarHandle("N", kInt);
- Placeholder A("A", kFloat, {N});
+ BufHandle A("A", {N}, kFloat);
Tensor B =
Compute("B", {N}, [&](const VarHandle& i) { return sigmoid(A.load(i)); });
// NNC uses sleef for vectorizing sigmoid, which comes in an 8-wide flavor
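Each static-runtime wrapper above follows the same shape once the handle is used directly; a condensed sketch of the pattern (scheduling and codegen setup elided, assumed to match the surrounding builders):

    auto N = VarHandle("N", kInt);
    BufHandle A("A", {N}, kFloat);
    Tensor B = Compute(
        "B", {N}, [&](const VarHandle& i) { return sigmoid(A.load(i)); });
    // A and B are then handed to the codegen as arguments; no
    // Placeholder wrapper is needed for either.
    LoopNest nest({B});
    nest.prepareForCodegen();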
diff --git a/torch/csrc/jit/tensorexpr/codegen.h b/torch/csrc/jit/tensorexpr/codegen.h
index 0504f9a..5f16be5 100644
--- a/torch/csrc/jit/tensorexpr/codegen.h
+++ b/torch/csrc/jit/tensorexpr/codegen.h
@@ -107,7 +107,6 @@
class CodeGen::BufferArg {
public:
- BufferArg(const Placeholder& buffer) : buf_(buffer.data()) {}
BufferArg(Tensor tensor) : buf_(tensor.buf()) {}
BufferArg(const VarHandle& var) : var_(var.node()), isVar_(true) {}
BufferArg(const BufHandle& buf) : buf_(buf.node()) {}
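Dropping the Placeholder overload is safe because every call site can pass the BufHandle itself; BufferArg stays implicitly constructible from BufHandle, Tensor, and VarHandle. Illustrative use (a sketch; SimpleIREvaluator stands in for any CodeGen subclass):

    BufHandle a("a", {8}, kFloat);
    Tensor t = Compute(
        "t", {{8, "i"}}, [&](const VarHandle& i) { return a.load(i) * 2.f; });
    LoopNest nest({t});
    nest.prepareForCodegen();
    StmtPtr s = IRSimplifier::simplify(nest.root_stmt());
    // Both arguments convert to CodeGen::BufferArg implicitly.
    SimpleIREvaluator cg(s, {a, t});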
diff --git a/torch/csrc/jit/tensorexpr/eval.h b/torch/csrc/jit/tensorexpr/eval.h
index 9da1d38..8461745 100644
--- a/torch/csrc/jit/tensorexpr/eval.h
+++ b/torch/csrc/jit/tensorexpr/eval.h
@@ -157,17 +157,14 @@
: dtype_(expr.dtype()) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<BufferArg> buffer_args_extended = buffer_args;
- Placeholder ret_buf("ret_val", dtype_, {1});
+ BufHandle ret_buf("ret_val", {1}, dtype_);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
- std::vector<ExprPtr> indices;
- ExprPtr zero = alloc<IntImm>(0);
- for (size_t i = 0; i < ret_buf.data()->ndim(); i++) {
+ std::vector<ExprHandle> indices;
+ ExprHandle zero = IntImm::make(0);
+ for (size_t i = 0; i < ret_buf.ndim(); i++) {
indices.push_back(zero);
}
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
- StmtPtr store_stmt =
- // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
- alloc<Store>(ret_buf.data(), indices, expr.node());
+ StmtPtr store_stmt = Store::make(ret_buf, indices, expr);
buffer_args_extended.emplace_back(ret_buf);
codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended));
}
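The ExprEval constructor now builds the return-value store through the handle-level factory rather than allocating IR nodes with alloc<>, which also lets it query ndim() on the handle instead of reaching through data(). The same construction in isolation (sketch):

    BufHandle ret("ret_val", {1}, kFloat);
    std::vector<ExprHandle> indices(ret.ndim(), IntImm::make(0));
    // Store::make unwraps the handles to IR nodes internally.
    StmtPtr store_stmt = Store::make(ret, indices, FloatImm::make(1.f));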
diff --git a/torch/csrc/jit/tensorexpr/expr.h b/torch/csrc/jit/tensorexpr/expr.h
index 41ce99a..bd3250b 100644
--- a/torch/csrc/jit/tensorexpr/expr.h
+++ b/torch/csrc/jit/tensorexpr/expr.h
@@ -288,6 +288,11 @@
template <typename T>
inline ExprHandle load(const std::vector<T>& args) const;
+ inline ExprHandle load(const std::vector<ExprHandle>& args) const;
+
+ StorePtr store(const std::vector<ExprHandle>& args, const ExprHandle& val)
+ const;
+
bool operator==(const BufHandle& other) const {
return this->node() == other.node();
}
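The store declaration supplies the last piece of the Placeholder surface that BufHandle was missing; its definition lives in ir.cpp below. In use (sketch):

    BufHandle buf("buf", {4, 4}, kFloat);
    VarHandle i("i", kInt), j("j", kInt);
    // Returns the concrete StorePtr rather than a generic StmtPtr.
    StorePtr st = buf.store({i, j}, buf.load(i, j) + 1.f);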
diff --git a/torch/csrc/jit/tensorexpr/fwd_decls.h b/torch/csrc/jit/tensorexpr/fwd_decls.h
index 119308b..2054a56 100644
--- a/torch/csrc/jit/tensorexpr/fwd_decls.h
+++ b/torch/csrc/jit/tensorexpr/fwd_decls.h
@@ -35,6 +35,8 @@
using VarPtr = NodePtr<Var>;
class ExprHandle;
+class VarHandle;
+class BufHandle;
class Add;
class And;
diff --git a/torch/csrc/jit/tensorexpr/ir.cpp b/torch/csrc/jit/tensorexpr/ir.cpp
index 439993c..0812d06 100644
--- a/torch/csrc/jit/tensorexpr/ir.cpp
+++ b/torch/csrc/jit/tensorexpr/ir.cpp
@@ -75,6 +75,12 @@
buf.node(), ExprHandleVectorToExprVector(indices), value.node());
}
+StorePtr BufHandle::store(
+ const std::vector<ExprHandle>& args,
+ const ExprHandle& value) const {
+ return Store::make(*this, args, value);
+}
+
ExprPtr flatten_index(
const std::vector<ExprPtr>& dims,
const std::vector<ExprPtr>& indices) {
diff --git a/torch/csrc/jit/tensorexpr/ir.h b/torch/csrc/jit/tensorexpr/ir.h
index 4448ea4..5dd5f9a 100644
--- a/torch/csrc/jit/tensorexpr/ir.h
+++ b/torch/csrc/jit/tensorexpr/ir.h
@@ -65,8 +65,6 @@
}
}
-class Placeholder;
-
class TORCH_API Cast : public ExprNode<Cast> {
public:
ExprPtr src_value() const {
diff --git a/torch/csrc/jit/tensorexpr/kernel.cpp b/torch/csrc/jit/tensorexpr/kernel.cpp
index b38de4f..2d30a08 100644
--- a/torch/csrc/jit/tensorexpr/kernel.cpp
+++ b/torch/csrc/jit/tensorexpr/kernel.cpp
@@ -2868,18 +2868,18 @@
throw malformed_input(msg);
}
if (isContiguous(input)) {
- Placeholder inBuffer(
+ BufHandle inBuffer(
"t" + input_name_map_[input],
- ToDtype(static_cast<ScalarType>(*tt->scalarType())),
- toExprHandles(*tt->sizes().concrete_sizes()));
- bufs_.emplace(input, inBuffer.data());
+ toExprHandles(*tt->sizes().concrete_sizes()),
+ ToDtype(static_cast<ScalarType>(*tt->scalarType())));
+ bufs_.emplace(input, inBuffer.node());
bufferArgs_.emplace_back(inBuffer);
break;
}
- Placeholder inBuffer(
+ BufHandle inBuffer(
"t" + input_name_map_[input],
- ToDtype(static_cast<ScalarType>(*tt->scalarType())),
- {0});
+ {0},
+ ToDtype(static_cast<ScalarType>(*tt->scalarType())));
std::vector<DimArg> inputTensorDims;
for (size_t i = 0; i < *tt->sizes().size(); i++) {
auto const size = *tt->sizes()[i];
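Besides the constructor argument-order swap, the substantive change here is how the raw buffer is extracted: Placeholder::data() becomes BufHandle::node(), both yielding the underlying BufPtr. Sketch (buffer name illustrative):

    BufHandle inBuffer("t0", {0}, kFloat);
    // Placeholder::data() -> BufHandle::node(); same BufPtr either way.
    BufPtr raw = inBuffer.node();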
diff --git a/torch/csrc/jit/tensorexpr/reduction.h b/torch/csrc/jit/tensorexpr/reduction.h
index 22d90b9..24df1ef 100644
--- a/torch/csrc/jit/tensorexpr/reduction.h
+++ b/torch/csrc/jit/tensorexpr/reduction.h
@@ -26,9 +26,6 @@
Reducer(ExprHandle init, ReduceInteraction& interaction)
: init_(init.node()), interaction_(interaction) {}
- Reducer(ExprHandle init, ReduceInteraction& interaction, Placeholder& buf)
- : init_(init.node()), interaction_(interaction) {}
-
template <typename RI>
Reducer(ExprHandle init, RI interaction) : init_(init.node()) {
interaction_ = interaction;
diff --git a/torch/csrc/jit/tensorexpr/stmt.h b/torch/csrc/jit/tensorexpr/stmt.h
index 7e4914f..832d5ad 100644
--- a/torch/csrc/jit/tensorexpr/stmt.h
+++ b/torch/csrc/jit/tensorexpr/stmt.h
@@ -11,8 +11,6 @@
namespace jit {
namespace tensorexpr {
-class Placeholder;
-
// The common base between all statement node.
class TORCH_API Stmt : public std::enable_shared_from_this<Stmt> {
public:
diff --git a/torch/csrc/jit/tensorexpr/tensor.cpp b/torch/csrc/jit/tensorexpr/tensor.cpp
index 7a219fe..c78f27f 100644
--- a/torch/csrc/jit/tensorexpr/tensor.cpp
+++ b/torch/csrc/jit/tensorexpr/tensor.cpp
@@ -140,20 +140,6 @@
const std::string& name,
const std::vector<DimArg>& dim_args,
const Reducer& reducer,
- const Placeholder& buffer,
- const std::vector<DimArg>& reduce_args) {
- return Reduce(
- name,
- dim_args,
- reducer,
- [&](ParameterList& p) { return buffer.load(p); },
- reduce_args);
-}
-
-Tensor Reduce(
- const std::string& name,
- const std::vector<DimArg>& dim_args,
- const Reducer& reducer,
const BufHandle& buffer,
const std::vector<DimArg>& reduce_args) {
return Reduce(
diff --git a/torch/csrc/jit/tensorexpr/tensor.h b/torch/csrc/jit/tensorexpr/tensor.h
index 8d8ffe5..bf9b778 100644
--- a/torch/csrc/jit/tensorexpr/tensor.h
+++ b/torch/csrc/jit/tensorexpr/tensor.h
@@ -57,83 +57,6 @@
StmtPtr stmt_;
};
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-class Placeholder {
- public:
- Placeholder() = default;
-
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
- Placeholder(const BufHandle& data) : data_(data.node()) {
- if (data_->base_handle()->dtype() != kHandle) {
- throw malformed_input("Placeholder dtype must be Handle");
- }
-
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
- std::vector<ExprHandle> stride_handles(ndim());
- for (int i = (int)ndim() - 1; i >= 0; i--) {
- // NOLINTNEXTLINE(bugprone-branch-clone)
- if (i == ndim() - 1) {
- stride_handles[i] = 1;
- } else {
- stride_handles[i] = stride_handles[i + 1] * ExprHandle(dim(i + 1));
- }
- }
- strides_ = ExprHandleVectorToExprVector(stride_handles);
- }
-
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
- Placeholder(
- const std::string& name,
- const Dtype& dtype,
- const std::vector<ExprHandle>& dims)
- : Placeholder(BufHandle(name, dims, dtype)) {}
-
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
- Placeholder(const std::vector<ExprHandle>& dims, const Dtype& dtype)
- : Placeholder(BufHandle("_", dims, dtype)) {}
-
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
- explicit Placeholder(const std::vector<ExprHandle>& dims)
- : Placeholder(BufHandle("_", dims, kFloat)) {}
-
- BufPtr data() const {
- return data_;
- }
- BufHandle handle() const {
- return BufHandle(data());
- }
- Dtype dtype() const {
- return data_->dtype();
- }
- int ndim() const {
- return data_->ndim();
- }
- ExprPtr dim(int index) const {
- return data_->dim(index);
- }
- std::vector<ExprPtr> dims() const {
- return data_->dims();
- }
-
- template <typename... Ts>
- inline ExprHandle load(const Ts&... ts) const;
-
- template <typename T>
- inline ExprHandle load(const std::vector<T>& args) const;
-
- inline ExprHandle load(const std::vector<ExprHandle>& args) const;
-
- inline StorePtr store(
- const std::vector<ExprHandle>& args,
- const ExprHandle& val) const {
- return alloc<Store>(data(), ExprHandleVectorToExprVector(args), val.node());
- }
-
- private:
- BufPtr data_;
- std::vector<ExprPtr> strides_;
-};
-
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<DimArg>& dim_args,
@@ -258,14 +181,6 @@
return Reduce(func_name, dim_args, reducer, body_func, reduce_args);
}
-// Overload for the common case of all dimensions of a Placeholder.
-TORCH_API Tensor Reduce(
- const std::string& func_name,
- const std::vector<DimArg>& dim_args,
- const Reducer& reducer,
- const Placeholder& buffer,
- const std::vector<DimArg>& reduce_args);
-
TORCH_API Tensor Reduce(
const std::string& name,
const std::vector<DimArg>& dim_args,
@@ -297,35 +212,21 @@
}
template <typename... Ts>
-inline ExprHandle Placeholder::load(const Ts&... ts) const {
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
- std::vector<ExprHandle> params({ExprHandle(ts)...});
- return ExprHandle(alloc<Load>(data(), ExprHandleVectorToExprVector(params)));
-}
-
-template <typename T>
-inline ExprHandle Placeholder::load(const std::vector<T>& args) const {
- // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
- std::vector<ExprHandle> params(args.begin(), args.end());
- return ExprHandle(alloc<Load>(data(), ExprHandleVectorToExprVector(params)));
-}
-
-inline ExprHandle Placeholder::load(const std::vector<ExprHandle>& args) const {
- return this->template load<ExprHandle>(args);
-}
-
-template <typename... Ts>
inline ExprHandle BufHandle::load(const Ts&... ts) const {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<ExprHandle> params({ExprHandle(ts)...});
- return Load::make(*this, params);
+ return ExprHandle(alloc<Load>(node(), ExprHandleVectorToExprVector(params)));
}
template <typename T>
inline ExprHandle BufHandle::load(const std::vector<T>& args) const {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<ExprHandle> params(args.begin(), args.end());
- return Load::make(*this, params);
+ return ExprHandle(alloc<Load>(node(), ExprHandleVectorToExprVector(params)));
+}
+
+inline ExprHandle BufHandle::load(const std::vector<ExprHandle>& args) const {
+ return this->template load<ExprHandle>(args);
}
} // namespace tensorexpr
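With the Placeholder templates gone, BufHandle now carries all three load forms itself, with the vector<ExprHandle> overload forwarding to the templated one exactly as Placeholder's did. All three in use (sketch):

    BufHandle A("A", {4, 4}, kFloat);
    VarHandle i("i", kInt), j("j", kInt);
    ExprHandle e1 = A.load(i, j);                            // variadic
    ExprHandle e2 = A.load(std::vector<int>({0, 1}));        // vector<T>
    ExprHandle e3 = A.load(std::vector<ExprHandle>({i, j})); // new overload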
diff --git a/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp b/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp
index f0b7be9..ffaf20b 100644
--- a/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp
+++ b/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp
@@ -19,9 +19,7 @@
using namespace torch::jit::tensorexpr;
ArgValue convertPyToArgValue(py::handle inp) {
- if (py::isinstance<Placeholder>(inp)) {
- return py::cast<Placeholder>(inp).handle();
- } else if (py::isinstance<BufHandle>(inp)) {
+ if (py::isinstance<BufHandle>(inp)) {
return py::cast<BufHandle>(inp);
} else if (py::isinstance<VarHandle>(inp)) {
return py::cast<VarHandle>(inp);
@@ -204,24 +202,6 @@
const std::vector<ExprHandle>& args,
const ExprHandle& val) { return Store::make(self, args, val); });
- py::class_<Placeholder>(te, "Placeholder")
- .def(py::init<
- const std::string&,
- const Dtype&,
- const std::vector<ExprHandle>&>())
- .def(py::init<const std::vector<ExprHandle>&, const Dtype&>())
- .def(py::init<const std::vector<ExprHandle>&>())
- .def(
- "load",
- [](Placeholder& self, const std::vector<ExprHandle>& v) {
- return self.load(v);
- })
- .def(
- "store",
- [](Placeholder& self,
- const std::vector<ExprHandle>& args,
- const ExprHandle& val) { return self.store(args, val); })
- .def("data", [](Placeholder& self) { return BufHandle(self.data()); });
py::class_<Tensor>(te, "Tensor")
.def(
py::init([](BufHandle& b, StmtPtr s) { return Tensor(b.node(), s); }))
@@ -318,16 +298,6 @@
return Reduce(func_name, dim_args, reducer, buffer, reduce_args);
},
py::return_value_policy::reference);
- te.def(
- "Reduce",
- [](const std::string& func_name,
- const std::vector<DimArg>& dim_args,
- const Reducer& reducer,
- const Placeholder& buffer,
- const std::vector<DimArg>& reduce_args) {
- return Reduce(func_name, dim_args, reducer, buffer, reduce_args);
- },
- py::return_value_policy::reference);
te.def(
"Reduce",
@@ -813,12 +783,10 @@
#endif
py::class_<CodeGen::BufferArg>(te, "BufferArg")
- .def(py::init<const Placeholder&>())
.def(py::init<Tensor>())
.def(py::init<const VarHandle&>())
.def(py::init<const BufHandle&>());
- py::implicitly_convertible<Placeholder, CodeGen::BufferArg>();
py::implicitly_convertible<Tensor, CodeGen::BufferArg>();
py::implicitly_convertible<VarHandle, CodeGen::BufferArg>();
py::implicitly_convertible<BufHandle, CodeGen::BufferArg>();
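On the Python side this removes te.Placeholder entirely, along with its Reduce and BufferArg overloads; scripts should construct te.BufHandle directly, which already exposes the equivalent load and store bindings registered just above the deleted block.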