Skip to content

Commit 317f598

Browse files
Mikhail Zolotukhin authored and facebook-github-bot committed
[TensorExpr] Clang-format test/cpp/tensorexpr/*. (pytorch#36615)
Summary: Pull Request resolved: pytorch#36615 Test Plan: Imported from OSS Differential Revision: D21027733 Pulled By: ZolotukhinM fbshipit-source-id: e19cd85c1634f4e40805814ac71eec719d6587f8
1 parent 37aab14 commit 317f598

File tree

4 files changed

+39
-38
lines changed

4 files changed

+39
-38
lines changed

test/cpp/tensorexpr/gtest.cpp

+1-1
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,7 @@ namespace jit {
77

88
#define TENSOREXPR_GTEST(name) \
99
TEST(TensorExprTest, name) { \
10-
test##name(); \
10+
test##name(); \
1111
}
1212
TH_FORALL_TENSOREXPR_TESTS(TENSOREXPR_GTEST)
1313
#undef TENSOREXPR_GTEST

test/cpp/tensorexpr/gtest_assert_float_eq.h

+25-24
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
#pragma once
22

3-
#include <cmath>
3+
#include <cmath>
44
// Copyright 2005, Google Inc.
55
// All rights reserved.
66
//
@@ -40,12 +40,12 @@ using Bits = uint32_t;
4040
// this avoids the "dereferencing type-punned pointer
4141
// will break strict-aliasing rules" error
4242
union Float {
43-
float float_;
44-
Bits bits_;
43+
float float_;
44+
Bits bits_;
4545
};
4646

4747
// # of bits in a number.
48-
static const size_t kBitCount = 8*sizeof(Bits);
48+
static const size_t kBitCount = 8 * sizeof(Bits);
4949
// The mask for the sign bit.
5050
static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
5151

@@ -66,23 +66,24 @@ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
6666
//
6767
// Read http://en.wikipedia.org/wiki/Signed_number_representations
6868
// for more details on signed number representations.
69-
static Bits SignAndMagnitudeToBiased(const Bits &sam) {
70-
if (kSignBitMask & sam) {
71-
// sam represents a negative number.
72-
return ~sam + 1;
73-
} else {
74-
// sam represents a positive number.
75-
return kSignBitMask | sam;
76-
}
69+
static Bits SignAndMagnitudeToBiased(const Bits& sam) {
70+
if (kSignBitMask & sam) {
71+
// sam represents a negative number.
72+
return ~sam + 1;
73+
} else {
74+
// sam represents a positive number.
75+
return kSignBitMask | sam;
76+
}
7777
}
7878

7979
// Given two numbers in the sign-and-magnitude representation,
8080
// returns the distance between them as an unsigned number.
81-
static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
82-
const Bits &sam2) {
83-
const Bits biased1 = SignAndMagnitudeToBiased(sam1);
84-
const Bits biased2 = SignAndMagnitudeToBiased(sam2);
85-
return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
81+
static Bits DistanceBetweenSignAndMagnitudeNumbers(
82+
const Bits& sam1,
83+
const Bits& sam2) {
84+
const Bits biased1 = SignAndMagnitudeToBiased(sam1);
85+
const Bits biased2 = SignAndMagnitudeToBiased(sam2);
86+
return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
8687
}
8788

8889
// How many ULP's (Units in the Last Place) we want to tolerate when
@@ -106,13 +107,13 @@ static const size_t kMaxUlps = 4;
106107
// - treats really large numbers as almost equal to infinity.
107108
// - thinks +0.0 and -0.0 are 0 DLP's apart.
108109
inline bool AlmostEquals(float lhs, float rhs) {
109-
// The IEEE standard says that any comparison operation involving
110-
// a NAN must return false.
111-
if (std::isnan(lhs) || std::isnan(rhs))
112-
return false;
110+
// The IEEE standard says that any comparison operation involving
111+
// a NAN must return false.
112+
if (std::isnan(lhs) || std::isnan(rhs))
113+
return false;
113114

114-
Float l = {lhs};
115-
Float r = {rhs};
115+
Float l = {lhs};
116+
Float r = {rhs};
116117

117-
return DistanceBetweenSignAndMagnitudeNumbers(l.bits_, r.bits_) <= kMaxUlps;
118+
return DistanceBetweenSignAndMagnitudeNumbers(l.bits_, r.bits_) <= kMaxUlps;
118119
}

test/cpp/tensorexpr/test_aten.cpp

+8-4
Original file line number | Diff line number | Diff line change
@@ -506,7 +506,8 @@ void testATenmaxInt() {
506506
VarHandle index = VarHandle("index", kInt);
507507
ExprHandle load_a = Load::make(a_buf, {index}, 1);
508508
ExprHandle load_b = Load::make(b_buf, {index}, 1);
509-
Stmt* store_c = Store::make(c_buf, {index}, Max::make(load_a, load_b, true), 1);
509+
Stmt* store_c =
510+
Store::make(c_buf, {index}, Max::make(load_a, load_b, true), 1);
510511
Stmt* stmt = For::make(index, 0, kTotalSize, store_c);
511512

512513
PaddedBuffer<int> a_v(kTotalSize);
@@ -538,7 +539,8 @@ void testATenmaxFloat() {
538539
VarHandle index = VarHandle("index", kInt);
539540
ExprHandle load_a = Load::make(a_buf, {index}, 1);
540541
ExprHandle load_b = Load::make(b_buf, {index}, 1);
541-
Stmt* store_c = Store::make(c_buf, {index}, Max::make(load_a, load_b, true), 1);
542+
Stmt* store_c =
543+
Store::make(c_buf, {index}, Max::make(load_a, load_b, true), 1);
542544
Stmt* stmt = For::make(index, 0, kTotalSize, store_c);
543545

544546
PaddedBuffer<float> a_v(kTotalSize);
@@ -570,7 +572,8 @@ void testATenminInt() {
570572
VarHandle index = VarHandle("index", kInt);
571573
ExprHandle load_a = Load::make(a_buf, {index}, 1);
572574
ExprHandle load_b = Load::make(b_buf, {index}, 1);
573-
Stmt* store_c = Store::make(c_buf, {index}, Min::make(load_a, load_b, true), 1);
575+
Stmt* store_c =
576+
Store::make(c_buf, {index}, Min::make(load_a, load_b, true), 1);
574577
Stmt* stmt = For::make(index, 0, kTotalSize, store_c);
575578

576579
PaddedBuffer<int> a_v(kTotalSize);
@@ -602,7 +605,8 @@ void testATenminFloat() {
602605
VarHandle index = VarHandle("index", kInt);
603606
ExprHandle load_a = Load::make(a_buf, {index}, 1);
604607
ExprHandle load_b = Load::make(b_buf, {index}, 1);
605-
Stmt* store_c = Store::make(c_buf, {index}, Min::make(load_a, load_b, true), 1);
608+
Stmt* store_c =
609+
Store::make(c_buf, {index}, Min::make(load_a, load_b, true), 1);
606610
Stmt* stmt = For::make(index, 0, kTotalSize, store_c);
607611

608612
PaddedBuffer<float> a_v(kTotalSize);

test/cpp/tensorexpr/test_loopnest.cpp

+5-9
Original file line number | Diff line number | Diff line change
@@ -1,8 +1,8 @@
1+
#include <test/cpp/tensorexpr/test_base.h>
12
#include <memory>
23
#include <sstream>
34
#include <stdexcept>
45
#include <unordered_map>
5-
#include <test/cpp/tensorexpr/test_base.h>
66

77
#include <test/cpp/tensorexpr/padded_buffer.h>
88
#include <torch/csrc/jit/tensorexpr/bounds_inference.h>
@@ -90,8 +90,7 @@ void testExprSimple02() {
9090
x_inner,
9191
0,
9292
4,
93-
For::make(
94-
y, 0, 5, Store::make(f, {x_1, y}, func(x_1, y), 1))));
93+
For::make(y, 0, 5, Store::make(f, {x_1, y}, func(x_1, y), 1))));
9594
ExprHandle x_2 = x_tail + x_outer_end * 4;
9695
For* stmt2 = For::make(
9796
x_tail,
@@ -159,8 +158,7 @@ void testExprSplitWithTailNone() {
159158
x_inner,
160159
0,
161160
4,
162-
For::make(
163-
y, 0, 5, Store::make(f, {x_1, y}, func(x_1, y), 1))));
161+
For::make(y, 0, 5, Store::make(f, {x_1, y}, func(x_1, y), 1))));
164162

165163
std::ostringstream oss_ref;
166164
oss_ref << *stmt;
@@ -705,8 +703,7 @@ void testBoundsInference_4() {
705703
}
706704
{
707705
// Infer bounds on the inner loop body's scope
708-
const std::vector<TensorAccessBoundsInfo>& bounds_info =
709-
inferBounds(body);
706+
const std::vector<TensorAccessBoundsInfo>& bounds_info = inferBounds(body);
710707
auto bounds_info_map = convertBoundsInfoToMap(bounds_info);
711708

712709
ASSERT_EQ(bounds_info_map.at(a.data()).kind, kLoad);
@@ -824,8 +821,7 @@ void testBoundsInference_6() {
824821
}
825822
{
826823
// Infer bounds on the inner loop body's scope
827-
const std::vector<TensorAccessBoundsInfo>& bounds_info =
828-
inferBounds(body);
824+
const std::vector<TensorAccessBoundsInfo>& bounds_info = inferBounds(body);
829825
auto bounds_info_map = convertBoundsInfoToMap(bounds_info);
830826

831827
ASSERT_EQ(bounds_info_map.at(a.data()).kind, kLoad);

0 commit comments

Comments (0)