
Commit 96e3b3a

malfet authored and pytorchmergebot committed
[BE] Cleanup CMake flag suppressions (pytorch#97584)
Use `append_cxx_flag_if_supported` to determine whether or not `-Werror` is supported.

Do not suppress deprecation warnings if glog is not used/installed: as the check is currently written, it suppresses deprecations even when `glog` is not installed. Similarly, do not suppress deprecations on macOS simply because we are compiling with protobuf.

Fix deprecation warnings in:
- MPS, by replacing `MTLResourceOptionCPUCacheModeDefault` with `MTLResourceCPUCacheModeDefaultCache`
- GTests, by replacing `TYPED_TEST_CASE` with `TYPED_TEST_SUITE`
- `codegen/onednn/interface.cpp`, by passing `Stack` by reference rather than by pointer

Do not guard calls to `append_cxx_flag_if_supported` with `if(CLANG)` or `if(GCC)`.

Fix some deprecated calls in `Metal`; hide the more complex cases behind `C10_CLANG_DIAGNOSTIC_IGNORE`.

Pull Request resolved: pytorch#97584
Approved by: https://github.com/kit1980
1 parent 345714e commit 96e3b3a

19 files changed (+89 -93 lines)
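For context on the Metal/MPS changes below, here is a minimal sketch (not taken from this commit) of the scoped suppression pattern the diff introduces around deprecated Apple API calls; `legacy_api()` and `call_legacy_quietly()` are hypothetical stand-ins for the real Metal/MPSCNN calls:

```cpp
#include <c10/macros/Macros.h>

// Hypothetical deprecated function standing in for the Metal/MPSCNN calls
// that MetalContext.mm and MPSCNNNeuronOp.mm guard in this commit.
[[deprecated("use the replacement API instead")]] inline int legacy_api() {
  return 0;
}

int call_legacy_quietly() {
  C10_CLANG_DIAGNOSTIC_PUSH()
  C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
  // The deprecated call compiles without -Wdeprecated-declarations noise;
  // the suppression is scoped and ends at the matching POP below.
  int value = legacy_api();
  C10_CLANG_DIAGNOSTIC_POP()
  return value;
}
```

Under non-clang compilers (or when the diagnostic is unknown to the compiler) these macros expand to nothing, so the surrounding code builds unchanged.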

CMakeLists.txt (+10 -21)

@@ -808,7 +808,6 @@ if(NOT MSVC)
   append_cxx_flag_if_supported("-Wno-unused-result" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-strict-overflow" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-strict-aliasing" CMAKE_CXX_FLAGS)
-  append_cxx_flag_if_supported("-Wno-error=deprecated-declarations" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wvla-extension" CMAKE_CXX_FLAGS)
   if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
     string(APPEND CMAKE_CXX_FLAGS " -Wno-range-loop-analysis")
@@ -854,19 +853,13 @@ if(NOT MSVC)

   append_cxx_flag_if_supported("-Wno-error=pedantic" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-error=old-style-cast" CMAKE_CXX_FLAGS)
-  # These flags are not available in GCC-4.8.5. Set only when using clang.
-  # Compared against https://gcc.gnu.org/onlinedocs/gcc-4.8.5/gcc/Option-Summary.html
-  if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
-    append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-unused-private-field" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-aligned-allocation-unavailable" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-missing-braces" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wunused-lambda-capture" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Qunused-arguments" CMAKE_CXX_FLAGS)
-    if(${USE_COLORIZE_OUTPUT})
-    endif()
-  endif()
+  append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-unused-private-field" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-aligned-allocation-unavailable" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-missing-braces" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wunused-lambda-capture" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Qunused-arguments" CMAKE_CXX_FLAGS)

   if(${USE_COLORIZE_OUTPUT})
     append_cxx_flag_if_supported("-fcolor-diagnostics" CMAKE_CXX_FLAGS)
@@ -879,17 +872,13 @@ if(NOT MSVC)
     string(APPEND CMAKE_CXX_FLAGS " -faligned-new")
   endif()
   if(WERROR)
-    check_cxx_compiler_flag("-Werror" COMPILER_SUPPORT_WERROR)
+    append_cxx_flag_if_supported("-Werror" CMAKE_CXX_FLAGS)
     if(NOT COMPILER_SUPPORT_WERROR)
       set(WERROR FALSE)
-    else()
-      string(APPEND CMAKE_CXX_FLAGS " -Werror")
     endif()
-  endif(WERROR)
-  if(NOT APPLE)
-    append_cxx_flag_if_supported("-Wno-unused-but-set-variable" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-maybe-uninitialized" CMAKE_CXX_FLAGS)
   endif()
+  append_cxx_flag_if_supported("-Wno-unused-but-set-variable" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Wno-maybe-uninitialized" CMAKE_CXX_FLAGS)
   string(APPEND CMAKE_CXX_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)

aten/src/ATen/native/metal/MetalContext.mm (+4 -1)

@@ -53,9 +53,12 @@ - (BOOL)available {
           isOperatingSystemAtLeastVersion:supportedVer]) {
     return false;
   }
+  C10_CLANG_DIAGNOSTIC_PUSH()
+  C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
   if (![_device supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v3]) {
     return false;
   }
+  C10_CLANG_DIAGNOSTIC_POP()
 #else
   return false;
 #endif
@@ -136,7 +139,7 @@ - (BOOL)available {
 - (id<MTLBuffer>)emptyMTLBuffer:(int64_t) size {
   TORCH_CHECK(_device);
   id<MTLBuffer> buffer = [_device newBufferWithLength:size
-                                              options:MTLResourceOptionCPUCacheModeWriteCombined];
+                                              options:MTLResourceCPUCacheModeWriteCombined];
   return buffer;
 }

aten/src/ATen/native/metal/mpscnn/MPSCNNNeuronOp.mm (+7)

@@ -1,6 +1,11 @@
 #import <ATen/native/metal/MetalContext.h>
 #import <ATen/native/metal/mpscnn/MPSCNNNeuronOp.h>

+#include <c10/macros/Macros.h>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-declarations")
+
 @implementation MPSCNNNeuronOp

 + (MPSCNNNeuronHardSigmoid*)hardSigmoid API_AVAILABLE(ios(11.0), macos(10.13)) {
@@ -70,6 +75,8 @@ + (MPSCNNNeuronTanH*)tanh {

 @end

+C10_CLANG_DIAGNOSTIC_POP()
+
 API_AVAILABLE(ios(11.3), macos(10.13), macCatalyst(13.0))
 @implementation MPSCNNNeuronOpDescriptor

aten/src/ATen/native/metal/mpscnn/MPSImageUtils.mm (+2 -2)

@@ -36,7 +36,7 @@
   int64_t size_bytes = c10::multiply_integers(sizes) * sizeof(float);
   id<MTLBuffer> buff = [[MetalContext sharedInstance].device
       newBufferWithLength:size_bytes
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   memcpy(buff.contents, src, size_bytes);
   MPSImage* output = createStaticImage(sizes);
   id<MTLComputePipelineState> state = [[MetalContext sharedInstance]
@@ -171,7 +171,7 @@ void copyImageToFloatBuffer(float* dst, MPSImage* image) {
   int64_t size_bytes = c10::multiply_integers([image sizes]) * sizeof(float);
   id<MTLBuffer> buffer = [[MetalContext sharedInstance].device
       newBufferWithLength:size_bytes
-                  options:MTLResourceOptionCPUCacheModeDefault];
+                  options:MTLResourceCPUCacheModeDefaultCache];

   id<MTLCommandBuffer> cb =
       [MetalContext sharedInstance].commandQueue.commandBuffer;

aten/src/ATen/native/metal/ops/MetalConcat.mm (+3 -3)

@@ -33,7 +33,7 @@ Tensor cat_batch(const Tensor& tensor, const ITensorListRef& tensors, MetalTenso
                   X, "copy_offset", "copy_offset_nonarray")];
   id<MTLBuffer> offsetBuffer = [[MetalContext sharedInstance].device
       newBufferWithLength:1 * sizeof(ushort)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   ushort* offsetBufferPtr = (ushort*)[offsetBuffer contents];
   offsetBufferPtr[0] = cat_dim4_pointer;

@@ -91,7 +91,7 @@ Tensor cat_feature(const Tensor& tensor, const ITensorListRef& tensors, MetalTen
   ]];
   id<MTLBuffer> offsetBuffer = [[MetalContext sharedInstance].device
       newBufferWithLength:6 * sizeof(ushort)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   ushort* offsetBufferPtr = (ushort*)[offsetBuffer contents];
   offsetBufferPtr[0] = (X.featureChannels + tex_offset + 3) / 4;
   offsetBufferPtr[1] = (Y.featureChannels + 3) / 4;
@@ -141,7 +141,7 @@ Tensor cat_feature(const Tensor& tensor, const ITensorListRef& tensors, MetalTen
   ]];
   id<MTLBuffer> offsetBuffer = [[MetalContext sharedInstance].device
       newBufferWithLength:2 * sizeof(ushort)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   ushort* offsetBufferPtr = (ushort*)[offsetBuffer contents];
   offsetBufferPtr[0] = channel_offset / 4;
   offsetBufferPtr[1] = (Y.featureChannels + 3) / 4;

aten/src/ATen/native/metal/ops/MetalTranspose.mm (+1 -1)

@@ -19,7 +19,7 @@
 id<MTLBuffer> _makeMTLBuffer(const std::vector<T>& src) {
   id<MTLBuffer> buffer = [[MetalContext sharedInstance].device
       newBufferWithLength:src.size() * sizeof(T)
-                  options:MTLResourceOptionCPUCacheModeWriteCombined];
+                  options:MTLResourceCPUCacheModeWriteCombined];
   memcpy(buffer.contents, src.data(), src.size() * sizeof(T));
   return buffer;
 }

aten/src/ATen/native/mps/operations/Copy.mm (+2 -2)

@@ -126,7 +126,7 @@ void copy_cast_mps(at::Tensor& dst,
   size_t dst_tensor_nbytes = dst.nbytes();

   @autoreleasepool {
-    MTLResourceOptions options = MTLResourceOptionCPUCacheModeDefault | MTLResourceStorageModeShared;
+    MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache | MTLResourceStorageModeShared;
     NSUInteger alignedLength = 0;

     void* host_dst = dst.storage().data();
@@ -189,7 +189,7 @@ static void copy_to_mps_stride_contig(at::Tensor& dst, const at::Tensor& src, bo
   TORCH_INTERNAL_ASSERT(src.dtype() == dst.dtype() && src.strides() == dst.strides() && is_strided_contiguous(src));

   @autoreleasepool {
-    MTLResourceOptions options = MTLResourceOptionCPUCacheModeDefault | MTLResourceStorageModeShared;
+    MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache | MTLResourceStorageModeShared;
     NSUInteger alignedLength = 0;
     NSUInteger sourceOffset = 0;

aten/src/ATen/native/mps/operations/UnaryOps.mm (+1 -1)

@@ -372,7 +372,7 @@ Tensor logit_mps(const Tensor& self, c10::optional<double> eps) {

   // issue #103810551: cumsum is horribly broken for int8, int16 and as chances for overflow is pretty high, cast to
   // int32 fixed in macOS 13.3
-  bool castInputData = (isIntegralType(input.scalar_type()) && input.scalar_type() != ScalarType::Int &&
+  bool castInputData = (isIntegralType(input.scalar_type(), false) && input.scalar_type() != ScalarType::Int &&
                         input.scalar_type() != ScalarType::Long);

   TORCH_CHECK(macOS13_3_plus || input.scalar_type() != ScalarType::Long,

aten/src/ATen/test/ExclusivelyOwned_test.cpp (+1 -1)

@@ -66,7 +66,7 @@ using ExclusivelyOwnedTypes = ::testing::Types<
     caffe2::Tensor
 >;

-TYPED_TEST_CASE(ExclusivelyOwnedTest, ExclusivelyOwnedTypes);
+TYPED_TEST_SUITE(ExclusivelyOwnedTest, ExclusivelyOwnedTypes);

 TYPED_TEST(ExclusivelyOwnedTest, DefaultConstructor) {
   c10::ExclusivelyOwned<TypeParam> defaultConstructed;
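The `TYPED_TEST_CASE` to `TYPED_TEST_SUITE` rename here (repeated in the other test files below) tracks GoogleTest's deprecation of the `*_CASE` macro spellings. A minimal sketch of the new spelling, using a hypothetical `WidgetTest` fixture rather than the fixtures from this repository:

```cpp
#include <gtest/gtest.h>

// Hypothetical typed-test fixture; stands in for ExclusivelyOwnedTest, etc.
template <typename T>
class WidgetTest : public ::testing::Test {};

using WidgetTypes = ::testing::Types<int, long, double>;

// Non-deprecated spelling; TYPED_TEST_CASE is the older alias for the same macro.
TYPED_TEST_SUITE(WidgetTest, WidgetTypes);

TYPED_TEST(WidgetTest, ValueInitializedIsZero) {
  TypeParam value{};
  EXPECT_EQ(value, static_cast<TypeParam>(0));
}
```

The value-parameterized analogue is the `INSTANTIATE_TEST_CASE_P` to `INSTANTIATE_TEST_SUITE_P` rename seen in `bfloat16_test.cpp`, `optional_test.cpp`, and `test_lite_interpreter.cpp`.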

aten/src/ATen/test/MaybeOwned_test.cpp (+1 -1)

@@ -197,7 +197,7 @@ using MaybeOwnedTypes = ::testing::Types<
     c10::IValue
 >;

-TYPED_TEST_CASE(MaybeOwnedTest, MaybeOwnedTypes);
+TYPED_TEST_SUITE(MaybeOwnedTest, MaybeOwnedTypes);

 TYPED_TEST(MaybeOwnedTest, SimpleDereferencingString) {
   assertBorrow(this->borrowed, this->borrowFrom);

aten/src/ATen/test/vec_test_all_types.cpp (+29 -29)

@@ -70,35 +70,35 @@ namespace {
 using FloatIntTestedTypes = ::testing::Types<vfloat, vdouble, vcomplex, vcomplexDbl, vlong, vint, vshort>;
 using ComplexTypes = ::testing::Types<vcomplex, vcomplexDbl>;
 using BFloatTestedTypes = ::testing::Types<vBFloat16>;
-TYPED_TEST_CASE(Memory, ALLTestedTypes);
-TYPED_TEST_CASE(Arithmetics, FloatIntTestedTypes);
-TYPED_TEST_CASE(Comparison, RealFloatIntTestedTypes);
-TYPED_TEST_CASE(Bitwise, FloatIntTestedTypes);
-TYPED_TEST_CASE(MinMax, RealFloatIntTestedTypes);
-TYPED_TEST_CASE(Nan, RealFloatTestedTypes);
-TYPED_TEST_CASE(Interleave, RealFloatIntTestedTypes);
-TYPED_TEST_CASE(SignManipulation, FloatIntTestedTypes);
-TYPED_TEST_CASE(Rounding, RealFloatTestedTypes);
-TYPED_TEST_CASE(SqrtAndReciprocal, FloatTestedTypes);
-TYPED_TEST_CASE(SqrtAndReciprocalReal, RealFloatTestedTypes);
-TYPED_TEST_CASE(FractionAndRemainderReal, RealFloatTestedTypes);
-TYPED_TEST_CASE(Trigonometric, RealFloatTestedTypes);
-TYPED_TEST_CASE(ErrorFunctions, RealFloatTestedTypes);
-TYPED_TEST_CASE(Exponents, RealFloatTestedTypes);
-TYPED_TEST_CASE(Hyperbolic, RealFloatTestedTypes);
-TYPED_TEST_CASE(InverseTrigonometricReal, RealFloatTestedTypes);
-TYPED_TEST_CASE(InverseTrigonometric, FloatTestedTypes);
-TYPED_TEST_CASE(LGamma, RealFloatTestedTypes);
-TYPED_TEST_CASE(Logarithm, FloatTestedTypes);
-TYPED_TEST_CASE(LogarithmReals, RealFloatTestedTypes);
-TYPED_TEST_CASE(Pow, RealFloatTestedTypes);
-TYPED_TEST_CASE(RealTests, RealFloatTestedTypes);
-TYPED_TEST_CASE(RangeFactories, FloatIntTestedTypes);
-TYPED_TEST_CASE(BitwiseFloatsAdditional, RealFloatTestedTypes);
-TYPED_TEST_CASE(BitwiseFloatsAdditional2, FloatTestedTypes);
-TYPED_TEST_CASE(QuantizationTests, QuantTestedTypes);
-TYPED_TEST_CASE(FunctionalTests, RealFloatIntTestedTypes);
-TYPED_TEST_CASE(FunctionalBF16Tests, BFloatTestedTypes);
+TYPED_TEST_SUITE(Memory, ALLTestedTypes);
+TYPED_TEST_SUITE(Arithmetics, FloatIntTestedTypes);
+TYPED_TEST_SUITE(Comparison, RealFloatIntTestedTypes);
+TYPED_TEST_SUITE(Bitwise, FloatIntTestedTypes);
+TYPED_TEST_SUITE(MinMax, RealFloatIntTestedTypes);
+TYPED_TEST_SUITE(Nan, RealFloatTestedTypes);
+TYPED_TEST_SUITE(Interleave, RealFloatIntTestedTypes);
+TYPED_TEST_SUITE(SignManipulation, FloatIntTestedTypes);
+TYPED_TEST_SUITE(Rounding, RealFloatTestedTypes);
+TYPED_TEST_SUITE(SqrtAndReciprocal, FloatTestedTypes);
+TYPED_TEST_SUITE(SqrtAndReciprocalReal, RealFloatTestedTypes);
+TYPED_TEST_SUITE(FractionAndRemainderReal, RealFloatTestedTypes);
+TYPED_TEST_SUITE(Trigonometric, RealFloatTestedTypes);
+TYPED_TEST_SUITE(ErrorFunctions, RealFloatTestedTypes);
+TYPED_TEST_SUITE(Exponents, RealFloatTestedTypes);
+TYPED_TEST_SUITE(Hyperbolic, RealFloatTestedTypes);
+TYPED_TEST_SUITE(InverseTrigonometricReal, RealFloatTestedTypes);
+TYPED_TEST_SUITE(InverseTrigonometric, FloatTestedTypes);
+TYPED_TEST_SUITE(LGamma, RealFloatTestedTypes);
+TYPED_TEST_SUITE(Logarithm, FloatTestedTypes);
+TYPED_TEST_SUITE(LogarithmReals, RealFloatTestedTypes);
+TYPED_TEST_SUITE(Pow, RealFloatTestedTypes);
+TYPED_TEST_SUITE(RealTests, RealFloatTestedTypes);
+TYPED_TEST_SUITE(RangeFactories, FloatIntTestedTypes);
+TYPED_TEST_SUITE(BitwiseFloatsAdditional, RealFloatTestedTypes);
+TYPED_TEST_SUITE(BitwiseFloatsAdditional2, FloatTestedTypes);
+TYPED_TEST_SUITE(QuantizationTests, QuantTestedTypes);
+TYPED_TEST_SUITE(FunctionalTests, RealFloatIntTestedTypes);
+TYPED_TEST_SUITE(FunctionalBF16Tests, BFloatTestedTypes);
 TYPED_TEST(Memory, UnAlignedLoadStore) {
   using vec = TypeParam;
   using VT = ValueType<TypeParam>;

c10/test/util/bfloat16_test.cpp (+1 -1)

@@ -180,7 +180,7 @@ TEST_P(BFloat16Test, BFloat16RNETest) {
   EXPECT_EQ(GetParam().rne, rounded);
 }

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     BFloat16Test_Instantiation,
     BFloat16Test,
     ::testing::Values(

c10/test/util/optional_test.cpp (+4 -4)

@@ -63,7 +63,7 @@ static_assert(
     sizeof(c10::optional<c10::IntArrayRef>) == sizeof(c10::IntArrayRef),
     "c10::optional<IntArrayRef> should be size-optimized");

-TYPED_TEST_CASE(OptionalTest, OptionalTypes);
+TYPED_TEST_SUITE(OptionalTest, OptionalTypes);

 TYPED_TEST(OptionalTest, Empty) {
   typename TestFixture::optional empty;
@@ -111,11 +111,11 @@ TEST_P(SelfCompareTest, SelfCompare) {
   EXPECT_THAT(x, Not(Gt(x)));
 }

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     nullopt,
     SelfCompareTest,
     testing::Values(c10::nullopt));
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     int,
     SelfCompareTest,
     testing::Values(c10::make_optional(2)));
@@ -158,7 +158,7 @@ using CmpTestTypes = testing::Types<
     std::pair<long, c10::optional<int>>>;
 template <typename T>
 class CmpTest : public testing::Test {};
-TYPED_TEST_CASE(CmpTest, CmpTestTypes);
+TYPED_TEST_SUITE(CmpTest, CmpTestTypes);

 TYPED_TEST(CmpTest, Cmp) {
   TypeParam pair = {2, 3};

cmake/MiscCheck.cmake (+14 -16)

@@ -13,24 +13,22 @@ include(CMakePushCheckState)
 set(CAFFE2_USE_EXCEPTION_PTR 1)

 # ---[ Check if we want to turn off deprecated warning due to glog.
-# Note(jiayq): on ubuntu 14.04, the default glog install uses ext/hash_set that
-# is being deprecated. As a result, we will test if this is the environment we
-# are building under. If yes, we will turn off deprecation warning for a
-# cleaner build output.
-cmake_push_check_state(RESET)
-set(CMAKE_REQUIRED_FLAGS "-std=c++14")
-CHECK_CXX_SOURCE_COMPILES(
-    "#include <glog/stl_logging.h>
-    int main(int argc, char** argv) {
-      return 0;
-    }" CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING
-    FAIL_REGEX ".*-Wno-deprecated.*")
+if(USE_GLOG)
+  cmake_push_check_state(RESET)
+  set(CMAKE_REQUIRED_FLAGS "-std=c++14")
+  CHECK_CXX_SOURCE_COMPILES(
+    "#include <glog/stl_logging.h>
+    int main(int argc, char** argv) {
+      return 0;
+    }" CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING
+    FAIL_REGEX ".*-Wno-deprecated.*")

-if(NOT CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING AND NOT MSVC)
-  message(STATUS "Turning off deprecation warning due to glog.")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated")
+  if(NOT CAFFE2_NEED_TO_TURN_OFF_DEPRECATION_WARNING AND NOT MSVC)
+    message(STATUS "Turning off deprecation warning due to glog.")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated")
+  endif()
+  cmake_pop_check_state()
 endif()
-cmake_pop_check_state()

 # ---[ Check if the compiler has AVX/AVX2 support. We only check AVX2.
 if(NOT INTERN_BUILD_MOBILE)

cmake/ProtoBuf.cmake (-5)

@@ -5,11 +5,6 @@ macro(custom_protobuf_find)
   option(protobuf_BUILD_TESTS "" OFF)
   option(protobuf_BUILD_EXAMPLES "" OFF)
   option(protobuf_WITH_ZLIB "" OFF)
-  if(APPLE)
-    # Protobuf generated files triggers a deprecated atomic operation warning
-    # so we turn it off here.
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
-  endif()
   if(${CAFFE2_LINK_LOCAL_PROTOBUF})
     # If we are going to link protobuf locally, we will need to turn off
     # shared libs build for protobuf.

cmake/public/utils.cmake (-1)

@@ -445,7 +445,6 @@ function(torch_compile_options libname)
       -Wno-unknown-pragmas
       -Wno-strict-overflow
       -Wno-strict-aliasing
-      -Wno-error=deprecated-declarations
       # Clang has an unfixed bug leading to spurious missing braces
       # warnings, see https://bugs.llvm.org/show_bug.cgi?id=21629
       -Wno-missing-braces

test/cpp/jit/test_lite_interpreter.cpp (+1 -1)

@@ -2217,7 +2217,7 @@ TEST_P(LiteInterpreterDynamicTypeTestFixture, Conformance) {
   }
 }

-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
     PyTorch,
     LiteInterpreterDynamicTypeTestFixture,
     ::testing::Range(

torch/CMakeLists.txt (+5)

@@ -279,6 +279,11 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
   set_source_files_properties(${TORCH_SRC_DIR}/csrc/utils/throughput_benchmark.cpp PROPERTIES COMPILE_FLAGS -Wno-attributes)
 endif()

+if(NOT MSVC)
+  # cudaProfilerInitialize must go away
+  set_source_files_properties(${TORCH_SRC_DIR}/csrc/cuda/shared/cudart.cpp PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
+endif()
+
 # coreml
 if(USE_COREML_DELEGATE)
   list(APPEND TORCH_PYTHON_SRCS ${TORCH_SRC_DIR}/csrc/jit/backends/coreml/cpp/backend.cpp)
