
Commit 379bbef

cyyever authored and pytorchmergebot committed
Enable more C++ warnings (pytorch#143355)
Fixes #ISSUE_NUMBER

Pull Request resolved: pytorch#143355
Approved by: https://github.com/albanD
1 parent fca457b commit 379bbef

20 files changed (+39, -76 lines)

.clang-format (+2)

@@ -106,6 +106,8 @@ StatementMacros:
   - C10_DEFINE_int32
   - C10_DEFINE_int64
   - C10_DEFINE_string
+  - C10_DEFINE_REGISTRY_WITHOUT_WARNING
+  - C10_REGISTER_CREATOR
   - DEFINE_BINARY
   - PyObject_HEAD
   - PyObject_VAR_HEAD
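The two macros are added to clang-format's StatementMacros list because, after this commit, their call sites no longer end in a semicolon (see the torch/csrc/distributed changes below); listing them tells clang-format to treat a bare invocation as a complete statement rather than folding the next declaration into it. A minimal sketch of the call sites the setting covers:

// With C10_REGISTER_CREATOR listed under StatementMacros, clang-format keeps
// this semicolon-free invocation on its own line and indents what follows
// normally:
C10_REGISTER_CREATOR(GlooDeviceRegistry, UV, makeUVDevice)
#endif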

CMakeLists.txt (-1)

@@ -1057,7 +1057,6 @@ if(NOT MSVC)
   append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-aligned-allocation-unavailable"
                                CMAKE_CXX_FLAGS)
-  append_cxx_flag_if_supported("-Wno-missing-braces" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Qunused-arguments" CMAKE_CXX_FLAGS)

   if(${USE_COLORIZE_OUTPUT})
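Dropping -Wno-missing-braces stops suppressing the compiler's -Wmissing-braces, which flags aggregate initializers that elide inner braces. A small illustration (not from this commit) of what the now-active warning catches:

// -Wmissing-braces: the flat initializer omits the braces around each row,
// which Clang/GCC now report instead of silently accepting.
int bad[2][2] = {1, 2, 3, 4};      // warning: suggest braces around initialization
int good[2][2] = {{1, 2}, {3, 4}}; // fully braced: no warning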

aten/src/ATen/native/QuantizedLinear.cpp (+1, -2)

@@ -1,5 +1,4 @@
 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
-#include <vector>

 #include <ATen/core/Tensor.h>
 #include <ATen/Parallel.h>
@@ -116,7 +115,7 @@ Tensor fbgemm_linear_int8_weight_fp32_activation(
   const Tensor bias_contig = bias.contiguous();

   // Allocate output Tensor and a buffer for fbgemmPacked to use
-  std::vector<int64_t> output_size = input.sizes().vec();
+  auto output_size = input.sizes().vec();
   output_size.back() = N;
   Tensor output = at::empty(output_size, input.options().dtype(at::kFloat), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
   Tensor buffer = at::empty(output_size, input.options().dtype(at::kInt), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
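IntArrayRef::vec() already returns std::vector<int64_t>, so spelling the type out was redundant; with auto the direct <vector> include becomes unnecessary (the type stays complete through the ATen headers). Roughly:

// sizes() yields an IntArrayRef; vec() copies it into a std::vector<int64_t>,
// so `auto` deduces exactly the type the old code wrote by hand.
auto output_size = input.sizes().vec();
output_size.back() = N; // last dimension becomes the output feature count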

aten/src/ATen/native/RNN.cpp (+1, -2)

@@ -7,6 +7,7 @@
 #include <ATen/TensorOperators.h>
 #include <ATen/mps/MPSDevice.h>
 #include <ATen/native/quantized/PackedParams.h>
+#include <ATen/native/quantized/library.h>
 #include <ATen/native/quantized/cpu/fbgemm_utils.h>
 #include <ATen/native/quantized/cpu/QnnpackUtils.h>
 #include <c10/core/GradMode.h>
@@ -62,8 +63,6 @@
 #include <utility>
 #endif

-int register_linear_params();
-
 namespace at::native {

 namespace {
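This is the recurring pattern of the commit: each translation unit previously re-declared registration hooks like register_linear_params() at namespace scope, with inconsistent TORCH_API annotations across files, which is exactly what the newly enabled warnings flag. Every such local declaration is replaced by an include of the new shared header:

// Before: a per-file forward declaration, prone to signature/attribute drift
// across translation units once stricter warnings are enabled.
//   int register_linear_params();
//
// After: one authoritative declaration shared by all users.
#include <ATen/native/quantized/library.h>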

aten/src/ATen/native/quantized/cpu/LinearUnpackImpl.cpp (-2)

@@ -19,8 +19,6 @@
 #include <ATen/ops/from_blob.h>
 #endif

-int register_linear_params();
-
 #ifdef USE_FBGEMM
 std::tuple<at::Tensor, std::optional<at::Tensor>> PackedLinearWeight::unpack() {
   auto packB = w.get();

aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp (+3, -7)

@@ -14,6 +14,7 @@
 #include <ATen/native/TensorFactories.h>
 #include <ATen/quantized/QTensorImpl.h>
 #include <ATen/quantized/Quantizer.h>
+#include <aten/src/ATen/native/quantized/library.h>
 #include <c10/core/QScheme.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/accumulate.h>
@@ -28,7 +29,6 @@
 #include <utility>
 #endif

-int register_embedding_params();

 #ifdef USE_FBGEMM

@@ -381,9 +381,7 @@ namespace {
   }
 }

-template <int kSpatialDim = 2>
-TORCH_API int
-register_conv_params() {
+template <int kSpatialDim> int register_conv_params() {
   static auto register_conv_params =
       torch::selective_class_<ConvPackedParamsBase<kSpatialDim>>(
           "quantized", TORCH_SELECTIVE_CLASS(_hack_int_to_class_name(kSpatialDim)))
@@ -420,9 +418,7 @@ TORCH_API int register_conv_params<2>();
 template
 TORCH_API int register_conv_params<3>();

-TORCH_API int register_linear_params();
-
-TORCH_API int register_linear_params() {
+int register_linear_params() {
   using SerializationType = std::tuple<at::Tensor, std::optional<at::Tensor>>;
   static auto register_linear_params =
       torch::selective_class_<LinearPackedParamsBase>(
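With the declarations centralized in the header, the definitions here drop the repeated TORCH_API and the default template argument: both must appear exactly once, on the declaration. Repeating a default template argument on the definition is ill-formed, and re-stating the export attribute is at best redundant under the stricter warning set. The convention, stitched together from the two files:

// library.h: attribute and default template argument live on the declaration.
template <int kSpatialDim = 2> TORCH_API int register_conv_params();

// fbgemm_utils.cpp: the definition repeats neither.
template <int kSpatialDim> int register_conv_params() { /* ... */ return 0; }

// Explicit instantiations keep the export attribute so other DSOs can link.
template TORCH_API int register_conv_params<2>();
template TORCH_API int register_conv_params<3>();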

aten/src/ATen/native/quantized/cpu/qlinear.cpp (+2, -3)

@@ -4,12 +4,13 @@
 #include <ATen/Parallel.h>
 #include <ATen/TensorOperators.h>
 #include <ATen/native/quantized/cpu/fbgemm_utils.h>
-#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/quantized/cpu/QnnpackUtils.h>
 #include <ATen/native/quantized/cpu/XnnpackUtils.h>
 #include <ATen/native/quantized/cpu/OnednnUtils.h>
 #include <ATen/native/quantized/cpu/QuantUtils.h>
 #include <ATen/native/quantized/cpu/qlinear.h>
+#include <ATen/native/quantized/library.h>
+#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/mkldnn/MKLDNNCommon.h>
 #include <caffe2/utils/threadpool/pthreadpool-cpp.h>
 #include <torch/library.h>
@@ -31,8 +32,6 @@
 #include <algorithm>
 #include <string>

-int register_linear_params();
-
 #ifdef USE_FBGEMM
 template <bool ReluFused>
 at::Tensor& PackedLinearWeight::apply_impl(

aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp (+2, -3)

@@ -3,10 +3,11 @@
 #include <ATen/Context.h>
 #include <ATen/Parallel.h>
 #include <ATen/native/quantized/cpu/fbgemm_utils.h>
-#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/quantized/cpu/QnnpackUtils.h>
 #include <ATen/native/quantized/cpu/OnednnUtils.h>
 #include <ATen/native/quantized/cpu/QuantUtils.h>
+#include <ATen/native/quantized/library.h>
+#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/mkldnn/MKLDNNCommon.h>
 #include <caffe2/utils/threadpool/pthreadpool-cpp.h>
 #include <torch/library.h>
@@ -29,8 +30,6 @@
 #include <string>
 #include <type_traits>

-int register_linear_params();
-
 #ifdef USE_FBGEMM
 template <bool ReluFused>
 at::Tensor PackedLinearWeight::apply_dynamic_impl(

aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp (+2, -3)

@@ -4,10 +4,11 @@
 #include <ATen/Context.h>
 #include <ATen/native/quantized/cpu/fbgemm_utils.h>
 #include <ATen/native/quantized/cpu/init_qnnpack.h>
-#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/quantized/cpu/QnnpackUtils.h>
 #include <ATen/native/quantized/cpu/OnednnUtils.h>
 #include <ATen/native/quantized/cpu/QuantUtils.h>
+#include <ATen/native/quantized/library.h>
+#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/mkldnn/MKLDNNCommon.h>
 #include <ATen/quantized/Quantizer.h>
 #include <torch/custom_class.h>
@@ -31,8 +32,6 @@
 #include <utility>
 #include <vector>

-int register_linear_params();
-
 #ifdef USE_FBGEMM
 namespace {
 // Calculate the column offsets.

aten/src/ATen/native/quantized/cudnn/Conv.cpp (+1, -6)

@@ -10,6 +10,7 @@
 #include <ATen/cudnn/Handle.h>
 #include <ATen/native/cudnn/ConvShared.h>
 #include <ATen/native/quantized/cudnn/utils.h>
+#include <ATen/native/quantized/library.h>
 #include <ATen/native/quantized/ConvUtils.h>
 #include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/utils/ParamsHash.h>
@@ -22,12 +23,6 @@
 #include <unordered_map>
 #include <vector>

-template <int kSpatialDim = 2>
-int register_conv_params();
-
-extern template int register_conv_params<2>();
-extern template int register_conv_params<3>();
-
 // TODO: there is a table from input dtype and weight dtype to operator qdtype,
 // we can derive the operator dtype based on input dtype
 cudnn_frontend::ConvDesc_v8 getConvDescriptor(cudnnDataType_t dataType, c10::IntArrayRef padding, c10::IntArrayRef stride, c10::IntArrayRef dilation) {

aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp (+1, -6)

@@ -7,6 +7,7 @@
 #include <torch/library.h>
 #include <ATen/native/quantized/cpu/QuantUtils.h>
 #include <ATen/native/quantized/cudnn/utils.h>
+#include <ATen/native/quantized/library.h>
 #include <ATen/native/quantized/PackedParams.h>
 #include <ATen/quantized/Quantizer.h>
 #include <c10/core/QScheme.h>
@@ -15,12 +16,6 @@

 #include <utility>

-template <int kSpatialDim = 2>
-int register_conv_params();
-
-extern template int register_conv_params<2>();
-extern template int register_conv_params<3>();
-
 template <int kSpatialDim>
 c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> PackedConvWeightCudnn<
     kSpatialDim>::

aten/src/ATen/native/quantized/library.cpp (+1, -9)

@@ -1,13 +1,5 @@
 #include <torch/library.h>
-
-int register_linear_params();
-
-template <int kSpatialDim = 2>
-int register_conv_params();
-
-extern template int register_conv_params<2>();
-extern template int register_conv_params<3>();
-int register_embedding_params();
+#include <aten/src/ATen/native/quantized/library.h>

 TORCH_LIBRARY(quantized, m) {
   m.set_python_module("caffe2.torch.fb.model_transform.splitting.split_dispatcher");

aten/src/ATen/native/quantized/library.h (new file, +8)

@@ -0,0 +1,8 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+
+TORCH_API int register_linear_params();
+int register_embedding_params();
+
+template <int kSpatialDim = 2> TORCH_API int register_conv_params();
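The new header is where all the deleted per-file declarations now live: the linear and conv hooks are exported with TORCH_API, while register_embedding_params() keeps plain linkage, matching its previous declarations. Illustrative usage (not from this commit) of how a caller forces registration:

#include <ATen/native/quantized/library.h>

// Each hook is idempotent: its body initializes a function-local static,
// so repeated calls register the packed-parameter classes only once.
static const int kLinearRegistered = register_linear_params();
static const int kConv2dRegistered = register_conv_params<2>(); // kSpatialDim = 2 is the default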

aten/src/ATen/native/quantized/qconv_unpack.cpp (+1, -7)

@@ -19,6 +19,7 @@ and /cudnn/ConvUnpackImpl.cpp, for cudnn.
 #include <ATen/native/quantized/cpu/OnednnUtils.h>
 #include <ATen/native/quantized/cpu/QuantUtils.h>
 #include <ATen/native/quantized/PackedParams.h>
+#include <ATen/native/quantized/library.h>

 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
@@ -28,13 +29,6 @@ and /cudnn/ConvUnpackImpl.cpp, for cudnn.
 #include <ATen/ops/from_blob.h>
 #endif

-template <int kSpatialDim = 2>
-int register_conv_params();
-
-extern template int register_conv_params<2>();
-extern template int register_conv_params<3>();
-
-
 namespace at::native {
 namespace {

aten/src/ATen/native/quantized/qlinear_unpack.cpp (+2, -4)

@@ -8,14 +8,12 @@ and /cudnn/linear_unpack_impl.cpp, for cudnn.
 */
 #include <ATen/ATen.h>
 #include <ATen/native/quantized/cpu/fbgemm_utils.h>
-#include <ATen/native/quantized/PackedParams.h>
 #include <ATen/native/quantized/cpu/QnnpackUtils.h>
+#include <ATen/native/quantized/library.h>
+#include <ATen/native/quantized/PackedParams.h>
 #include <torch/custom_class.h>
 #include <torch/library.h>

-int register_linear_params();
-
-
 namespace at::native {
 namespace {

torch/csrc/distributed/c10d/GlooDeviceFactory.cpp (+4, -4)

@@ -96,7 +96,7 @@ static std::shared_ptr<::gloo::transport::Device> makeTCPTLSDevice(
       attr, pkey, cert, caFile, caPath);
 }

-C10_REGISTER_CREATOR(GlooDeviceRegistry, TCP_TLS, makeTCPTLSDevice);
+C10_REGISTER_CREATOR(GlooDeviceRegistry, TCP_TLS, makeTCPTLSDevice)
 #endif

 #if GLOO_HAVE_TRANSPORT_UV
@@ -120,9 +120,9 @@ static std::shared_ptr<::gloo::transport::Device> makeUVDevice(
 // Registry priority is per key identifier. We register UV to `APPLE` for
 // the flexibility of other application to override by priority. Register
 // UV to `UV` for env "GLOO_DEVICE_TRANSPORT" override.
-C10_REGISTER_CREATOR(GlooDeviceRegistry, APPLE, makeUVDevice);
-C10_REGISTER_CREATOR(GlooDeviceRegistry, WIN32, makeUVDevice);
-C10_REGISTER_CREATOR(GlooDeviceRegistry, UV, makeUVDevice);
+C10_REGISTER_CREATOR(GlooDeviceRegistry, APPLE, makeUVDevice)
+C10_REGISTER_CREATOR(GlooDeviceRegistry, WIN32, makeUVDevice)
+C10_REGISTER_CREATOR(GlooDeviceRegistry, UV, makeUVDevice)
 #endif

 namespace {
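The semicolon removals here and in the tensorpipe files pair with the .clang-format change above: C10_REGISTER_CREATOR and C10_DEFINE_REGISTRY_WITHOUT_WARNING expand to complete definitions that already end in a semicolon, so a trailing ';' at the call site is an extra empty declaration, which -Wextra-semi now reports. A hypothetical, simplified stand-in for the real macro (the actual expansion in c10/util/Registry.h is more involved) shows the mechanics:

struct RegistrarDummy {
  RegistrarDummy(const char* /*key*/, void (* /*fn*/)()) {}
};
static void makeDeviceStub() {}

// The macro body supplies its own terminating semicolon.
#define REGISTER(key, fn) static RegistrarDummy registrar_##key(#key, fn);

// REGISTER(uv, makeDeviceStub);  // trailing ';' -> empty declaration, -Wextra-semi
REGISTER(uv, makeDeviceStub)      // exactly one declaration: clean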

torch/csrc/distributed/rpc/tensorpipe_agent.cpp (+3, -3)

@@ -153,11 +153,11 @@ void makeStreamsWaitOnOthers(

 C10_DEFINE_REGISTRY_WITHOUT_WARNING(
     TensorPipeTransportRegistry,
-    TransportRegistration);
+    TransportRegistration)

 C10_DEFINE_REGISTRY_WITHOUT_WARNING(
     TensorPipeChannelRegistry,
-    ChannelRegistration);
+    ChannelRegistration)

 const std::string& TensorPipeAgent::guessAddress() {
   static const std::string uvAddress = []() {
@@ -284,7 +284,7 @@ std::unique_ptr<ChannelRegistration> makeMultiplexedUvChannel() {
 C10_REGISTER_CREATOR(
     TensorPipeChannelRegistry,
     mpt_uv,
-    makeMultiplexedUvChannel);
+    makeMultiplexedUvChannel)

 } // namespace

torch/csrc/distributed/rpc/tensorpipe_cuda.cpp (+4, -4)

@@ -24,7 +24,7 @@ std::unique_ptr<ChannelRegistration> makeCudaIpcChannel() {
 }

 // The cuda_ipc channels use cudaMemcpy to transmit CUDA tensor across processes
-C10_REGISTER_CREATOR(TensorPipeChannelRegistry, cuda_ipc, makeCudaIpcChannel);
+C10_REGISTER_CREATOR(TensorPipeChannelRegistry, cuda_ipc, makeCudaIpcChannel)

 #endif

@@ -44,7 +44,7 @@ std::unique_ptr<ChannelRegistration> makeCudaGdrChannel() {
 // in order to ensure readiness and to agree on the device indices and thus the
 // queue pair to use. It automatically pairs each GPU to the "closest" NIC if
 // there are multiple of them (closest = longest prefix match in PCI tree).
-C10_REGISTER_CREATOR(TensorPipeChannelRegistry, cuda_gdr, makeCudaGdrChannel);
+C10_REGISTER_CREATOR(TensorPipeChannelRegistry, cuda_gdr, makeCudaGdrChannel)

 #endif

@@ -55,7 +55,7 @@ std::unique_ptr<ChannelRegistration> makeCudaXthChannel() {
 }

 // The cuda_xth channel supports same-process GPU-to-GPU comm
-C10_REGISTER_CREATOR(TensorPipeChannelRegistry, cuda_xth, makeCudaXthChannel);
+C10_REGISTER_CREATOR(TensorPipeChannelRegistry, cuda_xth, makeCudaXthChannel)

 std::unique_ptr<ChannelRegistration> makeCudaBasicChannel() {
   auto context = tensorpipe::channel::cuda_basic::create(
@@ -68,7 +68,7 @@ std::unique_ptr<ChannelRegistration> makeCudaBasicChannel() {
 C10_REGISTER_CREATOR(
     TensorPipeChannelRegistry,
     cuda_basic,
-    makeCudaBasicChannel);
+    makeCudaBasicChannel)

 class TensorpipeCudaConverter : public TensorpipeDeviceTypeConverter {
  public:

torch/csrc/jit/passes/onnx/constant_map.h (-5)

@@ -2,15 +2,10 @@

 #include <c10/macros/Macros.h>

-C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
-C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wnewline-eof")
 #include <onnx/shape_inference/implementation.h>
-C10_DIAGNOSTIC_POP()
-C10_DIAGNOSTIC_POP()

 #include <torch/csrc/jit/ir/ir.h>
 #include <torch/csrc/jit/serialization/export.h>
-#include <mutex>
 #include <unordered_map>

 namespace torch::jit {
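The removed C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED / C10_DIAGNOSTIC_POP pairs wrap the compiler's push/ignore/pop pragmas around headers that used to trip the named warnings; they are no longer needed here (and <mutex> was unused). For reference, roughly what the removed wrappers approximate in raw pragma form (illustrative; the real macros in c10/macros/Macros.h also check whether the compiler recognizes the warning):

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#include <onnx/shape_inference/implementation.h>
#pragma GCC diagnostic pop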

torch/csrc/jit/serialization/export.cpp (+1, -5)

@@ -19,18 +19,14 @@
 #include <torch/csrc/onnx/back_compat.h>
 #include <torch/csrc/onnx/onnx.h>
 #include <torch/version.h>
-#include <optional>

-C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wnewline-eof")
 #include <onnx/checker.h>
-C10_DIAGNOSTIC_POP()
 #include <onnx/onnx_pb.h>
 #include <onnx/proto_utils.h>
-C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
 #include <onnx/shape_inference/implementation.h>
-C10_DIAGNOSTIC_POP()

 #include <memory>
+#include <optional>
 #include <regex>
 #include <set>
 #include <sstream>
