fix various pointer issues (#90651)

Fix several issues flagged by static analysis:
- conv_serialization.h: remove an unused `std::vector<at::Tensor>`
- `c10::SmallBuffer`: value-initialize members, replace the raw array with `std::array`, delete the copy operations, and add the move constructor needed to return a `SmallBuffer` from a function
- `FORWARD_HAS_DEFAULT_ARGS`: build `args_info` on the stack instead of in a `std::vector`, and reuse the moved-in `arguments` vector for the return value
- `LazyGraphExecutor::TensorCollectionBarrier`: check `coll` for null before dereferencing it

Pull Request resolved: https://github.com/pytorch/pytorch/pull/90651
Approved by: https://github.com/Skylion007
diff --git a/aten/src/ATen/native/quantized/cpu/conv_serialization.h b/aten/src/ATen/native/quantized/cpu/conv_serialization.h
index 2d38bc0..d622153 100644
--- a/aten/src/ATen/native/quantized/cpu/conv_serialization.h
+++ b/aten/src/ATen/native/quantized/cpu/conv_serialization.h
@@ -123,7 +123,6 @@
     torch::List<at::Tensor> dilation_x_kSpatialDim = elements[4].toTensorList();
     at::Tensor groups = elements[5].toTensor();
 
-    std::vector<at::Tensor> non_optional;
     std::vector<c10::optional<at::Tensor>> optional;
 
     std::vector<int64_t> config_vals;
diff --git a/c10/util/SmallBuffer.h b/c10/util/SmallBuffer.h
index b519d30..bb72fed 100644
--- a/c10/util/SmallBuffer.h
+++ b/c10/util/SmallBuffer.h
@@ -1,4 +1,5 @@
 #pragma once
+#include <array>
 #include <type_traits>
 
 /** Helper class for allocating temporary fixed size arrays with SBO.
@@ -19,9 +20,9 @@
       std::is_trivial<T>::value,
       "SmallBuffer is intended for POD types");
 
-  T storage_[N];
-  size_t size_;
-  T* data_;
+  std::array<T, N> storage_;
+  size_t size_{};
+  T* data_{};
 
  public:
   SmallBuffer(size_t size) : size_(size) {
@@ -32,6 +33,23 @@
     }
   }
 
+  SmallBuffer(const SmallBuffer&) = delete;
+  SmallBuffer& operator=(const SmallBuffer&) = delete;
+
+  // a move constructor is needed to return SmallBuffer from a function
+  SmallBuffer(SmallBuffer&& rhs) noexcept : size_{rhs.size_} {
+    rhs.size_ = 0;
+    if (size_ > N) {
+      data_ = rhs.data_;
+      rhs.data_ = nullptr;
+    } else {
+      storage_ = std::move(rhs.storage_);
+      data_ = &storage_[0];
+    }
+  }
+
+  SmallBuffer& operator=(SmallBuffer&&) = delete;
+
   ~SmallBuffer() {
     if (size_ > N) {
       delete[] data_;
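
For context, here is a standalone sketch of the move-constructor logic added above. `TinyBuffer` is a hypothetical, simplified mirror of the patched class, not PyTorch code: when the buffer spilled to the heap (`size_ > N`) the move steals the pointer and nulls it out in the source; otherwise it copies the inline `std::array` and repoints `data_` at the destination's own storage, which is exactly why the copy operations must stay deleted.

```cpp
#include <array>
#include <cstddef>
#include <iostream>
#include <type_traits>

// Hypothetical simplified mirror of c10::SmallBuffer, only to illustrate
// the move semantics introduced by this patch.
template <typename T, size_t N>
class TinyBuffer {
  static_assert(std::is_trivial<T>::value, "intended for POD types");
  std::array<T, N> storage_;
  size_t size_{};
  T* data_{};

 public:
  explicit TinyBuffer(size_t size) : size_(size) {
    data_ = size > N ? new T[size] : storage_.data();
  }
  TinyBuffer(const TinyBuffer&) = delete;  // data_ may alias storage_
  TinyBuffer& operator=(const TinyBuffer&) = delete;
  TinyBuffer(TinyBuffer&& rhs) noexcept : size_{rhs.size_} {
    rhs.size_ = 0;  // rhs's destructor must not free anything now
    if (size_ > N) {
      data_ = rhs.data_;  // heap case: steal the allocation
      rhs.data_ = nullptr;
    } else {
      storage_ = rhs.storage_;  // inline case: trivial copy of the array
      data_ = storage_.data();  // repoint into *this*, not into rhs
    }
  }
  TinyBuffer& operator=(TinyBuffer&&) = delete;
  ~TinyBuffer() {
    if (size_ > N) {
      delete[] data_;
    }
  }
  T* data() { return data_; }
  size_t size() const { return size_; }
};

// Returning by value is what makes the move constructor necessary:
// if NRVO does not apply, the compiler falls back to the move.
TinyBuffer<int, 4> make(size_t n) {
  TinyBuffer<int, 4> b(n);
  for (size_t i = 0; i < n; ++i) {
    b.data()[i] = static_cast<int>(i);
  }
  return b;
}

int main() {
  auto heap = make(8);     // spills past N: move steals the heap pointer
  auto inlined = make(2);  // fits inline: move copies storage_ and repoints
  std::cout << heap.data()[7] << " " << inlined.data()[1] << "\n";  // 7 1
}
```
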
diff --git a/torch/csrc/api/include/torch/nn/modules/common.h b/torch/csrc/api/include/torch/nn/modules/common.h
index 11728db..f172c82 100644
--- a/torch/csrc/api/include/torch/nn/modules/common.h
+++ b/torch/csrc/api/include/torch/nn/modules/common.h
@@ -1,7 +1,5 @@
 #pragma once
 
-#include <c10/util/irange.h>
-
 /// This macro enables a module with default arguments in its forward method
 /// to be used in a Sequential module.
 ///
@@ -72,33 +70,28 @@
 /// seq->forward(1);  // This correctly populates the default arguments for
 /// `MImpl::forward`
 /// ```
-#define FORWARD_HAS_DEFAULT_ARGS(...)                                       \
-  template <typename ModuleType, typename... ArgumentTypes>                 \
-  friend struct torch::nn::AnyModuleHolder;                                 \
-  bool _forward_has_default_args() override {                               \
-    return true;                                                            \
-  }                                                                         \
-  unsigned int _forward_num_required_args() override {                      \
-    std::vector<std::pair<unsigned int, torch::nn::AnyValue>> args_info = { \
-        __VA_ARGS__};                                                       \
-    return args_info[0].first;                                              \
-  }                                                                         \
-  std::vector<torch::nn::AnyValue> _forward_populate_default_args(          \
-      std::vector<torch::nn::AnyValue>&& arguments) override {              \
-    std::vector<std::pair<unsigned int, torch::nn::AnyValue>> args_info = { \
-        __VA_ARGS__};                                                       \
-    unsigned int num_all_args = args_info[args_info.size() - 1].first + 1;  \
-    TORCH_INTERNAL_ASSERT(                                                  \
-        arguments.size() >= _forward_num_required_args() &&                 \
-        arguments.size() <= num_all_args);                                  \
-    std::vector<torch::nn::AnyValue> ret;                                   \
-    ret.reserve(num_all_args);                                              \
-    for (const auto i : c10::irange(arguments.size())) {                    \
-      ret.emplace_back(std::move(arguments[i]));                            \
-    }                                                                       \
-    for (auto& arg_info : args_info) {                                      \
-      if (arg_info.first > ret.size() - 1)                                  \
-        ret.emplace_back(std::move(arg_info.second));                       \
-    }                                                                       \
-    return ret;                                                             \
+#define FORWARD_HAS_DEFAULT_ARGS(...)                                         \
+  template <typename ModuleType, typename... ArgumentTypes>                   \
+  friend struct torch::nn::AnyModuleHolder;                                   \
+  bool _forward_has_default_args() override {                                 \
+    return true;                                                              \
+  }                                                                           \
+  unsigned int _forward_num_required_args() override {                        \
+    std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
+    return args_info[0].first;                                                \
+  }                                                                           \
+  std::vector<torch::nn::AnyValue> _forward_populate_default_args(            \
+      std::vector<torch::nn::AnyValue>&& arguments) override {                \
+    std::pair<unsigned int, torch::nn::AnyValue> args_info[] = {__VA_ARGS__}; \
+    unsigned int num_all_args = std::rbegin(args_info)->first + 1;            \
+    TORCH_INTERNAL_ASSERT(                                                    \
+        arguments.size() >= _forward_num_required_args() &&                   \
+        arguments.size() <= num_all_args);                                    \
+    std::vector<torch::nn::AnyValue> ret = std::move(arguments);              \
+    ret.reserve(num_all_args);                                                \
+    for (auto& arg_info : args_info) {                                        \
+      if (arg_info.first > ret.size() - 1)                                    \
+        ret.emplace_back(std::move(arg_info.second));                         \
+    }                                                                         \
+    return ret;                                                               \
   }
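
The rewritten macro expands to the same interface as before. The following usage sketch follows the example in this header's own documentation (the `seq->forward(1)` snippet above); the `MImpl` body is hypothetical:

```cpp
#include <torch/torch.h>

// Hypothetical module matching the documentation example: forward() has two
// defaulted parameters, registered as {index, default} pairs so Sequential
// can populate them when they are omitted at the call site.
struct MImpl : torch::nn::Module {
 public:
  torch::Tensor forward(int a, int b = 2, double c = 3.0) {
    return torch::full({1}, a + b + c);
  }

 protected:
  FORWARD_HAS_DEFAULT_ARGS(
      {1, torch::nn::AnyValue(2)},    // parameter 1 (b) defaults to 2
      {2, torch::nn::AnyValue(3.0)})  // parameter 2 (c) defaults to 3.0
};
TORCH_MODULE(M);

void run() {
  torch::nn::Sequential seq(M());
  seq->forward(1);  // b and c are filled in from the pairs above
}
```

Replacing the `std::vector` with a stack array avoids a heap allocation on every call, and `std::rbegin(args_info)` reads the last registered pair without the old `size() - 1` indexing; move-constructing `ret` from `arguments` also drops the element-by-element copy loop.
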
diff --git a/torch/csrc/lazy/core/lazy_graph_executor.cpp b/torch/csrc/lazy/core/lazy_graph_executor.cpp
index 574f200..5bc2e67 100644
--- a/torch/csrc/lazy/core/lazy_graph_executor.cpp
+++ b/torch/csrc/lazy/core/lazy_graph_executor.cpp
@@ -1034,13 +1034,13 @@
 }
 
 void LazyGraphExecutor::TensorCollectionBarrier(SyncTensorCollection* coll) {
-  static const std::string invalid_device(
-      "Unknown0"); /* Temp solution to idetify unassigned devices */
-  if (coll->device.toString().compare(invalid_device) == 0 ||
-      coll->unlocker.size() > 0) {
-    return;
-  }
   if (coll) {
+    static const std::string invalid_device(
+        "Unknown0"); /* Temp solution to idetify unassigned devices */
+    if (coll->device.toString().compare(invalid_device) == 0 ||
+        coll->unlocker.size() > 0) {
+      return;
+    }
     VLOG(4) << "Waiting on device barrier for device " << coll->device
             << " ...";
     {
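
The reordering above exists because the old code dereferenced `coll` (via `coll->device` and `coll->unlocker`) before the `if (coll)` null check, so a null `coll` was undefined behavior and the check itself was dead code. This is precisely the pattern static analyzers flag. A minimal standalone sketch with hypothetical names:

```cpp
#include <string>

struct Coll {
  std::string device;
};

// Before: the early-return dereferences coll, so the later null check can
// never help; passing a null coll is undefined behavior on the first line.
void barrier_before(Coll* coll) {
  if (coll->device == "Unknown0") {
    return;
  }
  if (coll) {
    // ... wait on the device barrier ...
  }
}

// After: every use of coll sits inside the null check.
void barrier_after(Coll* coll) {
  if (coll) {
    if (coll->device == "Unknown0") {
      return;
    }
    // ... wait on the device barrier ...
  }
}
```
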