[2/N] Fix clang-tidy warnings in jit  (#131735)

Follows #131034

Pull Request resolved: https://github.com/pytorch/pytorch/pull/131735
Approved by: https://github.com/ezyang
diff --git a/torch/csrc/jit/frontend/builtin_functions.h b/torch/csrc/jit/frontend/builtin_functions.h
index 2ff8d13..27e190a 100644
--- a/torch/csrc/jit/frontend/builtin_functions.h
+++ b/torch/csrc/jit/frontend/builtin_functions.h
@@ -3,9 +3,7 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/api/module.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API const std::vector<Function*>& getAllBuiltinFunctionsFor(Symbol name);
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/canonicalize_modified_loop.h b/torch/csrc/jit/frontend/canonicalize_modified_loop.h
index f8a3a4e..ce78a21 100644
--- a/torch/csrc/jit/frontend/canonicalize_modified_loop.h
+++ b/torch/csrc/jit/frontend/canonicalize_modified_loop.h
@@ -3,8 +3,7 @@
 
 #include <torch/csrc/Export.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Graph;
 
@@ -12,5 +11,4 @@
 // for or while loops
 TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr<Graph>& graph);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/convert_to_ssa.h b/torch/csrc/jit/frontend/convert_to_ssa.h
index 787eae8..9ea8bc8 100644
--- a/torch/csrc/jit/frontend/convert_to_ssa.h
+++ b/torch/csrc/jit/frontend/convert_to_ssa.h
@@ -6,11 +6,9 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // Convert a graph with Loads & Stores into SSA form
 TORCH_API void ConvertToSSA(std::shared_ptr<Graph>& graph);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/edit_distance.h b/torch/csrc/jit/frontend/edit_distance.h
index f0d999e..761e7ff 100644
--- a/torch/csrc/jit/frontend/edit_distance.h
+++ b/torch/csrc/jit/frontend/edit_distance.h
@@ -3,13 +3,11 @@
 #include <torch/csrc/Export.h>
 #include <cstddef>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API size_t ComputeEditDistance(
     const char* word1,
     const char* word2,
     size_t maxEditDistance);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/error_report.cpp b/torch/csrc/jit/frontend/error_report.cpp
index 068437b..67f461f 100644
--- a/torch/csrc/jit/frontend/error_report.cpp
+++ b/torch/csrc/jit/frontend/error_report.cpp
@@ -16,8 +16,8 @@
       error_stack(e.error_stack.begin(), e.error_stack.end()) {}
 
 #ifndef C10_MOBILE
-ErrorReport::ErrorReport(SourceRange r)
-    : context(std::move(r)), error_stack(calls.begin(), calls.end()) {}
+ErrorReport::ErrorReport(const SourceRange& r)
+    : context(r), error_stack(calls.begin(), calls.end()) {}
 
 void ErrorReport::CallStack::update_pending_range(const SourceRange& range) {
   calls.back().caller_range = range;
@@ -33,7 +33,7 @@
   calls.pop_back();
 }
 #else // defined C10_MOBILE
-ErrorReport::ErrorReport(SourceRange r) : context(std::move(r)) {}
+ErrorReport::ErrorReport(const SourceRange& r) : context(r) {}
 
 void ErrorReport::CallStack::update_pending_range(const SourceRange& range) {}
 
diff --git a/torch/csrc/jit/frontend/error_report.h b/torch/csrc/jit/frontend/error_report.h
index c63f644..635dd35 100644
--- a/torch/csrc/jit/frontend/error_report.h
+++ b/torch/csrc/jit/frontend/error_report.h
@@ -2,8 +2,7 @@
 
 #include <torch/csrc/jit/frontend/tree.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Call {
   std::string fn_name;
@@ -13,7 +12,7 @@
 struct TORCH_API ErrorReport : public std::exception {
   ErrorReport(const ErrorReport& e);
 
-  explicit ErrorReport(SourceRange r);
+  explicit ErrorReport(const SourceRange& r);
   explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
   explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}
 
@@ -49,5 +48,4 @@
   return e;
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/exit_transforms.h b/torch/csrc/jit/frontend/exit_transforms.h
index 84910c6..94a983c 100644
--- a/torch/csrc/jit/frontend/exit_transforms.h
+++ b/torch/csrc/jit/frontend/exit_transforms.h
@@ -3,10 +3,8 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/function_schema_parser.cpp b/torch/csrc/jit/frontend/function_schema_parser.cpp
index d4ed5943..3a1a3af 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.cpp
+++ b/torch/csrc/jit/frontend/function_schema_parser.cpp
@@ -8,7 +8,6 @@
 #include <torch/csrc/jit/frontend/schema_type_parser.h>
 #include <optional>
 
-#include <functional>
 #include <memory>
 #include <vector>
 
@@ -49,8 +48,9 @@
     size_t idx = 0;
     parseList('(', ',', ')', [&] {
       if (is_vararg)
-        throw ErrorReport(L.cur())
-            << "... must be the last element of the argument list";
+        throw(
+            ErrorReport(L.cur())
+            << "... must be the last element of the argument list");
       if (L.nextIf('*')) {
         kwarg_only = true;
       } else if (L.nextIf(TK_DOTS)) {
@@ -65,8 +65,9 @@
     if (is_vararg) {
       for (const auto& arg : arguments) {
         if (arg.default_value().has_value()) {
-          throw ErrorReport(L.cur())
-              << "schemas with vararg (...) can't have default value args";
+          throw(
+              ErrorReport(L.cur())
+              << "schemas with vararg (...) can't have default value args");
         }
       }
     }
@@ -78,8 +79,9 @@
     } else if (L.cur().kind == '(') {
       parseList('(', ',', ')', [&] {
         if (is_varret) {
-          throw ErrorReport(L.cur())
-              << "... must be the last element of the return list";
+          throw(
+              ErrorReport(L.cur())
+              << "... must be the last element of the return list");
         }
         if (L.nextIf(TK_DOTS)) {
           is_varret = true;
@@ -259,7 +261,7 @@
             str2dtype.count(text) > 0) {
           return static_cast<int64_t>(str2dtype.at(text));
         } else {
-          throw ErrorReport(L.cur().range) << "invalid numeric default value";
+          throw(ErrorReport(L.cur().range) << "invalid numeric default value");
         }
       }
       default:
@@ -300,8 +302,9 @@
         return convertToList(
             type, type.expectRef<c10::DynamicType>().dynamicKind(), range, vs);
       default:
-        throw ErrorReport(range)
-            << "lists are only supported for float, int and complex types";
+        throw(
+            ErrorReport(range)
+            << "lists are only supported for float, int and complex types");
     }
   }
   IValue parseConstantList(
@@ -372,7 +375,7 @@
             real_type,
             arg_N);
       default:
-        throw ErrorReport(range) << "unexpected type, file a bug report";
+        throw(ErrorReport(range) << "unexpected type, file a bug report");
     }
     return IValue(); // silence warnings
   }
@@ -395,7 +398,6 @@
   }
   Lexer L;
   SchemaTypeParser type_parser;
-  bool allow_typevars_;
 };
 } // namespace
 
diff --git a/torch/csrc/jit/frontend/function_schema_parser.h b/torch/csrc/jit/frontend/function_schema_parser.h
index 75782e1..c1a5601 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.h
+++ b/torch/csrc/jit/frontend/function_schema_parser.h
@@ -5,8 +5,7 @@
 #include <string>
 #include <variant>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // allow_typevars: If true, we assume that lowercase types that we don't
 // understand are type variables. This is only needed for TorchScript (and not
@@ -21,5 +20,4 @@
     bool allow_typevars = true);
 TORCH_API c10::OperatorName parseName(const std::string& name);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/inline_loop_condition.h b/torch/csrc/jit/frontend/inline_loop_condition.h
index c5efa0b..74ba374 100644
--- a/torch/csrc/jit/frontend/inline_loop_condition.h
+++ b/torch/csrc/jit/frontend/inline_loop_condition.h
@@ -6,11 +6,9 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
 TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/ir_emitter.h b/torch/csrc/jit/frontend/ir_emitter.h
index 6b27a9a..a4aee2b 100644
--- a/torch/csrc/jit/frontend/ir_emitter.h
+++ b/torch/csrc/jit/frontend/ir_emitter.h
@@ -10,12 +10,10 @@
 #include <torch/csrc/jit/frontend/tree_views.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API void runCleanupPasses(std::shared_ptr<Graph>& to_clean);
 
 TORCH_API bool meaningfulName(const std::string& name);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/lexer.h b/torch/csrc/jit/frontend/lexer.h
index ff59fa9..15dc866 100644
--- a/torch/csrc/jit/frontend/lexer.h
+++ b/torch/csrc/jit/frontend/lexer.h
@@ -18,8 +18,7 @@
 C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
 #endif
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // single character tokens are just the character itself '+'
 // multi-character tokens need an entry here
@@ -143,7 +142,7 @@
 struct TokenTrie;
 using TokenTrieRef = std::unique_ptr<TokenTrie>;
 struct TokenTrie {
-  TokenTrie() : kind(0) {}
+  TokenTrie() = default;
   void insert(const char* str, int tok) {
     if (*str == '\0') {
       AT_ASSERT(kind == 0);
@@ -162,7 +161,7 @@
     child_tries.emplace_back(std::make_unique<TokenTrie>());
     child_tries.back()->insert(str + 1, tok);
   }
-  int kind; // 0 == invalid token
+  int kind{0}; // 0 == invalid token
 
   std::vector<char> child_chars;
   std::vector<TokenTrieRef> child_tries;
@@ -240,7 +239,7 @@
     // invariant: the next token is not whitespace or newline
     *start = pos;
     // check for a valid number
-    size_t len;
+    size_t len = 0;
     if (isNumber(pos.rest_line(), 0, &len)) {
       *end = *start;
       *end += len;
@@ -387,7 +386,7 @@
   }
 
   // Make an exception ignoring comments for type annotation comments
-  bool isTypeComment(StringCordView str, size_t pos) {
+  bool isTypeComment(const StringCordView& str, size_t pos) {
     const std::string type_string = "# type:";
     if (str.size() < pos + type_string.length()) {
       return false;
@@ -416,8 +415,6 @@
 struct Lexer {
   explicit Lexer(std::shared_ptr<Source> source)
       : source(std::move(source)),
-        pos(0),
-        nesting(0),
         indent_stack(),
         next_tokens(),
         shared(sharedParserData()) {
@@ -562,14 +560,13 @@
 
   std::shared_ptr<Source> source;
   std::unique_ptr<StringCordView::Iterator> current;
-  size_t pos;
-  size_t nesting; // depth of ( [ { nesting...
+  size_t pos{0};
+  size_t nesting{0}; // depth of ( [ { nesting...
   std::vector<int> indent_stack; // stack of indentation level of blocks
   // Invariant: this should always contain at least a single element
   std::vector<Token> next_tokens;
   SharedParserData& shared;
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
 
 C10_CLANG_DIAGNOSTIC_POP()
diff --git a/torch/csrc/jit/frontend/mini_environment.h b/torch/csrc/jit/frontend/mini_environment.h
index 9a56d31..1b71927 100644
--- a/torch/csrc/jit/frontend/mini_environment.h
+++ b/torch/csrc/jit/frontend/mini_environment.h
@@ -3,8 +3,7 @@
 #include <ATen/core/jit_type.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // Simple data structure for containing a type T in nested control blocks
 // Should only be used after initial compilation where type checking and
@@ -53,5 +52,4 @@
   std::unordered_map<std::string, T> table;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/name_mangler.h b/torch/csrc/jit/frontend/name_mangler.h
index 07a0352..2f436f9 100644
--- a/torch/csrc/jit/frontend/name_mangler.h
+++ b/torch/csrc/jit/frontend/name_mangler.h
@@ -3,8 +3,7 @@
 #include <ATen/core/qualified_name.h>
 #include <torch/csrc/Export.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 /**
  * class NameMangler
@@ -23,5 +22,4 @@
   size_t mangleIndex_ = 0;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/parse_string_literal.h b/torch/csrc/jit/frontend/parse_string_literal.h
index 13bbbf8..1a5b5a7 100644
--- a/torch/csrc/jit/frontend/parse_string_literal.h
+++ b/torch/csrc/jit/frontend/parse_string_literal.h
@@ -3,8 +3,7 @@
 #include <torch/csrc/jit/frontend/lexer.h>
 #include <optional>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 inline bool isCharCount(char c, const std::string& str, size_t start, int len) {
   // count checks from [start, start + len)
@@ -83,5 +82,4 @@
   return ret_str;
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/parser.h b/torch/csrc/jit/frontend/parser.h
index 6d856a0..7f4d17b 100644
--- a/torch/csrc/jit/frontend/parser.h
+++ b/torch/csrc/jit/frontend/parser.h
@@ -4,8 +4,7 @@
 #include <torch/csrc/jit/frontend/tree_views.h>
 #include <memory>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Decl;
 struct ParserImpl;
@@ -29,5 +28,4 @@
   std::unique_ptr<ParserImpl> pImpl;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/parser_constants.h b/torch/csrc/jit/frontend/parser_constants.h
index 2830061..cf51d10 100644
--- a/torch/csrc/jit/frontend/parser_constants.h
+++ b/torch/csrc/jit/frontend/parser_constants.h
@@ -1,7 +1,5 @@
 #pragma once
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/resolver.h b/torch/csrc/jit/frontend/resolver.h
index dc4ab61..d5b0f19 100644
--- a/torch/csrc/jit/frontend/resolver.h
+++ b/torch/csrc/jit/frontend/resolver.h
@@ -4,8 +4,7 @@
 #include <ATen/core/qualified_name.h>
 #include <torch/csrc/jit/frontend/sugared_value.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Resolver;
 using ResolverPtr = std::shared_ptr<Resolver>;
@@ -64,5 +63,4 @@
 inline std::shared_ptr<NativeResolver> nativeResolver() {
   return std::make_shared<NativeResolver>();
 }
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/schema_matching.cpp b/torch/csrc/jit/frontend/schema_matching.cpp
index a91f204..edfa1ae 100644
--- a/torch/csrc/jit/frontend/schema_matching.cpp
+++ b/torch/csrc/jit/frontend/schema_matching.cpp
@@ -146,9 +146,7 @@
     } else if (*value->type() == *BoolType::get()) {
       if (concrete_float) {
         value = graph.insert(aten::Float, {value}, {}, loc);
-      } else if (concrete_int) {
-        value = graph.insert(aten::Int, {value}, {}, loc);
-      } else if (concrete_number) {
+      } else if (concrete_int || concrete_number) {
         value = graph.insert(aten::Int, {value}, {}, loc);
       }
     }
@@ -516,7 +514,7 @@
   // Therefore, either all or none returns has field names.
   bool return_has_field_names =
       std::all_of(returns.begin(), returns.end(), [&](const Argument& r) {
-        return r.name().length() > 0;
+        return !r.name().empty();
       });
   c10::OptNameList return_field_names = std::nullopt;
   if (return_has_field_names) {
@@ -553,7 +551,7 @@
           /*allow_conversions=*/true)) {
     return *result;
   }
-  throw ErrorReport(loc) << failure_messages.str();
+  throw(ErrorReport(loc) << failure_messages.str());
 }
 
 static std::string prefixLine(
@@ -612,11 +610,12 @@
         schemas, loc, graph, args, kwargs, self, /*render_errors=*/true);
   }
 
-  throw ErrorReport(loc) << "Arguments for call are not valid.\n"
-                         << "The following variants are available:\n"
-                         << prefixLine(failure_messages.str(), "  ")
-                         << "\nThe original call is";
-  throw ErrorReport(loc) << failure_messages.str();
+  throw(
+      ErrorReport(loc) << "Arguments for call are not valid.\n"
+                       << "The following variants are available:\n"
+                       << prefixLine(failure_messages.str(), "  ")
+                       << "\nThe original call is");
+  throw(ErrorReport(loc) << failure_messages.str());
 }
 
 // pack outputs of a function following python rules. If there is a single value
@@ -759,7 +758,7 @@
       }
       error << "\nThe original call is";
     }
-    throw error;
+    throw ErrorReport(error);
   }
 
   auto matched = matchSchemas(schemas, loc, graph, args, kwargs, self);
diff --git a/torch/csrc/jit/frontend/schema_matching.h b/torch/csrc/jit/frontend/schema_matching.h
index 8a24863..ddc6f1f 100644
--- a/torch/csrc/jit/frontend/schema_matching.h
+++ b/torch/csrc/jit/frontend/schema_matching.h
@@ -5,8 +5,7 @@
 
 #include <ATen/core/function_schema.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // Try to match a list of inputs and keyword 'attributes' to this
 // schema. Return the flat list of positional inputs to the call or
@@ -66,5 +65,4 @@
     const TypePtr& concrete_type,
     Value* value,
     bool allow_conversions);
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/schema_type_parser.h b/torch/csrc/jit/frontend/schema_type_parser.h
index a852121..ca5a00e 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.h
+++ b/torch/csrc/jit/frontend/schema_type_parser.h
@@ -6,8 +6,7 @@
 #include <c10/util/FunctionRef.h>
 #include <torch/csrc/jit/frontend/lexer.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 using TypePtr = c10::TypePtr;
 
@@ -42,5 +41,4 @@
   size_t next_id = 0;
   bool allow_typevars_;
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/script_type_parser.h b/torch/csrc/jit/frontend/script_type_parser.h
index 66c963b..205727f 100644
--- a/torch/csrc/jit/frontend/script_type_parser.h
+++ b/torch/csrc/jit/frontend/script_type_parser.h
@@ -4,8 +4,7 @@
 #include <torch/csrc/jit/frontend/resolver.h>
 #include <torch/csrc/jit/frontend/tree_views.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 /**
  * class ScriptTypeParser
@@ -51,5 +50,4 @@
   friend struct ConstantTableValue;
   friend struct SourceImporterImpl;
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/source_ref.h b/torch/csrc/jit/frontend/source_ref.h
index 185bd3c..c9ea38f 100644
--- a/torch/csrc/jit/frontend/source_ref.h
+++ b/torch/csrc/jit/frontend/source_ref.h
@@ -7,8 +7,7 @@
 #include <c10/macros/Export.h>
 #include <torch/csrc/jit/frontend/source_range.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 /**
  * SourceRef does two things:
@@ -33,7 +32,7 @@
     return &other < self.source_view_.get();
   }
   bool operator<(const SourceRef& other) const {
-    return *this < *other.source_view_.get();
+    return *this < *other.source_view_;
   }
   const Source* operator->() const {
     return source_view_.get();
@@ -43,5 +42,4 @@
   std::shared_ptr<Source> source_view_;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/strtod.h b/torch/csrc/jit/frontend/strtod.h
index dd03c3c..eb704a3 100644
--- a/torch/csrc/jit/frontend/strtod.h
+++ b/torch/csrc/jit/frontend/strtod.h
@@ -2,11 +2,9 @@
 
 #include <c10/macros/Macros.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API double strtod_c(const char* nptr, char** endptr);
 TORCH_API float strtof_c(const char* nptr, char** endptr);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/sugared_value.cpp b/torch/csrc/jit/frontend/sugared_value.cpp
index 94a11b2..54be15a 100644
--- a/torch/csrc/jit/frontend/sugared_value.cpp
+++ b/torch/csrc/jit/frontend/sugared_value.cpp
@@ -23,7 +23,7 @@
     size_t n_binders) {
   auto& g = *m.graph();
   if (!kwargs.empty())
-    throw ErrorReport(loc) << "print doesn't accept any keyword arguments";
+    throw(ErrorReport(loc) << "print doesn't accept any keyword arguments");
 
   std::vector<Value*> lowered_inputs = toValues(*m.graph(), args);
   g.insertNode(g.create(prim::Print, lowered_inputs, 0)->setSourceRange(loc));
@@ -82,14 +82,16 @@
       }
       return false;
     } else {
-      throw ErrorReport(loc) << "hasattr's first argument must be a object "
-                             << "or NamedTuple, but got a normal Tuple "
-                             << value_->type()->repr_str() << " instead";
+      throw(
+          ErrorReport(loc) << "hasattr's first argument must be a object "
+                           << "or NamedTuple, but got a normal Tuple "
+                           << value_->type()->repr_str() << " instead");
     }
   }
-  throw ErrorReport(loc) << "hasattr's first argument must be an object or "
-                         << "NamedTuple, got " << value_->type()->repr_str()
-                         << " instead";
+  throw(
+      ErrorReport(loc) << "hasattr's first argument must be an object or "
+                       << "NamedTuple, got " << value_->type()->repr_str()
+                       << " instead");
 }
 
 // support syntax sugar for x.foo(y, z) by allowing x.foo to return a
@@ -277,7 +279,7 @@
       report << " Did you forget to initialize an attribute in __init__()?";
     }
   }
-  throw report;
+  throw ErrorReport(report);
 }
 
 std::vector<std::shared_ptr<SugaredValue>> SimpleValue::asTuple(
@@ -293,20 +295,22 @@
     return fmap(outputs, make_simple_value);
   } else if (value_->type()->kind() == TypeKind::ListType) {
     if (!size_hint) {
-      throw ErrorReport(loc)
-          << "cannot statically infer the expected size of a "
-          << "list in this context";
+      throw(
+          ErrorReport(loc) << "cannot statically infer the expected size of a "
+                           << "list in this context");
     }
     auto graph = value_->owningGraph();
     Node* unpack =
         graph->insertNode(graph->createListUnpack(value_, *size_hint));
     return fmap(unpack->outputs(), make_simple_value);
   } else if (value_->type()->kind() == TypeKind::AnyTupleType) {
-    throw ErrorReport(loc)
-        << "Provided tuple is not fully defined/refined including its element types, please provide a value of type like Tuple[int, int]";
+    throw(
+        ErrorReport(loc)
+        << "Provided tuple is not fully defined/refined including its element types, please provide a value of type like Tuple[int, int]");
   }
-  throw ErrorReport(loc) << value_->type()->repr_str()
-                         << " cannot be used as a tuple";
+  throw(
+      ErrorReport(loc) << value_->type()->repr_str()
+                       << " cannot be used as a tuple");
 }
 
 static bool isRecursive(const TypePtr& classType, const TypePtr& attrType) {
@@ -331,8 +335,9 @@
     Value* newValue) {
   const auto classType = value_->type()->cast<ClassType>();
   if (!classType) {
-    throw ErrorReport(loc) << "Tried to set an attribute: " << field
-                           << " on a non-class: " << value_->type()->repr_str();
+    throw(
+        ErrorReport(loc) << "Tried to set an attribute: " << field
+                         << " on a non-class: " << value_->type()->repr_str());
   }
   auto expectedType = classType->findAttribute(field);
   if (!expectedType) {
@@ -351,12 +356,13 @@
 
     if (isInitializing) {
       if (isRecursive(classType, newValue->type())) {
-        throw ErrorReport(loc)
+        throw(
+            ErrorReport(loc)
             << "Assignment to attribute '" << field
             << "' cannot be of a type that contains class "
             << "'" << classType->repr_str() << "'.\n"
             << "Classes that recursively contain instances of themselves"
-            << " are not yet supported";
+            << " are not yet supported");
       }
 
       classType->addAttribute(field, newValue->type());
@@ -365,9 +371,10 @@
       const auto insertPoint = m.graph()->insertPoint();
       const auto topLevelBlock = m.graph()->block();
       if (insertPoint->owningBlock() != topLevelBlock) {
-        throw ErrorReport(loc)
+        throw(
+            ErrorReport(loc)
             << "First assignment cannot be in a control-flow block. "
-            << "Initialize the field at the top level first";
+            << "Initialize the field at the top level first");
       }
     } else {
       // Check and see if it's a setter attribute.
@@ -379,12 +386,14 @@
       }
 
       if (prop && !prop->setter) {
-        throw ErrorReport(loc) << "Tried to set read-only attribute: " << field;
+        throw(
+            ErrorReport(loc) << "Tried to set read-only attribute: " << field);
       }
 
-      throw ErrorReport(loc)
+      throw(
+          ErrorReport(loc)
           << "Tried to set nonexistent attribute: " << field
-          << ". Did you forget to initialize it in __init__()?";
+          << ". Did you forget to initialize it in __init__()?");
     }
   }
 
@@ -393,9 +402,10 @@
   // Check type correctness
   const auto newType = newValue->type();
   if (!newType->isSubtypeOf(*expectedType)) {
-    throw ErrorReport(loc) << "Wrong type for attribute assignment. Expected "
-                           << expectedType->repr_str() << " but got "
-                           << newType->repr_str();
+    throw(
+        ErrorReport(loc) << "Wrong type for attribute assignment. Expected "
+                         << expectedType->repr_str() << " but got "
+                         << newType->repr_str());
   }
 
   auto& g = *m.graph();
@@ -451,8 +461,9 @@
       val_type->isSubtypeOf(*TensorType::get())) {
     return g.insert(aten::len, {val}, {}, loc);
   } else {
-    throw ErrorReport(loc) << "'" << val_type->repr_str() << "'"
-                           << " object is not iterable";
+    throw(
+        ErrorReport(loc) << "'" << val_type->repr_str() << "'"
+                         << " object is not iterable");
   }
 }
 
@@ -490,8 +501,9 @@
     // Defer to the __getitem__ attr on the class.
     return attr(loc, m, "__getitem__")->call(loc, m, {idx}, {}, 1);
   } else {
-    throw ErrorReport(loc) << "'" << val_type->repr_str() << "'"
-                           << " object is not subscriptable";
+    throw(
+        ErrorReport(loc) << "'" << val_type->repr_str() << "'"
+                         << " object is not subscriptable");
   }
 }
 
@@ -516,8 +528,9 @@
     }
     return std::make_shared<SugaredTupleValue>(tup_sugared);
   } else {
-    throw ErrorReport(loc) << "'" << type->repr_str() << "'"
-                           << " object is not iterable";
+    throw(
+        ErrorReport(loc) << "'" << type->repr_str() << "'"
+                         << " object is not iterable");
   }
 }
 
@@ -529,15 +542,16 @@
   for (const auto i : c10::irange(inputs.size())) {
     auto typ = inputs[i]->type();
     if (!typ->cast<IntType>()) {
-      throw ErrorReport(loc)
-          << "all inputs of range must be ints, found " << typ->repr_str()
-          << " in argument " << std::to_string(i);
+      throw(
+          ErrorReport(loc) << "all inputs of range must be ints, found "
+                           << typ->repr_str() << " in argument "
+                           << std::to_string(i));
     }
   }
 
   Graph& g = *m.graph();
   if (inputs.empty()) {
-    throw ErrorReport(loc) << "range expected at least 1 arguments, got 0";
+    throw(ErrorReport(loc) << "range expected at least 1 arguments, got 0");
   } else if (inputs.size() == 1) {
     end_ = inputs[0];
     start_ = g.insertConstant(0, loc);
@@ -554,8 +568,9 @@
     }
     has_only_end_ = false;
   } else {
-    throw ErrorReport(loc) << "range expected at most 3 arguments, got "
-                           << inputs.size();
+    throw(
+        ErrorReport(loc) << "range expected at most 3 arguments, got "
+                         << inputs.size());
   }
 
   static_len_ = static_len;
@@ -650,9 +665,10 @@
     unroll_length_ = child_len;
   } else {
     if ((unroll_length_ && !child_len) || (child_len && !unroll_length_)) {
-      throw ErrorReport(range)
+      throw(
+          ErrorReport(range)
           << "Can not iterate over a module list or tuple with a value "
-             "that does not have a statically determinable length\n";
+             "that does not have a statically determinable length\n");
     }
     if (unroll_length_ && child_len) {
       // iterables run for the minimum length of all its leaves
@@ -696,8 +712,9 @@
   auto self = g.insertNode(g.createObject(type_))->output();
   self->node()->setSourceRange(loc);
   if (!type_->findMethod("__init__")) {
-    throw ErrorReport(loc) << "Class " << type_->name()->name()
-                           << " does not have an __init__ function defined";
+    throw(
+        ErrorReport(loc) << "Class " << type_->name()->name()
+                         << " does not have an __init__ function defined");
   }
 
   // Call the init function
@@ -718,8 +735,9 @@
   }
 
   if (field != "__new__") {
-    throw ErrorReport(loc) << "Tried to lookup unknown attribute on class "
-                           << type_->annotation_str();
+    throw(
+        ErrorReport(loc) << "Tried to lookup unknown attribute on class "
+                         << type_->annotation_str());
   }
   return SpecialFormValue::create(prim::CreateObject);
 }
@@ -781,8 +799,9 @@
       names_values.end(),
       [&field](const at::EnumNameValue& nv) { return nv.first == field; });
   if (it == names_values.end()) {
-    throw ErrorReport(loc) << enum_type_->repr_str() << "'"
-                           << " has no attribute '" << field << "'";
+    throw(
+        ErrorReport(loc) << enum_type_->repr_str() << "'"
+                         << " has no attribute '" << field << "'");
   }
   auto enum_holder = c10::make_intrusive<at::ivalue::EnumHolder>(
       enum_type_, it->first, it->second);
diff --git a/torch/csrc/jit/frontend/sugared_value.h b/torch/csrc/jit/frontend/sugared_value.h
index 8bd64f8..60b3225 100644
--- a/torch/csrc/jit/frontend/sugared_value.h
+++ b/torch/csrc/jit/frontend/sugared_value.h
@@ -13,8 +13,7 @@
 #include <torch/csrc/jit/frontend/versioned_symbols.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 using SugaredValuePtr = std::shared_ptr<SugaredValue>;
 
@@ -853,5 +852,4 @@
   Value* step_;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/tracer.cpp b/torch/csrc/jit/frontend/tracer.cpp
index a90d5bb..7a662f0 100644
--- a/torch/csrc/jit/frontend/tracer.cpp
+++ b/torch/csrc/jit/frontend/tracer.cpp
@@ -613,7 +613,7 @@
   }
 }
 
-void addInputs(Node* n, const char* name, c10::SymInt value) {
+void addInputs(Node* n, const char* name, const c10::SymInt& value) {
   addInputs(n, name, value.guard_int(__FILE__, __LINE__));
 }
 
@@ -743,7 +743,7 @@
 void addInputs(
     Node* n,
     const char* name,
-    std::vector<at::Tensor> value,
+    const std::vector<at::Tensor>& value,
     bool allow_undefined) {
   addInputs(n, name, at::ITensorListRef(value), allow_undefined);
 }
diff --git a/torch/csrc/jit/frontend/tracer.h b/torch/csrc/jit/frontend/tracer.h
index fef018d..106a82e 100644
--- a/torch/csrc/jit/frontend/tracer.h
+++ b/torch/csrc/jit/frontend/tracer.h
@@ -13,7 +13,6 @@
 
 #include <cstdint>
 #include <memory>
-#include <mutex>
 #include <unordered_map>
 #include <vector>
 
@@ -101,10 +100,9 @@
 // data dependencies, but once they get to the ATen call where we actually have
 // the tracing logic, they get converted into a raw IntArrayRef, and we loose
 // all information. To prevent this, we temporarily stash it in here.
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct ArgumentStash {
   struct IntArrayRefTrace : std::vector<Value*> {
-    IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
+    IntArrayRefTrace(size_t size) : std::vector<Value*>(size, nullptr) {}
   };
 
   static bool empty() {
@@ -232,7 +230,7 @@
 // NB: those serve both as an intermediate steps in addInputs below,
 // as well as the overloads that terminate template recursion
 TORCH_API void addInputs(Node* n, const char* name, int64_t value);
-TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value);
+TORCH_API void addInputs(Node* n, const char* name, const c10::SymInt& value);
 TORCH_API void addInputs(
     Node* n,
     const char* name,
@@ -283,7 +281,7 @@
 TORCH_API void addInputs(
     Node* n,
     const char* name,
-    std::vector<at::Tensor> value,
+    const std::vector<at::Tensor>& value,
     bool allow_undefined = false);
 TORCH_API void addInputs(
     Node* n,
diff --git a/torch/csrc/jit/frontend/tree.h b/torch/csrc/jit/frontend/tree.h
index 33a1223..84e5e77 100644
--- a/torch/csrc/jit/frontend/tree.h
+++ b/torch/csrc/jit/frontend/tree.h
@@ -9,8 +9,7 @@
 #include <c10/util/intrusive_ptr.h>
 #include <torch/csrc/jit/frontend/lexer.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // Trees are used to represent all forms of TC IR, pre- and post-typechecking.
 // Rather than have a full class hierarchy for all TC statements, trees are a
@@ -209,12 +208,11 @@
 
 static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) {
   t_.print(out, t_.tree, 0);
-  return out << std::endl;
+  return out << '\n';
 }
 
 static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) {
   return out << pretty_tree(t);
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/frontend/tree_views.h b/torch/csrc/jit/frontend/tree_views.h
index 77d06be..dda9b2d 100644
--- a/torch/csrc/jit/frontend/tree_views.h
+++ b/torch/csrc/jit/frontend/tree_views.h
@@ -9,8 +9,7 @@
 #include <string>
 #include <utility>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // clang-format off
 // TreeView provides a statically-typed way to traverse the tree, which should
@@ -1262,8 +1261,7 @@
   return std::move(synthesised_union);
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
 
 namespace std {
 
diff --git a/torch/csrc/jit/frontend/versioned_symbols.h b/torch/csrc/jit/frontend/versioned_symbols.h
index e3caf26..fc6b0ff 100644
--- a/torch/csrc/jit/frontend/versioned_symbols.h
+++ b/torch/csrc/jit/frontend/versioned_symbols.h
@@ -6,8 +6,7 @@
 
 #include <cstdint>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 // Maps the given symbol into an implementation of its behavior at the
 // given version.
 // See note [Versioned Symbols]
@@ -17,5 +16,4 @@
 // Maps the given kind to the minimum version that supports it.
 // See note [Dynamic Versions and torch.jit.save vs. torch.save]
 TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind);
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/tensorexpr/bounds_overlap.cpp b/torch/csrc/jit/tensorexpr/bounds_overlap.cpp
index dbcdfd1..1a225a2 100644
--- a/torch/csrc/jit/tensorexpr/bounds_overlap.cpp
+++ b/torch/csrc/jit/tensorexpr/bounds_overlap.cpp
@@ -8,7 +8,7 @@
 namespace torch::jit::tensorexpr::analysis {
 
 // Returns true if the given expression is guaranteed to be positive.
-static bool mustBePositive(ExprPtr e) {
+static bool mustBePositive(const ExprPtr& e) {
   if (e->isConstant()) {
     int e_val = immediateAs<int>(e);
     return e_val > 0;
@@ -17,7 +17,7 @@
 }
 
 // Returns true if the given expression is guaranteed to be negative.
-static bool mustBeNegative(ExprPtr e) {
+static bool mustBeNegative(const ExprPtr& e) {
   if (e->isConstant()) {
     int e_val = immediateAs<int>(e);
     return e_val < 0;
@@ -26,7 +26,7 @@
 }
 
 // Returns true if the given expression is guaranteed to be zero.
-static bool mustBeZero(ExprPtr e) {
+static bool mustBeZero(const ExprPtr& e) {
   if (e->isConstant()) {
     int e_val = immediateAs<int>(e);
     return e_val == 0;