!6 upgrade to pytorch v1.11.0

From: @xiezhipeng1
Reviewed-by: @sinever, @zhunaipan
Signed-off-by: @sinever

commit 41be3593d1
0001-Fix-illegal-opcode-bug-in-caffe2-40584.patch (deleted)
@@ -1,54 +0,0 @@
From 7676682584d0caf9243bce74ea0a88711ec4a807 Mon Sep 17 00:00:00 2001
From: Mitchell Spryn <mspryn@fb.com>
Date: Fri, 26 Jun 2020 11:08:56 -0700
Subject: [PATCH] Fix illegal opcode bug in caffe2 (#40584)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/40584

Also patch [this github issue](https://github.com/pytorch/pytorch/issues/33124)
involving an illegal assembly instruction in 8x8-dq-aarch64-neon.S.

Test Plan:
Build binaries, copy to shaker, run executables. Also run all
existing caffe tests.

Reviewed By: kimishpatel

Differential Revision: D22240670

fbshipit-source-id: 51960266ce58699fe6830bcf75632b92a122f638
---
 .../cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S b/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S
index 7a67c6d401..b8bde02006 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S
@@ -687,14 +687,14 @@ BEGIN_FUNCTION pytorch_q8gemm_dq_ukernel_8x8__aarch64_neon

 SUB x1, x1, 4

- MOV V8.4s, V9.4s
- MOV v10.4s, v11.4s
- MOV v12.4s, V13.4s
- MOV V14.4s, V15.4s
- MOV V16.4s, V17.4s
- MOV V18.4s, V19.4s
- MOV V20.4s, V21.4s
- MOV V22.4s, V23.4s
+ MOV V8.16b, V9.16b
+ MOV v10.16b, v11.16b
+ MOV v12.16b, V13.16b
+ MOV V14.16b, V15.16b
+ MOV V16.16b, V17.16b
+ MOV V18.16b, V19.16b
+ MOV V20.16b, V21.16b
+ MOV V22.16b, V23.16b

 5:
 CMP x1, 2
--
2.18.4
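For context on the fix this patch carried (it is dropped here because the upstream commit from June 2020 is already part of pytorch v1.11.0): on AArch64, `MOV Vd.<T>, Vn.<T>` is an alias of `ORR Vd.<T>, Vn.<T>, Vn.<T>`, and that alias only encodes the byte arrangements `8b` and `16b`. A `.4s` arrangement has no valid encoding, which is how the kernel ended up executing an illegal opcode; since these copies move the whole 128-bit register anyway, rewriting them with `.16b` is equivalent. A minimal sketch of the corrected form via inline assembly (this test program is illustrative, not from the patch, and assumes an AArch64 GCC/Clang toolchain):

    // copy_v.cpp: the whole-register copy as the patch writes it.
    #include <arm_neon.h>
    #include <cstdio>

    int main() {
        uint8x16_t src = vdupq_n_u8(42);
        uint8x16_t dst;
        // Legal: MOV (vector) aliases ORR, which only accepts 8b/16b
        // arrangements; "w" is the SIMD/FP register constraint.
        asm("mov %0.16b, %1.16b" : "=w"(dst) : "w"(src));
        // "mov %0.4s, %1.4s" has no encoding and would not assemble.
        printf("%d\n", vgetq_lane_u8(dst, 0));  // prints 42
        return 0;
    }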
0001-Remove-breakpad-dependency.patch (new file, 617 lines)
@@ -0,0 +1,617 @@
From d862908738d31f3d5e5dc2caabe51930e7cf020e Mon Sep 17 00:00:00 2001
From: Nikita Shulga <nshulga@fb.com>
Date: Tue, 3 May 2022 20:21:55 +0000
Subject: [PATCH] Remove breakpad dependency

This functionality does not seem to be used
and there are some requests to update dependency.

Add `third_party` to torch_cpu include directories if compiling with
Caffe2 support, as `caffe2/quantization/server/conv_dnnlowp_op.cc` depends on `third_party/fbgemm/src/RefImplementations.h`

Pull Request resolved: https://github.com/pytorch/pytorch/pull/75394
Approved by: https://github.com/janeyx99, https://github.com/seemethere
---
 CMakeLists.txt | 5 -
 caffe2/CMakeLists.txt | 8 +-
 cmake/Dependencies.cmake | 4 -
 cmake/Summary.cmake | 1 -
 test/test_cpp_extensions_jit.py | 79 +----------
 test/test_utils.py | 30 +----
 tools/build_variables.bzl | 1 -
 torch/csrc/Module.cpp | 7 -
 torch/csrc/api/include/torch/utils.h | 1 -
 torch/csrc/utils/crash_handler.cpp | 169 ------------------------
 torch/csrc/utils/crash_handler.h | 35 -----
 torch/csrc/utils/init.cpp | 14 --
 torch/csrc/utils/init.h | 5 -
 torch/testing/_internal/common_utils.py | 15 ---
 15 files changed, 5 insertions(+), 372 deletions(-)
 delete mode 100644 torch/csrc/utils/crash_handler.cpp
 delete mode 100644 torch/csrc/utils/crash_handler.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index e51760b5..7f62e1c6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -209,7 +209,6 @@ cmake_dependent_option(
   "USE_CUDNN" OFF)
 option(USE_FBGEMM "Use FBGEMM (quantized 8-bit server operators)" ON)
 option(USE_KINETO "Use Kineto profiling library" ON)
-option(USE_BREAKPAD "Use breakpad crash dump library" ON)
 option(USE_CUPTI_SO "Use CUPTI as a shared library" OFF)
 option(USE_FAKELOWP "Use FakeLowp operators" OFF)
 option(USE_FFMPEG "Use ffmpeg" OFF)
@@ -273,10 +272,6 @@ if(NOT DEFINED USE_VULKAN)
   "ANDROID" OFF)
 endif()

-if(IOS)
-  set(USE_BREAKPAD OFF)
-endif()
-
 option(USE_SOURCE_DEBUG_ON_MOBILE "Enable " ON)
 option(USE_LITE_INTERPRETER_PROFILER "Enable " ON)
 option(USE_VULKAN_FP16_INFERENCE "Vulkan - Use fp16 inference" OFF)
diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index d57d7ebb..cfcc1ae2 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -709,7 +709,6 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
 ${TORCH_SRC_DIR}/csrc/api/src/optim/schedulers/step_lr.cpp
 ${TORCH_SRC_DIR}/csrc/api/src/serialize/input-archive.cpp
 ${TORCH_SRC_DIR}/csrc/api/src/serialize/output-archive.cpp
- ${TORCH_SRC_DIR}/csrc/utils/crash_handler.cpp
 )
 endif()

@@ -1066,10 +1065,9 @@ if(USE_TBB)
 target_link_libraries(torch_cpu PUBLIC TBB::tbb)
 endif()

-if(USE_BREAKPAD)
- target_compile_definitions(torch_cpu PRIVATE ADD_BREAKPAD_SIGNAL_HANDLER)
- target_include_directories(torch_cpu PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../third_party ${CMAKE_CURRENT_LIST_DIR}/../third_party/breakpad/src)
- target_link_libraries(torch_cpu PRIVATE breakpad)
+if(BUILD_CAFFE2 AND BUILD_CAFFE2_OPS AND USE_FBGEMM)
+ # FIXME: quantization/server/conv_dnnlowp_op.cc depends on fbgemm/src/RefImplementations.h
+ target_include_directories(torch_cpu PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../third_party)
 endif()

 target_include_directories(torch_cpu PRIVATE ${ATen_CPU_INCLUDE})
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index 557ab649..525fea7a 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -1824,10 +1824,6 @@ set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
 list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
 set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)

-if(USE_BREAKPAD)
- add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/breakpad)
-endif()
-
 # ---[ Kineto
 # edge profiler depends on KinetoProfiler but it only does cpu
 # profiling. Thus we dont need USE_CUDA/USE_ROCM
diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake
index 20401207..1b15ec86 100644
--- a/cmake/Summary.cmake
+++ b/cmake/Summary.cmake
@@ -185,7 +185,6 @@ function(caffe2_print_configuration_summary)
 message(STATUS " SELECTED_OP_LIST : ${SELECTED_OP_LIST}")
 endif()
 message(STATUS " USE_DEPLOY : ${USE_DEPLOY}")
- message(STATUS " USE_BREAKPAD : ${USE_BREAKPAD}")
 message(STATUS " Public Dependencies : ${Caffe2_PUBLIC_DEPENDENCY_LIBS}")
 message(STATUS " Private Dependencies : ${Caffe2_DEPENDENCY_LIBS}")
 # coreml
diff --git a/test/test_cpp_extensions_jit.py b/test/test_cpp_extensions_jit.py
index 06b1133d..9875f4ee 100644
--- a/test/test_cpp_extensions_jit.py
+++ b/test/test_cpp_extensions_jit.py
@@ -10,15 +10,12 @@ import tempfile
 import subprocess
 import glob

-import textwrap
-from multiprocessing import Process
-
 import torch.testing._internal.common_utils as common
 import torch
 import torch.backends.cudnn
 import torch.utils.cpp_extension
 from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
-from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
+from torch.testing._internal.common_utils import gradcheck


 TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
@@ -869,80 +866,6 @@ class TestCppExtensionJIT(common.TestCase):

 gradcheck(torch.ops.my.add, [a, b], eps=1e-2)

- @staticmethod
- def _crash_handler_test_process(stderr_file, destination):
- # Code to enable dumps and trigger a segfault
- if sys.platform == "win32":
- destination = destination.replace("\\", "\\\\")
- csrc = textwrap.dedent(f"""
- #include <torch/torch.h>
- #include <locale>
- #include <iostream>
- #include <codecvt>
- #include <string>
-
- int fail() {{
- std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
- std::string narrow("{destination}");
- std::wstring wide = converter.from_bytes(narrow);
- torch::crash_handler::enable_minidumps(wide.c_str());
-
- volatile int* bad = nullptr;
- return *bad;
- }}
- """)
- else:
- csrc = textwrap.dedent(f"""
- #include <torch/torch.h>
-
- int fail() {{
- torch::crash_handler::enable_minidumps("{destination}");
-
- volatile int* bad = nullptr;
- return *bad;
- }}
- """)
-
- # Some special stuff to overwrite stderr for a C++ extension
- # Copied from: https://stackoverflow.com/questions/8804893/redirect-stdout-from-python-for-c-calls
- sys.stdout.flush()
- newstdout = os.dup(2)
- devnull = os.open(stderr_file, os.O_WRONLY)
- os.dup2(devnull, 2)
- os.close(devnull)
- sys.stdout = os.fdopen(newstdout, 'w')
-
- module = torch.utils.cpp_extension.load_inline(
- name="segfault",
- cpp_sources=csrc,
- functions=["fail"],
- )
- module.fail()
-
- @unittest.skipIf(TEST_WITH_ASAN, "ASAN disables the crash handler's signal handler")
- @unittest.skipIf(not has_breakpad(), "Built without breakpad")
- @unittest.skipIf(os.environ.get("TEST_CONFIG") == "force_on_cpu", "fails on force_on_cpu config, tracked w/ #65253")
- def test_crash_handler(self):
- with tempfile.TemporaryDirectory() as temp_dir, tempfile.NamedTemporaryFile(delete=not sys.platform == "win32") as stderr:
- # Use multiprocessing to spin up a separate process to make catching
- # the segfault easier
- p = Process(target=self._crash_handler_test_process, args=(stderr.name, temp_dir))
- p.start()
- p.join()
-
- with open(stderr.name) as f:
- result = f.read().strip()
-
- # Check that the signal handler was called
- self.assertTrue(result.startswith(f"Wrote minidump to {temp_dir}"))
-
- with open(result.replace("Wrote minidump to ", ""), "rb") as dump_file:
- dump_bytes = dump_file.read()
-
- # Check that the file has the correct magic number
- self.assertEqual(b"MDMP", dump_bytes[0:4])
-
-

 if __name__ == "__main__":
 common.run_tests()
diff --git a/test/test_utils.py b/test/test_utils.py
index c8f4e3aa..5dd34b72 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -2,8 +2,6 @@

 import sys
 import os
-import contextlib
-import io
 import re
 import shutil
 import random
@@ -21,7 +19,7 @@ import torch.utils.cpp_extension
 import torch.hub as hub
 from torch.autograd._functions.utils import check_onnx_broadcast
 from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
-from torch.testing._internal.common_utils import has_breakpad, load_tests, retry, IS_SANDCASTLE, IS_WINDOWS, TEST_WITH_ASAN
+from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS

 # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings
@@ -757,32 +755,6 @@ class TestAssert(TestCase):
 ms(torch.tensor([False], dtype=torch.bool))


-class TestCrashHandler(TestCase):
- @unittest.skipIf(TEST_WITH_ASAN, "ASAN disables the crash handler's signal handler")
- @unittest.skipIf(not has_breakpad(), "Built without breakpad")
- def test_python_exception_writing(self):
- with tempfile.TemporaryDirectory() as temp_dir:
- torch.utils._crash_handler.enable_minidumps(temp_dir)
- torch.utils._crash_handler.enable_minidumps_on_exceptions()
-
- files = os.listdir(temp_dir)
- self.assertEqual(len(files), 0)
-
- f = io.StringIO()
- with contextlib.redirect_stderr(f):
- try:
- @torch.jit.script
- def x(i: int):
- return i + "2" # type: ignore[operator]
- except RuntimeError as e:
- pass
-
- files = os.listdir(temp_dir)
- self.assertEqual(len(files), 1)
- self.assertTrue(files[0].endswith(".dmp"))
- torch.utils._crash_handler.disable_minidumps()
-
-
 @unittest.skipIf(IS_SANDCASTLE, "cpp_extension is OSS only")
 class TestStandaloneCPPJIT(TestCase):
 def test_load_standalone(self):
diff --git a/tools/build_variables.bzl b/tools/build_variables.bzl
index f78b7cb0..dc705d3e 100644
--- a/tools/build_variables.bzl
+++ b/tools/build_variables.bzl
@@ -772,7 +772,6 @@ torch_cpp_srcs = [
 "torch/csrc/api/src/optim/schedulers/step_lr.cpp",
 "torch/csrc/api/src/serialize/input-archive.cpp",
 "torch/csrc/api/src/serialize/output-archive.cpp",
- "torch/csrc/utils/crash_handler.cpp",
 ]

 libtorch_python_cuda_core_sources = [
diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index 49f51790..86aea308 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -55,7 +55,6 @@
 #include <torch/csrc/utils/tensor_new.h>
 #include <torch/csrc/utils/tensor_numpy.h>
 #include <torch/csrc/utils/python_dispatch.h>
-#include <torch/csrc/utils/crash_handler.h>
 #include <torch/csrc/utils/python_arg_parser.h>
 #include <torch/csrc/utils/pycfunction_helpers.h>
 #include <torch/csrc/jit/python/python_tracer.h>
@@ -64,7 +63,6 @@
 #include <torch/csrc/monitor/python_init.h>
 #include <torch/csrc/onnx/init.h>
 #include <torch/csrc/utils/init.h>
-#include <torch/csrc/utils/crash_handler.h>
 #include <torch/csrc/api/include/torch/python/init.h>

 #ifdef USE_DISTRIBUTED
@@ -839,7 +837,6 @@ PyObject* initModule() {
 torch::monitor::initMonitorBindings(module);
 torch::impl::dispatch::initDispatchBindings(module);
 torch::throughput_benchmark::initThroughputBenchmarkBindings(module);
- torch::crash_handler::initCrashHandlerBindings(module);
 torch::autograd::initReturnTypes(module);
 torch::autograd::initNNFunctions(module);
 torch::autograd::initFFTFunctions(module);
@@ -895,10 +892,6 @@ PyObject* initModule() {

 // Automatically translate errors thrown from pybind11 functions
 py::register_exception_translator([](std::exception_ptr e) { // NOLINT
- if (torch::crash_handler::is_enabled_on_exceptions()) {
- torch::crash_handler::write_minidump();
- }
-
 try {
 if (e) {
 std::rethrow_exception(e);
diff --git a/torch/csrc/api/include/torch/utils.h b/torch/csrc/api/include/torch/utils.h
index f664074d..3bb6363a 100644
--- a/torch/csrc/api/include/torch/utils.h
+++ b/torch/csrc/api/include/torch/utils.h
@@ -5,7 +5,6 @@
 #include <torch/csrc/autograd/grad_mode.h>
 #include <torch/csrc/autograd/profiler.h>
 #include <torch/csrc/api/include/torch/types.h>
-#include <torch/csrc/utils/crash_handler.h>
 #include <cstdint>

 namespace torch {
diff --git a/torch/csrc/utils/crash_handler.cpp b/torch/csrc/utils/crash_handler.cpp
deleted file mode 100644
index 8fb318b2..00000000
--- a/torch/csrc/utils/crash_handler.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-#include <cstdlib>
-#include <cstring>
-#include <iostream>
-
-#ifdef ADD_BREAKPAD_SIGNAL_HANDLER
-#ifdef __linux__
-#include <breakpad/src/client/linux/handler/exception_handler.h>
-#include <csignal>
-#elif __APPLE__
-#include <breakpad/src/client/mac/handler/exception_handler.h>
-#elif _WIN32
-#include <breakpad/src/client/windows/handler/exception_handler.h>
-#else
-#error unsupported platform
-#endif
-#endif
-
-#include <c10/util/Exception.h>
-#include <torch/csrc/utils/crash_handler.h>
-
-namespace torch {
-namespace crash_handler {
-
-#ifdef ADD_BREAKPAD_SIGNAL_HANDLER
-
-static std::unique_ptr<google_breakpad::ExceptionHandler> handler; // NOLINT
-static STRING_TYPE minidump_directory; // NOLINT
-static bool enabled_for_exceptions = false; // NOLINT
-
-#if __linux__
-bool dump_callback(
- const google_breakpad::MinidumpDescriptor& descriptor,
- void* context,
- bool succeeded) {
- if (succeeded) {
- std::cerr << "Wrote minidump to " << descriptor.path() << std::endl;
- }
- return succeeded;
-}
-#elif __APPLE__
-
-bool dump_callback(
- const char* dump_dir,
- const char* minidump_id,
- void* context,
- bool succeeded) {
- if (succeeded) {
- std::cerr << "Wrote minidump to " << dump_dir << "/" << minidump_id
- << ".dmp" << std::endl;
- }
- return succeeded;
-}
-#elif _WIN32
-bool dump_callback(
- const wchar_t* dump_path,
- const wchar_t* minidump_id,
- void* context,
- EXCEPTION_POINTERS* exinfo,
- MDRawAssertionInfo* assertion,
- bool succeeded) {
- if (succeeded) {
- // Printing with wcerr inserts spaces between all the characters for some
- // reason. If someone figures that out then we can get rid of the std::string
- // conversions here.
- std::wstring dump_path_ws(dump_path);
- std::string dump_path_string(dump_path_ws.begin(), dump_path_ws.end());
- std::wstring minidump_id_ws(minidump_id);
- std::string minidump_id_string(minidump_id_ws.begin(), minidump_id_ws.end());
- std::cerr << "Wrote minidump to " << dump_path_string << "\\" << minidump_id_string << ".dmp" << std::endl;
- }
- return succeeded;
-}
-#endif
-
-void enable_minidumps(const STRING_TYPE& dir) {
- minidump_directory = dir;
-// The constructor here registers the actual signal handler
-#ifdef __linux__
- handler = std::make_unique<google_breakpad::ExceptionHandler>(
- google_breakpad::MinidumpDescriptor(minidump_directory),
- nullptr,
- dump_callback,
- nullptr,
- true,
- -1);
-#elif __APPLE__
- handler = std::make_unique<google_breakpad::ExceptionHandler>(
- /*dump_path=*/minidump_directory.c_str(),
- /*filter=*/nullptr,
- /*callback=*/dump_callback,
- /*callback_context=*/nullptr,
- /*install_handler=*/true,
- /*port_name=*/nullptr);
-#elif _WIN32
- handler = std::make_unique<google_breakpad::ExceptionHandler>(
- /*dump_path=*/minidump_directory.c_str(),
- /*filter=*/nullptr,
- /*callback=*/dump_callback,
- /*callback_context=*/nullptr,
- /*handler_types=*/
- google_breakpad::ExceptionHandler::HandlerType::HANDLER_ALL);
-#endif
-}
-
-void disable_minidumps() {
- handler.reset();
-}
-
-const STRING_TYPE& get_minidump_directory() {
- if (handler == nullptr) {
- AT_ERROR(
- "Minidump handler is uninintialized, make sure to call enable_minidumps first");
- }
- return minidump_directory;
-}
-
-bool is_enabled_on_exceptions() {
- if (handler == nullptr) {
- return false;
- }
-
- return enabled_for_exceptions;
-}
-
-void write_minidump() {
- TORCH_CHECK(
- handler != nullptr,
- "Minidump handler is uninintialized, make sure to call enable_minidumps first");
- handler->WriteMinidump();
-}
-
-void enable_minidumps_on_exceptions() {
- if (handler == nullptr) {
- AT_ERROR(
- "Minidump handler is uninintialized, make sure to call enable_minidumps first");
- }
- enabled_for_exceptions = true;
-}
-
-#else
-// On unspported systems we can't do anything, so stub out everything.
-void enable_minidumps(const STRING_TYPE& dir) {
- AT_ERROR("Compiled without minidump support");
-}
-
-void disable_minidumps() {
- // Purposefully do nothing
-}
-
-const STRING_TYPE& get_minidump_directory() {
- AT_ERROR("Compiled without minidump support");
-}
-
-bool is_enabled_on_exceptions() {
- return false;
-}
-
-void write_minidump() {
- AT_ERROR("Compiled without minidump support");
-}
-
-void enable_minidumps_on_exceptions() {
- AT_ERROR("Compiled without minidump support");
-}
-
-#endif
-
-} // namespace crash_handler
-} // namespace torch
diff --git a/torch/csrc/utils/crash_handler.h b/torch/csrc/utils/crash_handler.h
deleted file mode 100644
index 93f03380..00000000
--- a/torch/csrc/utils/crash_handler.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#pragma once
-#include <torch/csrc/Export.h>
-#include <string>
-
-namespace torch {
-namespace crash_handler {
-
-#ifdef _WIN32
-typedef std::wstring STRING_TYPE;
-#else
-typedef std::string STRING_TYPE;
-#endif
-
-// Set up a handler that writes minidumps to 'dir' on signals. This is not
-// necessary to call unless you want to change 'dir' to something other than
-// the default '/tmp/pytorch_crashes'.
-TORCH_API void enable_minidumps(const STRING_TYPE& dir);
-
-// Enable minidumps when passing exceptions up to Python. By default these don't
-// do anything special, but it can be useful to write out a minidump on
-// exceptions for debugging purposes. This has no effect in C++.
-TORCH_API void enable_minidumps_on_exceptions();
-
-// Disable all minidump writing and un-register the signal handler
-TORCH_API void disable_minidumps();
-
-// Get the directory that minidumps will be written to
-TORCH_API const STRING_TYPE& get_minidump_directory();
-
-// These are TORCH_API'ed since they are used from libtorch_python.so
-TORCH_API bool is_enabled_on_exceptions();
-TORCH_API void write_minidump();
-
-} // namespace crash_handler
-} // namespace torch
diff --git a/torch/csrc/utils/init.cpp b/torch/csrc/utils/init.cpp
index 8472f208..04a5d1b3 100644
--- a/torch/csrc/utils/init.cpp
+++ b/torch/csrc/utils/init.cpp
@@ -1,7 +1,6 @@
 #include <ATen/core/ivalue.h>
 #include <torch/csrc/utils/init.h>
 #include <torch/csrc/utils/throughput_benchmark.h>
-#include <torch/csrc/utils/crash_handler.h>

 #include <pybind11/functional.h>

@@ -51,17 +50,4 @@ void initThroughputBenchmarkBindings(PyObject* module) {
 }

 } // namespace throughput_benchmark
-
-namespace crash_handler {
-
-void initCrashHandlerBindings(PyObject* module) {
- auto m = pybind11::handle(module).cast<pybind11::module>();
-
- m.def("_enable_minidumps", enable_minidumps)
- .def("_is_enabled_on_exceptions", is_enabled_on_exceptions)
- .def("_enable_minidumps_on_exceptions", enable_minidumps_on_exceptions)
- .def("_disable_minidumps", disable_minidumps)
- .def("_get_minidump_directory", get_minidump_directory);
-}
-} // namespace crash_handler
 } // namespace torch
diff --git a/torch/csrc/utils/init.h b/torch/csrc/utils/init.h
index 45a1a3a7..bf6dd216 100644
--- a/torch/csrc/utils/init.h
+++ b/torch/csrc/utils/init.h
@@ -8,9 +8,4 @@ namespace throughput_benchmark {
 void initThroughputBenchmarkBindings(PyObject* module);

 } // namespace throughput_benchmark
-
-namespace crash_handler {
-void initCrashHandlerBindings(PyObject* module);
-
-} // namespace crash_handler
 } // namespace torch
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index 6e67b776..81656500 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -3061,21 +3061,6 @@ def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.devi
 return torch.tensor(res, device=device, dtype=dtype)


-def has_breakpad():
- # We always build with breakpad in CI
- if IS_IN_CI:
- return True
-
- # If not on a special build, check that the library was actually linked in
- try:
- torch._C._get_minidump_directory() # type: ignore[attr-defined]
- return True
- except RuntimeError as e:
- if "Minidump handler is uninintialized" in str(e):
- return True
- return False
-
-
 def sandcastle_skip_if(condition, reason):
 """
 Similar to unittest.skipIf, however in the sandcastle environment it just
--
2.27.0
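For context on what this patch strips out: per the deleted tests and torch/csrc/utils/crash_handler.h above, the breakpad integration let callers register a signal handler that writes a minidump on a crash, via torch::crash_handler::enable_minidumps() in C++ and torch.utils._crash_handler in Python. A minimal sketch of that pre-patch usage, distilled from the C++ source embedded in the deleted test_crash_handler (illustrative only; it builds only against a pre-patch libtorch compiled with USE_BREAKPAD=ON):

    // crash_demo.cpp: pre-patch breakpad usage (API removed by this patch).
    #include <torch/torch.h>

    int main() {
        // Register breakpad's signal handler; per the deleted header,
        // '/tmp/pytorch_crashes' was the default dump directory.
        torch::crash_handler::enable_minidumps("/tmp/pytorch_crashes");

        // Any fatal signal now writes a .dmp file before the process dies.
        volatile int* bad = nullptr;
        return *bad;  // SIGSEGV -> "Wrote minidump to /tmp/pytorch_crashes/..."
    }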
0002-disable-SVE-for-v1.6.0-due-to-sleef-build-error.patch (deleted)
@@ -1,29 +0,0 @@
From 9fa24f75ca24761183ea2fe664744f2284007e3f Mon Sep 17 00:00:00 2001
From: Zhipeng Xie <xiezhipeng1@huawei.com>
Date: Wed, 3 Feb 2021 10:22:31 -0500
Subject: [PATCH] disable SVE for v1.6.0 due to sleef build error.

sleef build error due to SVE not fully supported, so
disable it temporarily.

Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
---
 third_party/sleef/Configure.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/third_party/sleef/Configure.cmake b/third_party/sleef/Configure.cmake
index 25fc675e..02c364d2 100644
--- a/third_party/sleef/Configure.cmake
+++ b/third_party/sleef/Configure.cmake
@@ -545,7 +545,7 @@ endif()

 # SVE

-option(DISABLE_SVE "Disable SVE" OFF)
+option(DISABLE_SVE "Disable SVE" ON)
 option(ENFORCE_SVE "Build fails if SVE is not supported by the compiler" OFF)

 if(SLEEF_ARCH_AARCH64 AND NOT DISABLE_SVE)
--
2.27.0
pytorch-v1.11.0.tar.gz (new binary file, not shown)
pytorch.spec (16 lines changed)
@@ -1,15 +1,13 @@
 %global _empty_manifest_terminate_build 0
 Name: pytorch
-Version: 1.6.0
-Release: 3
+Version: 1.11.0
+Release: 1
 Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration
 License: BSD-3
 URL: https://pytorch.org/
-#sh -x updateSource.sh
-Source0: pytorch-%{version}-include-submodules.tar.bz2
+Source0: https://github.com/pytorch/pytorch/releases/download/v%{version}/pytorch-v%{version}.tar.gz

-Patch0001: 0001-Fix-illegal-opcode-bug-in-caffe2-40584.patch
-Patch0002: 0002-disable-SVE-for-v1.6.0-due-to-sleef-build-error.patch
+Patch0001: 0001-Remove-breakpad-dependency.patch

 BuildRequires: g++
 Requires: python3-future
@@ -29,6 +27,7 @@ BuildRequires: python3-setuptools
 BuildRequires: python3-pyyaml
 BuildRequires: cmake
 BuildRequires: python3-numpy
+BuildRequires: python3-typing-extensions

 %description -n python3-pytorch
 PyTorch is a Python package that provides two high-level features:
@@ -46,7 +45,7 @@ PyTorch is a Python package that provides two high-level features:
 You can reuse your favorite Python packages such as NumPy, SciPy and Cython to extend PyTorch when needed.

 %prep
-%autosetup -p1
+%autosetup -p1 -n %{name}-v%{version}

 %build
 %py3_build
@@ -86,6 +85,9 @@ mv %{buildroot}/doclist.lst .
 %{_docdir}/*

 %changelog
+* Mon Jun 13 2022 Zhipeng Xie <xiezhipeng1@huawei.com> - 1.11.0-1
+- upgrade to 1.11.0
+
 * Mon Jun 28 2021 wulei <wulei80@huawei.com> - 1.6.0-3
 - fixes: error: the CXX compiler identification is unknown
updateSource.sh (deleted)
@@ -1,12 +0,0 @@
#!/bin/bash
topdir=$(pwd)
version=1.6.0
rm -rf pytorch-${version}
git clone --recursive https://github.com/pytorch/pytorch pytorch-${version}
cd pytorch-${version}
git checkout v${version}
git submodule sync
git submodule update --init --recursive
rm -rf .git
cd ${topdir}
tar -cjf pytorch-${version}-include-submodules.tar.bz2 pytorch-${version}