diff --git a/packages/python-torch/0001-do-not-error-on-cast-function-type-strict.patch b/packages/python-torch/0001-do-not-error-on-cast-function-type-strict.patch
new file mode 100644
index 0000000000..de8e86ac9b
--- /dev/null
+++ b/packages/python-torch/0001-do-not-error-on-cast-function-type-strict.patch
@@ -0,0 +1,10 @@
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -920,6 +920,7 @@
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=cast-function-type" CMAKE_CXX_FLAGS)
++  append_cxx_flag_if_supported("-Wno-error=cast-function-type-strict" CMAKE_CXX_FLAGS)
+ else()
+   # skip unwanted includes from windows.h
+   add_compile_definitions(WIN32_LEAN_AND_MEAN)
diff --git a/packages/python-torch/0001-fix-for-format-specifies-type.patch b/packages/python-torch/0001-fix-for-format-specifies-type.patch
deleted file mode 100644
index 7dde2535c5..0000000000
--- a/packages/python-torch/0001-fix-for-format-specifies-type.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-diff -uNr pytorch/torch/csrc/autograd/python_function.cpp pytorch.mod/torch/csrc/autograd/python_function.cpp
---- pytorch/torch/csrc/autograd/python_function.cpp	2022-10-03 21:59:02.604422931 +0900
-+++ pytorch.mod/torch/csrc/autograd/python_function.cpp	2022-10-03 23:39:51.207871384 +0900
-@@ -510,7 +510,7 @@
-       throw torch::TypeError(
-           "save_for_backward can only save variables, but argument %ld is of "
-           "type %s",
--          i,
-+          (long)(i),
-           Py_TYPE(obj)->tp_name);
-     }
-   }
-diff -uNr pytorch/torch/csrc/utils/python_arg_parser.h pytorch.mod/torch/csrc/utils/python_arg_parser.h
---- pytorch/torch/csrc/utils/python_arg_parser.h	2022-10-03 19:00:05.000000000 +0900
-+++ pytorch.mod/torch/csrc/utils/python_arg_parser.h	2022-10-03 23:39:46.263853740 +0900
-@@ -511,7 +511,7 @@
-       args->signature.params[i].name.c_str(),
-       args->signature.params[i].type_name().c_str(),
-       Py_TYPE(obj)->tp_name,
--      idx + 1);
-+      (long)(idx + 1));
- }
- 
- inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) {
-@@ -671,7 +671,7 @@
-         signature.params[i].name.c_str(),
-         signature.params[i].type_name().c_str(),
-         Py_TYPE(obj)->tp_name,
--        idx + 1);
-+        (long)(idx + 1));
-   }
- }
-   return res;
diff --git a/packages/python-torch/0004-memalign.patch b/packages/python-torch/0004-memalign.patch
index b4762db7ea..5e609f44c1 100644
--- a/packages/python-torch/0004-memalign.patch
+++ b/packages/python-torch/0004-memalign.patch
@@ -1,16 +1,15 @@
-diff -uNr pytorch-v1.12.1/torch/csrc/init_flatbuffer_module.cpp pytorch-v1.12.1.mod/torch/csrc/init_flatbuffer_module.cpp
---- pytorch-v1.12.1/torch/csrc/init_flatbuffer_module.cpp	2022-08-06 04:37:12.000000000 +0900
-+++ pytorch-v1.12.1.mod/torch/csrc/init_flatbuffer_module.cpp	2022-09-05 00:27:22.542774737 +0900
-@@ -27,7 +27,7 @@
+--- a/torch/csrc/jit/python/script_init.cpp
++++ b/torch/csrc/jit/python/script_init.cpp
+@@ -718,7 +718,7 @@
   std::shared_ptr<char> bytes_copy(
-      static_cast<char*>(_aligned_malloc(size, FLATBUFFERS_MAX_ALIGNMENT)),
+      static_cast<char*>(_aligned_malloc(size, kFlatbufferDataAlignmentBytes)),
       _aligned_free);
 -#elif defined(__APPLE__)
 +#elif defined(__APPLE__) || defined(__ANDROID__)
  void* p;
- ::posix_memalign(&p, FLATBUFFERS_MAX_ALIGNMENT, size);
+ ::posix_memalign(&p, kFlatbufferDataAlignmentBytes, size);
  TORCH_INTERNAL_ASSERT(p, "Could not allocate memory for flatbuffer");
---- a/third_party/pocketfft/pocketfft_hdronly.h.orig
+--- a/third_party/pocketfft/pocketfft_hdronly.h
 +++ b/third_party/pocketfft/pocketfft_hdronly.h
 @@ -149,7 +149,7 @@
  #endif
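
Note on 0004-memalign.patch: Android's Bionic libc has no _aligned_malloc, and C11 aligned_alloc only appeared in later API levels, so the patch routes __ANDROID__ through the existing posix_memalign branch used for __APPLE__. The standalone sketch below shows that allocation pattern; it is not the PyTorch source, and kAlignment/kSize are placeholder stand-ins for kFlatbufferDataAlignmentBytes and the real buffer size.

// Illustrative sketch only -- not the PyTorch source.
#if defined(_WIN32)
#include <malloc.h>   // _aligned_malloc / _aligned_free
#endif
#include <cstdio>
#include <cstdlib>
#include <memory>

int main() {
  constexpr std::size_t kAlignment = 64;  // power of two, >= sizeof(void*)
  constexpr std::size_t kSize = 1024;     // multiple of kAlignment (for aligned_alloc)

#if defined(_WIN32)
  std::shared_ptr<char> bytes(
      static_cast<char*>(_aligned_malloc(kSize, kAlignment)), _aligned_free);
#elif defined(__APPLE__) || defined(__ANDROID__)
  // Bionic and macOS both provide posix_memalign even where aligned_alloc
  // is unavailable, which is why the patch reuses this branch for Android.
  void* p = nullptr;
  if (::posix_memalign(&p, kAlignment, kSize) != 0)
    return 1;
  std::shared_ptr<char> bytes(static_cast<char*>(p), free);
#else
  std::shared_ptr<char> bytes(
      static_cast<char*>(aligned_alloc(kAlignment, kSize)), free);
#endif

  std::printf("allocated %zu bytes at %p\n", kSize, static_cast<void*>(bytes.get()));
  return 0;
}
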
diff --git a/packages/python-torch/0007-enable-cxx14-by-default.patch b/packages/python-torch/0007-enable-cxx17-by-default.patch
similarity index 74%
rename from packages/python-torch/0007-enable-cxx14-by-default.patch
rename to packages/python-torch/0007-enable-cxx17-by-default.patch
index 8e700b455e..e2b939cc01 100644
--- a/packages/python-torch/0007-enable-cxx14-by-default.patch
+++ b/packages/python-torch/0007-enable-cxx17-by-default.patch
@@ -1,4 +1,4 @@
-Enable C++14 by default, because protobuf only supports C++14 and newer.
+Enable C++17 by default, because torch needs at least C++17 to build since 2.1.0.
 
 --- a/third_party/onnx/CMakeLists.txt.orig
 +++ b/third_party/onnx/CMakeLists.txt
@@ -7,7 +7,7 @@ Enable C++14 by default, because protobuf only supports C++14 and newer.
  # For other platforms, set C++11 as standard for the whole project
  if(NOT MSVC)
 -  set(CMAKE_CXX_STANDARD 11)
-+  set(CMAKE_CXX_STANDARD 14)
++  set(CMAKE_CXX_STANDARD 17)
  else()
    string(APPEND CMAKE_CXX_FLAGS " /std:c++17")
  endif()
diff --git a/packages/python-torch/build.sh b/packages/python-torch/build.sh
index 8b6e0f36d7..95e30d23d6 100644
--- a/packages/python-torch/build.sh
+++ b/packages/python-torch/build.sh
@@ -2,9 +2,9 @@ TERMUX_PKG_HOMEPAGE=https://pytorch.org/
 TERMUX_PKG_DESCRIPTION="Tensors and Dynamic neural networks in Python"
 TERMUX_PKG_LICENSE="BSD 3-Clause"
 TERMUX_PKG_MAINTAINER="@termux"
-TERMUX_PKG_VERSION=2.0.1
-TERMUX_PKG_REVISION=3
+TERMUX_PKG_VERSION="2.1.0"
 TERMUX_PKG_SRCURL=git+https://github.com/pytorch/pytorch
+TERMUX_PKG_UPDATE_TAG_TYPE="latest-release-tag"
 TERMUX_PKG_DEPENDS="ffmpeg, libc++, libopenblas, libprotobuf, libzmq, opencv, python, python-numpy, python-pip"
 TERMUX_PKG_BUILD_DEPENDS="vulkan-headers, vulkan-loader-android"
 TERMUX_PKG_HOSTBUILD=true
@@ -16,7 +16,6 @@ TERMUX_PKG_EXTRA_CONFIGURE_ARGS="
 -DBUILD_CUSTOM_PROTOBUF=OFF
 -DBUILD_PYTHON=ON
 -DBUILD_TEST=OFF
--DCMAKE_CXX_STANDARD=14
 -DCMAKE_BUILD_TYPE=Release
 -DCMAKE_INSTALL_PREFIX=${TERMUX_PKG_SRCDIR}/torch
 -DCMAKE_PREFIX_PATH=${TERMUX_PYTHON_HOME}/site-packages
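
Note on 0001-do-not-error-on-cast-function-type-strict.patch: newer Clang (16+) adds a stricter -Wcast-function-type-strict under the -Wcast-function-type group, which fires on function-pointer casts between ABI-compatible but non-identical signatures, the kind that CPython-style method tables and extension code commonly rely on; with -Werror=cast-function-type those warnings would otherwise become hard errors, so the patch keeps the warning but drops the error promotion. The standalone sketch below shows the class of cast involved; it is illustrative only and not taken from PyTorch.

// Illustrative sketch only -- not PyTorch code. Shows the kind of cast that
// -Wcast-function-type-strict diagnoses even though the signatures are
// ABI-compatible.
#include <cstdio>

using GenericHandler = int (*)(void*);   // the type a dispatch table stores

int print_string(char* s) {              // actual signature: char*, not void*
  return std::printf("%s\n", s);
}

int main() {
  // Works in practice, but the function types are not identical, so with
  // -Werror=cast-function-type-strict this cast becomes a build error.
  GenericHandler h = reinterpret_cast<GenericHandler>(&print_string);

  char msg[] = "hello";
  // Cast back to the original type before calling to keep the call well-defined.
  return reinterpret_cast<int (*)(char*)>(h)(msg) < 0 ? 1 : 0;
}
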