misc/py-pytorch: New port: PyTorch: Tensors and dynamic neural networks in Python

Yuri Victorovich 2023-05-07 21:03:49 -07:00
parent 41bb0f8996
commit df996f8811
13 changed files with 302 additions and 0 deletions

misc/Makefile

@@ -450,6 +450,7 @@
    SUBDIR += py-pyprind
    SUBDIR += py-python-geohash
    SUBDIR += py-python-utils
+    SUBDIR += py-pytorch
    SUBDIR += py-qiskit-machine-learning
    SUBDIR += py-scikit-fusion
    SUBDIR += py-serverfiles

misc/py-pytorch/Makefile (new file, 57 lines)

@@ -0,0 +1,57 @@
PORTNAME=	pytorch
DISTVERSIONPREFIX=	v
DISTVERSION=	2.0.0
CATEGORIES=	misc # machine-learning
MASTER_SITES=	https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
DIST_SUBDIR=	${PORTNAME}

MAINTAINER=	yuri@FreeBSD.org
COMMENT=	PyTorch: Tensors and dynamic neural networks in Python
WWW=		https://pytorch.org/

LICENSE=	BSD3CLAUSE
LICENSE_FILE=	${WRKSRC}/LICENSE

BUILD_DEPENDS=	cmake:devel/cmake-core \
		gmake:devel/gmake \
		pybind11>0:devel/pybind11 \
		${LOCALBASE}/include/fxdiv.h:devel/fxdiv \
		${PYTHON_PKGNAMEPREFIX}typing-extensions>0:devel/py-typing-extensions@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}yaml>0:devel/py-yaml@${PY_FLAVOR}
LIB_DEPENDS=	libopenblas.so:math/openblas \
		libmpi.so:net/openmpi \
		libonnx.so:misc/onnx \
		libpthreadpool.so:devel/pthreadpool \
		libprotobuf.so:devel/protobuf \
		libsleef.so:math/sleef
RUN_DEPENDS=	${PYTHON_PKGNAMEPREFIX}filelock>0:sysutils/py-filelock@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}Jinja2>=0:devel/py-Jinja2@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}networkx>0:math/py-networkx@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}sympy>0:math/py-sympy@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}typing-extensions>0:devel/py-typing-extensions@${PY_FLAVOR}
RUN_DEPENDS+=	${PYTHON_PKGNAMEPREFIX}dill>0:devel/py-dill@${PY_FLAVOR} # optional dependency

USES=		compiler:c++14-lang localbase:ldflags python
USE_PYTHON=	distutils autoplist

MAKE_ENV=	USE_NINJA=no # ninja breaks for some reason
MAKE_ENV+=	BUILD_TEST=0 # do not build tests

LDFLAGS+=	-lexecinfo

BINARY_ALIAS=	make=${GMAKE}

POST_PLIST=	fix-plist

post-install: # strip binaries
	@${STRIP_CMD} \
		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/bin/torch_shm_manager \
		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/_C${PYTHON_EXT_SUFFIX}.so \
		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/_C_flatbuffer${PYTHON_EXT_SUFFIX}.so \
		${STAGEDIR}${PYTHON_SITELIBDIR}/functorch/_C${PYTHON_EXT_SUFFIX}.so \
		${STAGEDIR}${PYTHON_SITELIBDIR}/torch/lib/lib*.so

fix-plist: # remove the stray %%PYTHON_SITELIBDIR%%/caffe2 file
	@${REINPLACE_CMD} -e "s|.*/caffe2$$||" ${TMPPLIST}

.include <bsd.port.mk>
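
A hedged aside on LDFLAGS+= -lexecinfo: unlike glibc, FreeBSD ships backtrace(3) in a separate libexecinfo library rather than in libc, and parts of PyTorch (presumably c10's stack-trace support) call it. A minimal sketch of the call that would otherwise fail to link (illustrative only, not code from the port):

// Build on FreeBSD with: c++ -o bt bt.cpp -lexecinfo
#include <execinfo.h>   // backtrace(), backtrace_symbols()
#include <cstdio>
#include <cstdlib>

int main() {
    void* frames[16];
    int n = backtrace(frames, 16);               // capture up to 16 return addresses
    char** names = backtrace_symbols(frames, n); // resolve addresses to printable strings
    for (int i = 0; i < n; ++i)
        std::puts(names[i]);
    std::free(names);                            // caller frees the array
    return 0;
}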

misc/py-pytorch/distinfo (new file, 3 lines)

@@ -0,0 +1,3 @@
TIMESTAMP = 1683446868
SHA256 (pytorch/pytorch-v2.0.0.tar.gz) = cecc38b6d4256b810336edfc6119d7a57b701fdf1ba43c50001f31e2724fd8e2
SIZE (pytorch/pytorch-v2.0.0.tar.gz) = 276643781

@@ -0,0 +1,39 @@
--- CMakeLists.txt.orig 2023-04-03 19:45:59 UTC
+++ CMakeLists.txt
@@ -138,7 +138,7 @@ endif()
set(CPU_AARCH64 OFF)
set(CPU_INTEL OFF)
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "(AMD64|x86_64)")
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64|x86_64)")
set(CPU_INTEL ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)")
set(CPU_AARCH64 ON)
@@ -163,7 +163,7 @@ include(CMakeDependentOption)
option(ATEN_NO_TEST "Do not build ATen test binaries" OFF)
option(BUILD_BINARY "Build C++ binaries" OFF)
option(BUILD_DOCS "Build Caffe2 documentation" OFF)
-option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" ON)
+option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" OFF)
option(BUILD_PYTHON "Build Python binaries" ON)
option(BUILD_CAFFE2 "Master flag to build Caffe2" OFF)
option(BUILD_LITE_INTERPRETER "Master flag to build Lite Interpreter" OFF)
@@ -398,15 +398,15 @@ endif()
# USE_SYSTEM_LIBS being "OFF".
option(USE_SYSTEM_LIBS "Use all available system-provided libraries." OFF)
option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo." OFF)
-option(USE_SYSTEM_SLEEF "Use system-provided sleef." OFF)
+option(USE_SYSTEM_SLEEF "Use system-provided sleef." ON)
option(USE_SYSTEM_GLOO "Use system-provided gloo." OFF)
option(USE_SYSTEM_FP16 "Use system-provided fp16." OFF)
-option(USE_SYSTEM_PYBIND11 "Use system-provided PyBind11." OFF)
+option(USE_SYSTEM_PYBIND11 "Use system-provided PyBind11." ON)
option(USE_SYSTEM_PTHREADPOOL "Use system-provided pthreadpool." OFF)
option(USE_SYSTEM_PSIMD "Use system-provided psimd." OFF)
option(USE_SYSTEM_FXDIV "Use system-provided fxdiv." OFF)
option(USE_SYSTEM_BENCHMARK "Use system-provided google benchmark." OFF)
-option(USE_SYSTEM_ONNX "Use system-provided onnx." OFF)
+option(USE_SYSTEM_ONNX "Use system-provided onnx." ON)
option(USE_SYSTEM_XNNPACK "Use system-provided xnnpack." OFF)
option(USE_GOLD_LINKER "Use ld.gold to link" OFF)
if(USE_SYSTEM_LIBS)
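
The AMD64 → amd64 change is needed because, when not cross-compiling, CMake derives CMAKE_SYSTEM_PROCESSOR from uname(3) output, which reports the machine as "amd64" (lower case) on FreeBSD/x86-64 rather than "AMD64" or "x86_64"; the same lower-casing shows up again in the third_party/cpuinfo patch further down. A small sketch of what the build system sees (illustrative, not part of the port):

// c++ -o uname-demo uname-demo.cpp ; on FreeBSD/x86-64 this prints "FreeBSD amd64"
#include <sys/utsname.h>
#include <cstdio>

int main() {
    struct utsname u {};
    if (uname(&u) < 0)
        return 1;
    std::printf("%s %s\n", u.sysname, u.machine);  // CMake feeds this into CMAKE_SYSTEM_PROCESSOR
    return 0;
}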

@@ -0,0 +1,11 @@
--- aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h.orig 2023-05-07 16:59:15 UTC
+++ aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h
@@ -206,7 +206,7 @@ template <> class Vectorized<BFloat16> { (public)
}
return b;
}
- Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
+ Vectorized<BFloat16> map(__m256 (*const vop)(__m256)) const {
__m256 lo, hi;
cvtbf16_fp32(values, lo, hi);
const auto o1 = vop(lo);

@@ -0,0 +1,11 @@
--- aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h.orig 2023-05-07 17:07:36 UTC
+++ aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h
@@ -283,7 +283,7 @@ template <> class Vectorized<BFloat16> { (public)
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wignored-qualifiers"
- Vectorized<BFloat16> map(const __m512 (*const vop)(__m512)) const {
+ Vectorized<BFloat16> map(__m512 (*const vop)(__m512)) const {
__m512 lo, hi;
cvtbf16_fp32(values, lo, hi);
const auto o1 = vop(lo);
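
Both the vec256 and vec512 patches drop a const qualifier on the return type of the function pointer accepted by map(). A top-level const on a by-value __m256/__m512 return adds nothing for callers and draws -Wignored-qualifiers; depending on the toolchain it can also keep plainly declared vector kernels of type __m256 (*)(__m256), such as the system sleef routines this port links against, from binding to the pointer parameter, which is the likely motivation. A reduced sketch of the patched shape (names here are illustrative, not PyTorch's):

// Requires AVX: c++ -mavx -o map-demo map-demo.cpp
#include <immintrin.h>

// A plain vector kernel, the shape the system sleef exposes: __m256 f(__m256).
static __m256 twice(__m256 x) { return _mm256_add_ps(x, x); }

// Patched form of the callback parameter: no const on the returned __m256.
static __m256 apply(__m256 (*const op)(__m256), __m256 v) { return op(v); }

int main() {
    __m256 v = _mm256_set1_ps(1.0f);
    __m256 r = apply(twice, v);          // binds cleanly; every lane holds 2.0f
    float out[8];
    _mm256_storeu_ps(out, r);
    return out[0] == 2.0f ? 0 : 1;
}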

@@ -0,0 +1,78 @@
--- aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h.orig 2023-05-07 08:51:40 UTC
+++ aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
@@ -39,7 +39,7 @@ namespace {
// use `cidx/idx` to refer to `compressed_indices/plain_indices` respectively.
INVARIANT_CHECK_FUNC_API
-_assert(const bool cond, const char* const message) {
+__assert(const bool cond, const char* const message) {
#ifdef GPUCC
CUDA_KERNEL_ASSERT(cond && message);
#else
@@ -57,9 +57,9 @@ INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
const index_t& zero) {
const bool invariant = cidx == zero;
if (cdim_name == CDimName::CRow) {
- _assert(invariant, "`crow_indices[..., 0] == 0` is not satisfied.");
+ __assert(invariant, "`crow_indices[..., 0] == 0` is not satisfied.");
} else {
- _assert(invariant, "`ccol_indices[..., 0] == 0` is not satisfied.");
+ __assert(invariant, "`ccol_indices[..., 0] == 0` is not satisfied.");
}
}
@@ -71,9 +71,9 @@ INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
const index_t& nnz) {
const bool invariant = cidx == nnz;
if (cdim_name == CDimName::CRow) {
- _assert(invariant, "`crow_indices[..., -1] == nnz` is not satisfied.");
+ __assert(invariant, "`crow_indices[..., -1] == nnz` is not satisfied.");
} else {
- _assert(invariant, "`ccol_indices[..., -1] == nnz` is not satisfied.");
+ __assert(invariant, "`ccol_indices[..., -1] == nnz` is not satisfied.");
}
}
@@ -88,11 +88,11 @@ INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_loc
const auto s_cidx = cidx_next - cidx;
const bool invariant = zero <= s_cidx && s_cidx <= dim;
if (cdim_name == CDimName::CRow) {
- _assert(
+ __assert(
invariant,
"`0 <= crow_indices[..., 1:] - crow_indices[..., :-1] <= ncols` is not satisfied.");
} else {
- _assert(
+ __assert(
invariant,
"`0 <= ccol_indices[..., 1:] - ccol_indices[..., :-1] <= nrows` is not satisfied.");
}
@@ -107,9 +107,9 @@ INVARIANT_CHECK_FUNC_API _check_idx_bounds(
const index_t& dim) {
const bool invariant = zero <= idx && idx < dim;
if (cdim_name == CDimName::CRow) {
- _assert(invariant, "`0 <= col_indices < ncols` is not satisfied.");
+ __assert(invariant, "`0 <= col_indices < ncols` is not satisfied.");
} else {
- _assert(invariant, "`0 <= row_indices < nrows` is not satisfied.");
+ __assert(invariant, "`0 <= row_indices < nrows` is not satisfied.");
}
}
@@ -128,14 +128,14 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_va
for (auto* RESTRICT curr = slice_begin + 1; curr < slice_end; ++curr) {
const auto invariant = *(curr - 1) < *curr;
if (cdim_name == CDimName::CRow) {
- _assert(
+ __assert(
invariant,
"`col_indices[..., crow_indices[..., i - 1]:crow_indices[..., i]] "
"for all i = 1, ..., nrows "
"are sorted and distinct along the last dimension values` "
"is not satisfied.");
} else {
- _assert(
+ __assert(
invariant,
"`row_indices[..., ccol_indices[..., i - 1]:ccol_indices[..., i]] "
"for all i = 1, ..., ncols "

@@ -0,0 +1,11 @@
--- cmake/Dependencies.cmake.orig 2022-12-16 00:23:46 UTC
+++ cmake/Dependencies.cmake
@@ -339,7 +339,7 @@ if(USE_NNPACK OR USE_QNNPACK OR USE_PYTORCH_QNNPACK OR
set(DISABLE_NNPACK_AND_FAMILY ON)
endif()
else()
- if(NOT IOS AND NOT (CMAKE_SYSTEM_NAME MATCHES "^(Android|Linux|Darwin|Windows)$"))
+ if(NOT IOS AND NOT (CMAKE_SYSTEM_NAME MATCHES "^(Android|Linux|FreeBSD|Darwin|Windows)$"))
message(WARNING
"Target platform \"${CMAKE_SYSTEM_NAME}\" is not supported in {Q/X}NNPACK. "
"Supported platforms are Android, iOS, Linux, and macOS. "

@@ -0,0 +1,11 @@
--- cmake/public/mkldnn.cmake.orig 2022-12-16 00:23:46 UTC
+++ cmake/public/mkldnn.cmake
@@ -4,7 +4,7 @@ if(CPU_AARCH64)
include(${CMAKE_CURRENT_LIST_DIR}/ComputeLibrary.cmake)
endif()
-find_package(MKLDNN QUIET)
+find_package(MKLDNN REQUIRED)
if(NOT TARGET caffe2::mkldnn)
add_library(caffe2::mkldnn INTERFACE IMPORTED)

@@ -0,0 +1,56 @@
--- third_party/cpuinfo/CMakeLists.txt.orig 2023-04-03 19:46:00 UTC
+++ third_party/cpuinfo/CMakeLists.txt
@@ -65,7 +65,7 @@ IF(NOT CMAKE_SYSTEM_PROCESSOR)
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
ENDIF()
-ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?|armv[5-8].*|aarch64|arm64|ARM64)$")
+ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?|armv[5-8].*|aarch64|arm64|ARM64)$")
MESSAGE(WARNING
"Target processor architecture \"${CPUINFO_TARGET_PROCESSOR}\" is not supported in cpuinfo. "
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
@@ -77,7 +77,7 @@ IF(NOT CMAKE_SYSTEM_NAME)
"Target operating system is not specified. "
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
-ELSEIF(NOT CMAKE_SYSTEM_NAME MATCHES "^(Windows|CYGWIN|MSYS|Darwin|Linux|Android)$")
+ELSEIF(NOT CMAKE_SYSTEM_NAME MATCHES "^(Windows|CYGWIN|MSYS|Darwin|Linux|FreeBSD|Android)$")
IF(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14" AND NOT CMAKE_SYSTEM_NAME STREQUAL "iOS")
MESSAGE(WARNING
"Target operating system \"${CMAKE_SYSTEM_NAME}\" is not supported in cpuinfo. "
@@ -123,7 +123,7 @@ SET(CPUINFO_SRCS
src/cache.c)
IF(CPUINFO_SUPPORTED_PLATFORM)
- IF(NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND (CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$" OR IOS_ARCH MATCHES "^(i386|x86_64)$"))
+ IF(NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND (CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$" OR IOS_ARCH MATCHES "^(i386|x86_64)$"))
LIST(APPEND CPUINFO_SRCS
src/x86/init.c
src/x86/info.c
@@ -324,7 +324,7 @@ ENDIF()
# ---[ cpuinfo mock library and mock tests
IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_MOCK_TESTS)
SET(CPUINFO_MOCK_SRCS "${CPUINFO_SRCS}")
- IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
+ IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$")
LIST(APPEND CPUINFO_MOCK_SRCS src/x86/mockcpuid.c)
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
@@ -768,7 +768,7 @@ IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_UNIT_T
ADD_TEST(NAME get-current-test COMMAND get-current-test)
ENDIF()
- IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
+ IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$")
ADD_EXECUTABLE(brand-string-test test/name/brand-string.cc)
CPUINFO_TARGET_ENABLE_CXX11(brand-string-test)
CPUINFO_TARGET_RUNTIME_LIBRARY(brand-string-test)
@@ -835,7 +835,7 @@ IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_TOOLS)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpuinfo-dump)
ENDIF()
- IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
+ IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|amd64|x86(_64)?)$")
ADD_EXECUTABLE(cpuid-dump tools/cpuid-dump.c)
CPUINFO_TARGET_ENABLE_C99(cpuid-dump)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpuid-dump)

@@ -0,0 +1,10 @@
--- third_party/fbgemm/third_party/asmjit/src/asmjit/core/virtmem.cpp.orig 2022-12-16 00:23:48 UTC
+++ third_party/fbgemm/third_party/asmjit/src/asmjit/core/virtmem.cpp
@@ -45,6 +45,7 @@
#endif
#include <atomic>
+#include <sys/stat.h>
#if defined(__APPLE__) || defined(__BIONIC__)
#define ASMJIT_VM_SHM_DETECT 0
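
The extra #include <sys/stat.h> looks like the usual transitive-include fix: asmjit's POSIX shared-memory path presumably relies on declarations (for example the mode bits used with shm_open(2)) that glibc happens to expose through other headers, while FreeBSD's headers do not. A standalone sketch of the POSIX pattern, which per the standard wants all three headers spelled out (illustrative, not asmjit code):

// c++ -o shm-demo shm-demo.cpp   (plain libc on FreeBSD; add -lrt on older glibc)
#include <sys/mman.h>   // shm_open(), shm_unlink()
#include <sys/stat.h>   // S_IRUSR, S_IWUSR -- the header the patch adds
#include <fcntl.h>      // O_CREAT, O_EXCL, O_RDWR
#include <unistd.h>     // close()
#include <cstdio>

int main() {
    int fd = shm_open("/patch-demo", O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
    if (fd < 0) {
        std::perror("shm_open");
        return 1;
    }
    std::puts("shared memory object created");
    close(fd);
    shm_unlink("/patch-demo");
    return 0;
}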

@@ -0,0 +1,11 @@
--- third_party/kineto/libkineto/src/ThreadUtil.cpp.orig 2023-04-03 19:46:02 UTC
+++ third_party/kineto/libkineto/src/ThreadUtil.cpp
@@ -57,7 +57,7 @@ int32_t systemThreadId() {
#elif defined _MSC_VER
_sysTid = (int32_t)GetCurrentThreadId();
#else
- _sysTid = (int32_t)syscall(SYS_gettid);
+ _sysTid = (int32_t)syscall(SYS_getpid);
#endif
}
return _sysTid;
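
FreeBSD has no SYS_gettid, so the patch falls back to the process id; that keeps kineto compiling at the cost of per-thread granularity in its traces. For reference, a FreeBSD-native way to get a real thread id would be pthread_getthreadid_np(3); a hedged sketch of that alternative (not what the port ships):

// FreeBSD: c++ -o tid-demo tid-demo.cpp -lpthread
#include <cstdint>
#include <cstdio>
#if defined(__FreeBSD__)
#include <pthread_np.h>    // pthread_getthreadid_np()
#else
#include <sys/syscall.h>
#include <unistd.h>
#endif

static int32_t currentSystemTid() {
#if defined(__FreeBSD__)
    return static_cast<int32_t>(pthread_getthreadid_np());  // LWP id of the calling thread
#else
    return static_cast<int32_t>(syscall(SYS_gettid));        // Linux thread id
#endif
}

int main() {
    std::printf("system thread id: %d\n", currentSystemTid());
    return 0;
}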

misc/py-pytorch/pkg-descr (new file, 3 lines)

@@ -0,0 +1,3 @@
PyTorch is a Python package that provides two high-level features:
* Tensor computation (like NumPy) with strong GPU acceleration
* Deep neural networks built on a tape-based autograd system
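
The installed package also ships the libtorch C++ libraries and headers under ${PYTHON_SITELIBDIR}/torch, so both bullet points can be exercised from C++ as well. A minimal, hedged sketch of the tape-based autograd; the include and library paths depend on where the port puts torch/include and torch/lib:

// Roughly: c++ -std=c++17 autograd-demo.cpp -I<sitelib>/torch/include \
//   -I<sitelib>/torch/include/torch/csrc/api/include -L<sitelib>/torch/lib -ltorch -ltorch_cpu -lc10
#include <torch/torch.h>
#include <iostream>

int main() {
    // A 2x2 tensor that records operations on the autograd tape.
    torch::Tensor x = torch::ones({2, 2}, torch::requires_grad());
    torch::Tensor y = (x * x + 3 * x).sum();

    // Reverse-mode pass: d(y)/d(x) = 2*x + 3 = 5 for every element of x.
    y.backward();
    std::cout << x.grad() << std::endl;
    return 0;
}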