blob: 3887f7e75909c26b7eb193d1b28f4e8365c10e08 [file] [log] [blame]
# WARNING: DO NOT EDIT THIS FILE DIRECTLY!!!
# See the README.md in this directory.
# IMPORTANT: To update Docker image version, please first update
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/pytorch/DockerVersion.groovy and
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/caffe2/DockerVersion.groovy,
# and then update DOCKER_IMAGE_VERSION at the top of the following files:
# * cimodel/data/pytorch_build_definitions.py
# * cimodel/data/caffe2_build_definitions.py
# And the inline copies of the variable in
# * verbatim-sources/job-specs-custom.yml
# (grep for DOCKER_IMAGE)
version: 2.1

# Shared docker-executor auth: pulls images from ECR with a read-write IAM user.
docker_config_defaults: &docker_config_defaults
  user: jenkins
  aws_auth:
    # This IAM user only allows read-write access to ECR
    aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4}
    aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4}
# This system setup script is meant to run before the CI-related scripts, e.g.,
# installing Git client, checking out code, setting up CI env, and
# building/testing.
setup_linux_system_environment: &setup_linux_system_environment
  name: Set Up System Environment
  no_output_timeout: "1h"
  command: ~/workspace/.circleci/scripts/setup_linux_system_environment.sh
# NB: This (and the command below) must be run after attaching
# ~/workspace. This is NOT the default working directory (that's
# ~/project); this workspace is generated by the setup job.
should_run_job: &should_run_job
  name: Should Run Job After attach_workspace
  no_output_timeout: "2m"
  command: ~/workspace/.circleci/scripts/should_run_job.sh
# Sets up the CI environment (docker login, env vars, etc.); must run after
# attach_workspace, like the step above.
setup_ci_environment: &setup_ci_environment
  name: Set Up CI Environment After attach_workspace
  no_output_timeout: "1h"
  command: ~/workspace/.circleci/scripts/setup_ci_environment.sh
# Installs expect and moreutils so that we can call `unbuffer` and `ts`.
# Also installs OpenMP
# !!!!NOTE!!!! this is copied into a binary_macos_brew_update job which is the
# same but does not install libomp. If you are changing this, consider if you
# need to change that step as well.
macos_brew_update: &macos_brew_update
  name: Brew update and install moreutils, expect and libomp
  no_output_timeout: "1h"
  command: |
    set -ex
    # Update repositories manually
    for path in $(find /usr/local/Homebrew -type d -name .git)
    do
      cd $path/..
      git fetch --depth=1 origin
      git reset --hard origin/master
    done
    export HOMEBREW_NO_AUTO_UPDATE=1
    # moreutils installs a `parallel` executable by default, which conflicts
    # with the executable from the GNU `parallel`, so we must unlink GNU
    # `parallel` first, and relink it afterwards
    brew unlink parallel
    brew install moreutils
    brew link parallel --overwrite
    brew install expect
    brew install libomp
# Same as macos_brew_update above, but installs libtool (for iOS toolchains)
# instead of libomp.
ios_brew_update: &ios_brew_update
  name: Brew update and install iOS toolchains
  no_output_timeout: "1h"
  command: |
    set -ex
    # Update repositories manually
    for path in $(find /usr/local/Homebrew -type d -name .git)
    do
      cd $path/..
      git fetch --depth=1 origin
      git reset --hard origin/master
    done
    export HOMEBREW_NO_AUTO_UPDATE=1
    # moreutils installs a `parallel` executable by default, which conflicts
    # with the executable from the GNU `parallel`, so we must unlink GNU
    # `parallel` first, and relink it afterwards
    brew unlink parallel
    brew install moreutils
    brew link parallel --overwrite
    brew install expect
    brew install libtool
##############################################################################
# Binary build (nightly builds) defaults
# The binary builds use the docker executor b/c at time of writing the machine
# executor is limited to only two cores and is painfully slow (4.5+ hours per
# GPU build). But the docker executor cannot be run with --runtime=nvidia, and
# so the binary test/upload jobs must run on a machine executor. The package
# built in the build job is persisted to the workspace, which the test jobs
# expect. The test jobs just run a few quick smoke tests (very similar to the
# second-round-user-facing smoke tests above) and then upload the binaries to
# their final locations. The upload part requires credentials that should only
# be available to org-members.
#
# binary_checkout MUST be run before other commands here. This is because the
# other commands are written in .circleci/scripts/*.sh , so the pytorch source
# code must be downloaded on the machine before they can be run. We cannot
# inline all the code into this file, since that would cause the yaml size to
# explode past 4 MB (all the code in the command section is just copy-pasted to
# everywhere in the .circleci/config.yml file where it appears).
##############################################################################
# Checks out the Pytorch and Builder repos (always both of them), and places
# them in the right place depending on what executor we're running on. We curl
# our .sh file from the interweb to avoid yaml size bloat. Note that many jobs
# do not need both the pytorch and builder repos, so this is a little wasteful
# (smoke tests and upload jobs do not need the pytorch repo).
binary_checkout: &binary_checkout
  name: Checkout pytorch/builder repo
  command: ~/workspace/.circleci/scripts/binary_checkout.sh
# Parses circleci arguments in a consistent way, essentially routing to the
# correct pythonXgccXcudaXos build we want
binary_populate_env: &binary_populate_env
  name: Set up binary env variables
  command: ~/workspace/.circleci/scripts/binary_populate_env.sh
# Installs a miniconda used by the binary test/upload jobs.
binary_install_miniconda: &binary_install_miniconda
  name: Install miniconda
  no_output_timeout: "1h"
  command: ~/workspace/.circleci/scripts/binary_install_miniconda.sh
# This section is used in the binary_test and smoke_test jobs. It expects
# 'binary_populate_env' to have populated /home/circleci/project/env and it
# expects another section to populate /home/circleci/project/ci_test_script.sh
# with the code to run in the docker
binary_run_in_docker: &binary_run_in_docker
  name: Run in docker
  # This step only runs on circleci linux machine executors that themselves
  # need to start docker images
  command: ~/workspace/.circleci/scripts/binary_run_in_docker.sh
# This is copied almost verbatim from the macos_brew_update job
# In version 2.1 and above we could make this a command and pass a parameter to
# it, but in this version there is no way to pass a parameter to a step
binary_macos_brew_update: &binary_macos_brew_update
  name: Brew update and install moreutils and expect
  no_output_timeout: "1h"
  command: |
    set -eux -o pipefail
    # See https://discourse.brew.sh/t/fetching-homebrew-repos-is-slow/5374/3
    brew untap caskroom/homebrew-cask
    # moreutils installs a `parallel` executable by default, which conflicts
    # with the executable from the GNU `parallel`, so we must unlink GNU
    # `parallel` first, and relink it afterwards
    brew update
    brew unlink parallel
    brew install moreutils
    brew link parallel --overwrite
    brew install expect
##############################################################################
# Build parameters
##############################################################################
pytorch_params: &pytorch_params
  parameters:
    build_environment:
      type: string
      default: ""
    docker_image:
      type: string
      default: ""
    resource_class:
      type: string
      default: "large"
    use_cuda_docker_runtime:
      type: string
      default: ""
  environment:
    BUILD_ENVIRONMENT: << parameters.build_environment >>
    DOCKER_IMAGE: << parameters.docker_image >>
    USE_CUDA_DOCKER_RUNTIME: << parameters.use_cuda_docker_runtime >>
  resource_class: << parameters.resource_class >>
pytorch_ios_params: &pytorch_ios_params
  parameters:
    build_environment:
      type: string
      default: ""
    ios_arch:
      type: string
      default: ""
    ios_platform:
      type: string
      default: ""
    use_nnpack:
      type: string
      default: "ON"
  environment:
    BUILD_ENVIRONMENT: << parameters.build_environment >>
    IOS_ARCH: << parameters.ios_arch >>
    IOS_PLATFORM: << parameters.ios_platform >>
    USE_NNPACK: << parameters.use_nnpack >>
caffe2_params: &caffe2_params
  parameters:
    build_environment:
      type: string
      default: ""
    build_ios:
      type: string
      default: ""
    docker_image:
      type: string
      default: ""
    use_cuda_docker_runtime:
      type: string
      default: ""
    build_only:
      type: string
      default: ""
    resource_class:
      type: string
      default: "large"
  environment:
    BUILD_ENVIRONMENT: << parameters.build_environment >>
    BUILD_IOS: << parameters.build_ios >>
    USE_CUDA_DOCKER_RUNTIME: << parameters.use_cuda_docker_runtime >>
    DOCKER_IMAGE: << parameters.docker_image >>
    BUILD_ONLY: << parameters.build_only >>
  resource_class: << parameters.resource_class >>
binary_linux_build_params: &binary_linux_build_params
  parameters:
    build_environment:
      type: string
      default: ""
    docker_image:
      type: string
      default: ""
    libtorch_variant:
      type: string
      default: ""
    resource_class:
      type: string
      default: "2xlarge+"
  environment:
    BUILD_ENVIRONMENT: << parameters.build_environment >>
    LIBTORCH_VARIANT: << parameters.libtorch_variant >>
  resource_class: << parameters.resource_class >>
  docker:
    - image: << parameters.docker_image >>
binary_linux_test_upload_params: &binary_linux_test_upload_params
  parameters:
    build_environment:
      type: string
      default: ""
    docker_image:
      type: string
      default: ""
    libtorch_variant:
      type: string
      default: ""
    resource_class:
      type: string
      default: "medium"
    use_cuda_docker_runtime:
      type: string
      default: ""
  environment:
    BUILD_ENVIRONMENT: << parameters.build_environment >>
    DOCKER_IMAGE: << parameters.docker_image >>
    USE_CUDA_DOCKER_RUNTIME: << parameters.use_cuda_docker_runtime >>
    LIBTORCH_VARIANT: << parameters.libtorch_variant >>
  resource_class: << parameters.resource_class >>
binary_mac_params: &binary_mac_params
  parameters:
    build_environment:
      type: string
      default: ""
  environment:
    BUILD_ENVIRONMENT: << parameters.build_environment >>
##############################################################################
# Job specs
##############################################################################
jobs:
  pytorch_linux_build:
    <<: *pytorch_params
    machine:
      image: ubuntu-1604:201903-01
    steps:
      # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
      - attach_workspace:
          at: ~/workspace
      - run:
          <<: *should_run_job
      - run:
          <<: *setup_linux_system_environment
      - checkout
      - run:
          <<: *setup_ci_environment
      - run:
          name: Build
          no_output_timeout: "1h"
          command: |
            set -e
            # Pull Docker image and run build
            echo "DOCKER_IMAGE: "${DOCKER_IMAGE}
            docker pull ${DOCKER_IMAGE} >/dev/null
            export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
            git submodule sync && git submodule update -q --init --recursive
            docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace
            if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
              NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
            fi
            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
            echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
            # Push intermediate Docker image for next phase to use
            if [ -z "${BUILD_ONLY}" ]; then
              # Note [Special build images]
              # The namedtensor and xla builds use the same docker image as
              # pytorch-linux-trusty-py3.6-gcc5.4-build. In the push step, we have to
              # distinguish between them so the test can pick up the correct image.
              output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}
              if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
                export COMMIT_DOCKER_IMAGE=$output_image-namedtensor
              elif [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
                export COMMIT_DOCKER_IMAGE=$output_image-xla
              elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_64"* ]]; then
                export COMMIT_DOCKER_IMAGE=$output_image-android-x86_64
              elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v7a"* ]]; then
                export COMMIT_DOCKER_IMAGE=$output_image-android-arm-v7a
              elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v8a"* ]]; then
                export COMMIT_DOCKER_IMAGE=$output_image-android-arm-v8a
              elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_32"* ]]; then
                export COMMIT_DOCKER_IMAGE=$output_image-android-x86_32
              else
                export COMMIT_DOCKER_IMAGE=$output_image
              fi
              docker commit "$id" ${COMMIT_DOCKER_IMAGE}
              docker push ${COMMIT_DOCKER_IMAGE}
            fi
pytorch_linux_test:
<<: *pytorch_params
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
name: Test
no_output_timeout: "90m"
command: |
set -e
# See Note [Special build images]
output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}
if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
export COMMIT_DOCKER_IMAGE=$output_image-namedtensor
export NAMED_FLAG="export BUILD_NAMEDTENSOR=1 && export TEST_NAMEDTENSOR=1"
elif [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
export COMMIT_DOCKER_IMAGE=$output_image-xla
else
export COMMIT_DOCKER_IMAGE=$output_image
fi
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
else
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
fi
if [[ ${BUILD_ENVIRONMENT} == *"multigpu"* ]]; then
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
else
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
fi
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
caffe2_linux_build:
<<: *caffe2_params
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *setup_linux_system_environment
- checkout
- run:
<<: *setup_ci_environment
- run:
name: Build
no_output_timeout: "1h"
command: |
set -e
cat >/home/circleci/project/ci_build_script.sh \<<EOL
# =================== The following code will be executed inside Docker container ===================
set -ex
export BUILD_ENVIRONMENT="$BUILD_ENVIRONMENT"
# Reinitialize submodules
git submodule sync && git submodule update -q --init --recursive
# conda must be added to the path for Anaconda builds (this location must be
# the same as that in install_anaconda.sh used to build the docker image)
if [[ "${BUILD_ENVIRONMENT}" == conda* ]]; then
export PATH=/opt/conda/bin:$PATH
sudo chown -R jenkins:jenkins '/opt/conda'
fi
# Build
./.jenkins/caffe2/build.sh
# Show sccache stats if it is running
if pgrep sccache > /dev/null; then
sccache --show-stats
fi
# =================== The above code will be executed inside Docker container ===================
EOL
chmod +x /home/circleci/project/ci_build_script.sh
echo "DOCKER_IMAGE: "${DOCKER_IMAGE}
docker pull ${DOCKER_IMAGE} >/dev/null
export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_build_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
# Push intermediate Docker image for next phase to use
if [ -z "${BUILD_ONLY}" ]; then
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-cmake-${CIRCLE_SHA1}
else
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
fi
docker commit "$id" ${COMMIT_DOCKER_IMAGE}
docker push ${COMMIT_DOCKER_IMAGE}
fi
caffe2_linux_test:
<<: *caffe2_params
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *setup_linux_system_environment
- run:
<<: *should_run_job
- run:
<<: *setup_ci_environment
- run:
name: Test
no_output_timeout: "1h"
command: |
set -e
# TODO: merge this into Caffe2 test.sh
cat >/home/circleci/project/ci_test_script.sh \<<EOL
# =================== The following code will be executed inside Docker container ===================
set -ex
export BUILD_ENVIRONMENT="$BUILD_ENVIRONMENT"
# libdc1394 (dependency of OpenCV) expects /dev/raw1394 to exist...
sudo ln /dev/null /dev/raw1394
# conda must be added to the path for Anaconda builds (this location must be
# the same as that in install_anaconda.sh used to build the docker image)
if [[ "${BUILD_ENVIRONMENT}" == conda* ]]; then
export PATH=/opt/conda/bin:$PATH
fi
# Upgrade SSL module to avoid old SSL warnings
pip -q install --user --upgrade pyOpenSSL ndg-httpsclient pyasn1
pip -q install --user -b /tmp/pip_install_onnx "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
# Build
./.jenkins/caffe2/test.sh
# Remove benign core dumps.
# These are tests for signal handling (including SIGABRT).
rm -f ./crash/core.fatal_signal_as.*
rm -f ./crash/core.logging_test.*
# =================== The above code will be executed inside Docker container ===================
EOL
chmod +x /home/circleci/project/ci_test_script.sh
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-cmake-${CIRCLE_SHA1}
else
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
fi
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
else
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
fi
docker cp /home/circleci/project/. "$id:/var/lib/jenkins/workspace"
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_test_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
caffe2_macos_build:
<<: *caffe2_params
macos:
xcode: "9.0"
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- checkout
- run:
<<: *macos_brew_update
- run:
name: Build
no_output_timeout: "1h"
command: |
set -e
export IN_CIRCLECI=1
brew install cmake
# Reinitialize submodules
git submodule sync && git submodule update -q --init --recursive
# Reinitialize path (see man page for path_helper(8))
eval `/usr/libexec/path_helper -s`
# Use Homebrew Python if configured to do so
if [ "${PYTHON_INSTALLATION}" == "homebrew" ]; then
export PATH=/usr/local/opt/python/libexec/bin:/usr/local/bin:$PATH
fi
pip -q install numpy
# Install Anaconda if we need to
if [ -n "${CAFFE2_USE_ANACONDA}" ]; then
rm -rf ${TMPDIR}/anaconda
curl -o ${TMPDIR}/conda.sh https://repo.continuum.io/miniconda/Miniconda${ANACONDA_VERSION}-latest-MacOSX-x86_64.sh
chmod +x ${TMPDIR}/conda.sh
/bin/bash ${TMPDIR}/conda.sh -b -p ${TMPDIR}/anaconda
rm -f ${TMPDIR}/conda.sh
export PATH="${TMPDIR}/anaconda/bin:${PATH}"
source ${TMPDIR}/anaconda/bin/activate
fi
# Install sccache
sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
sudo chmod +x /usr/local/bin/sccache
export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
# This IAM user allows write access to S3 bucket for sccache
set +x
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
set -x
export SCCACHE_BIN=${PWD}/sccache_bin
mkdir -p ${SCCACHE_BIN}
if which sccache > /dev/null; then
printf "#!/bin/sh\nexec sccache $(which clang++) \$*" > "${SCCACHE_BIN}/clang++"
chmod a+x "${SCCACHE_BIN}/clang++"
printf "#!/bin/sh\nexec sccache $(which clang) \$*" > "${SCCACHE_BIN}/clang"
chmod a+x "${SCCACHE_BIN}/clang"
export PATH="${SCCACHE_BIN}:$PATH"
fi
# Build
if [ "${BUILD_IOS:-0}" -eq 1 ]; then
unbuffer scripts/build_ios.sh 2>&1 | ts
elif [ -n "${CAFFE2_USE_ANACONDA}" ]; then
# All conda build logic should be in scripts/build_anaconda.sh
unbuffer scripts/build_anaconda.sh 2>&1 | ts
else
unbuffer scripts/build_local.sh 2>&1 | ts
fi
# Show sccache stats if it is running
if which sccache > /dev/null; then
sccache --show-stats
fi
binary_linux_build:
<<: *binary_linux_build_params
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
name: Install unbuffer and ts
command: |
set -eux -o pipefail
source /env
OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release`
if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then
retry yum -q -y install epel-release
retry yum -q -y install expect moreutils
elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
retry apt-get update
retry apt-get -y install expect moreutils
conda install -y -c eumetsat expect
conda install -y cmake
fi
- run:
name: Update compiler to devtoolset7
command: |
set -eux -o pipefail
source /env
if [[ "$DESIRED_DEVTOOLSET" == 'devtoolset7' ]]; then
source "/builder/update_compiler.sh"
# Env variables are not persisted into the next step
echo "export PATH=$PATH" >> /env
echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH" >> /env
else
echo "Not updating compiler"
fi
- run:
name: Build
no_output_timeout: "1h"
command: |
source "/pytorch/.circleci/scripts/binary_linux_build.sh"
- persist_to_workspace:
root: /
paths: final_pkgs
# This should really just be another step of the binary_linux_build job above.
# This isn't possible right now b/c the build job uses the docker executor
# (otherwise they'd be really really slow) but this one uses the macine
# executor (b/c we have to run the docker with --runtime=nvidia and we can't do
# that on the docker executor)
binary_linux_test:
<<: *binary_linux_test_upload_params
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
# TODO: We shouldn't attach the workspace multiple times
- attach_workspace:
at: /home/circleci/project
- run:
<<: *should_run_job
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
name: Prepare test code
no_output_timeout: "1h"
command: ~/workspace/.circleci/scripts/binary_linux_test.sh
- run:
<<: *binary_run_in_docker
binary_linux_upload:
<<: *binary_linux_test_upload_params
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- attach_workspace:
at: /home/circleci/project
- run:
<<: *binary_populate_env
- run:
<<: *binary_install_miniconda
- run:
name: Upload
no_output_timeout: "1h"
command: ~/workspace/.circleci/scripts/binary_linux_upload.sh
# Nighlty build smoke tests defaults
# These are the second-round smoke tests. These make sure that the binaries are
# correct from a user perspective, testing that they exist from the cloud are
# are runnable. Note that the pytorch repo is never cloned into these jobs
##############################################################################
smoke_linux_test:
<<: *binary_linux_test_upload_params
machine:
image: ubuntu-1604:201903-01
steps:
- attach_workspace:
at: ~/workspace
- attach_workspace:
at: /home/circleci/project
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
name: Test
no_output_timeout: "1h"
command: |
set -ex
cat >/home/circleci/project/ci_test_script.sh \<<EOL
# The following code will be executed inside Docker container
set -eux -o pipefail
/builder/smoke_test.sh
# The above code will be executed inside Docker container
EOL
- run:
<<: *binary_run_in_docker
smoke_mac_test:
<<: *binary_linux_test_upload_params
macos:
xcode: "9.0"
steps:
- attach_workspace:
at: ~/workspace
- attach_workspace: # TODO - we can `cp` from ~/workspace
at: /Users/distiller/project
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
<<: *binary_macos_brew_update
- run:
<<: *binary_install_miniconda
- run:
name: Build
no_output_timeout: "1h"
command: |
set -ex
source "/Users/distiller/project/env"
export "PATH=$workdir/miniconda/bin:$PATH"
# TODO unbuffer and ts this, but it breaks cause miniconda overwrites
# tclsh. But unbuffer and ts aren't that important so they're just
# disabled for now
./builder/smoke_test.sh
binary_mac_build:
<<: *binary_mac_params
macos:
xcode: "9.0"
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
<<: *binary_macos_brew_update
- run:
<<: *binary_install_miniconda
- run:
name: Build
no_output_timeout: "1h"
command: |
set -eux -o pipefail
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_build.sh"
cat "$script"
source "$script"
- run:
name: Test
no_output_timeout: "1h"
command: |
set -eux -o pipefail
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_test.sh"
cat "$script"
source "$script"
- persist_to_workspace:
root: /Users/distiller/project
paths: final_pkgs
binary_mac_upload: &binary_mac_upload
<<: *binary_mac_params
macos:
xcode: "9.0"
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
<<: *binary_macos_brew_update
- run:
<<: *binary_install_miniconda
- attach_workspace: # TODO - we can `cp` from ~/workspace
at: /Users/distiller/project
- run:
name: Upload
no_output_timeout: "10m"
command: |
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_upload.sh"
cat "$script"
source "$script"
setup:
docker:
- image: circleci/python:3.7.3
steps:
- checkout
- run:
name: Ensure config is up to date
command: ./ensure-consistency.py
working_directory: .circleci
- run:
name: Save commit message
command: git log --format='%B' -n 1 HEAD > .circleci/scripts/COMMIT_MSG
# Note [Workspace for CircleCI scripts]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# In the beginning, you wrote your CI scripts in a
# .circleci/config.yml file, and life was good. Your CI
# configurations flourished and multiplied.
#
# Then one day, CircleCI cometh down high and say, "Your YAML file
# is too biggeth, it stresses our servers so." And thus they
# asketh us to smite the scripts in the yml file.
#
# But you can't just put the scripts in the .circleci folder,
# because in some jobs, you don't ever actually checkout the
# source repository. Where you gonna get the scripts from?
#
# Here's how you do it: you persist .circleci/scripts into a
# workspace, attach the workspace in your subjobs, and run all
# your scripts from there.
- persist_to_workspace:
root: .
paths: .circleci/scripts
pytorch_short_perf_test_gpu:
environment:
BUILD_ENVIRONMENT: pytorch-short-perf-test-gpu
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
PYTHON_VERSION: "3.6"
USE_CUDA_DOCKER_RUNTIME: "1"
resource_class: gpu.medium
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
name: Perf Test
no_output_timeout: "1h"
command: |
set -e
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
docker cp $id:/var/lib/jenkins/workspace/env /home/circleci/project/env
# This IAM user allows write access to S3 bucket for perf test numbers
set +x
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PERF_TEST_S3_BUCKET_V4}" >> /home/circleci/project/env
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PERF_TEST_S3_BUCKET_V4}" >> /home/circleci/project/env
set -x
docker cp /home/circleci/project/env $id:/var/lib/jenkins/workspace/env
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/short-perf-test-gpu.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
pytorch_python_doc_push:
environment:
BUILD_ENVIRONMENT: pytorch-python-doc-push
# TODO: stop hardcoding this
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
resource_class: large
machine:
image: ubuntu-1604:201903-01
steps:
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
- attach_workspace:
at: ~/workspace
- run:
<<: *should_run_job
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
name: Doc Build and Push
no_output_timeout: "1h"
command: |
set -ex
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
# master branch docs push
if [[ "${CIRCLE_BRANCH}" == "master" ]]; then
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/master master site") | docker exec -u jenkins -i "$id" bash) 2>&1'
# stable release docs push. Due to some circleci limitations, we keep
# an eternal PR open for merging v1.2.0 -> master for this job.
# XXX: The following code is only run on the v1.2.0 branch, which might
# not be exactly the same as what you see here.
elif [[ "${CIRCLE_BRANCH}" == "v1.2.0" ]]; then
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/stable 1.2.0 site dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1'
# For open PRs: Do a dry_run of the docs build, don't push build
else
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/master master site dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1'
fi
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
# Save the docs build so we can debug any problems
export DEBUG_COMMIT_DOCKER_IMAGE=${COMMIT_DOCKER_IMAGE}-debug
docker commit "$id" ${DEBUG_COMMIT_DOCKER_IMAGE}
docker push ${DEBUG_COMMIT_DOCKER_IMAGE}
# Builds the C++ API docs inside the per-commit CUDA9/cuDNN7-py3 image and
# pushes them. Branch selection mirrors pytorch_python_doc_push:
#   master  -> push docs/master
#   v1.0.1  -> push docs/stable (that code actually runs from the v1.0.1 branch)
#   PRs     -> dry_run build only; nothing is pushed
pytorch_cpp_doc_push:
  environment:
    BUILD_ENVIRONMENT: pytorch-cpp-doc-push
    DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
  resource_class: large
  machine:
    image: ubuntu-1604:201903-01
  steps:
    # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - run:
        <<: *setup_linux_system_environment
    - run:
        <<: *setup_ci_environment
    - run:
        name: Doc Build and Push
        no_output_timeout: "1h"
        command: |
          set -ex
          # Use the image that the upstream build job committed for this SHA.
          export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
          echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
          docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
          export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
          # master branch docs push
          if [[ "${CIRCLE_BRANCH}" == "master" ]]; then
            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/cpp_doc_push_script.sh docs/master master") | docker exec -u jenkins -i "$id" bash) 2>&1'
          # stable release docs push. Due to some circleci limitations, we keep
          # an eternal PR open (#16502) for merging v1.0.1 -> master for this job.
          # XXX: The following code is only run on the v1.0.1 branch, which might
          # not be exactly the same as what you see here.
          elif [[ "${CIRCLE_BRANCH}" == "v1.0.1" ]]; then
            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/cpp_doc_push_script.sh docs/stable 1.0.1") | docker exec -u jenkins -i "$id" bash) 2>&1'
          # For open PRs: Do a dry_run of the docs build, don't push build
          else
            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/cpp_doc_push_script.sh docs/master master dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1'
          fi
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          # Save the docs build so we can debug any problems
          export DEBUG_COMMIT_DOCKER_IMAGE=${COMMIT_DOCKER_IMAGE}-debug
          docker commit "$id" ${DEBUG_COMMIT_DOCKER_IMAGE}
          docker push ${DEBUG_COMMIT_DOCKER_IMAGE}
# macOS 10.13 (Xcode 9) Python 3 build. Compiles via macos-build.sh with
# sccache, then persists the checkout and miniconda3 to the workspace so
# pytorch_macos_10_13_py3_test can pick up the build products.
pytorch_macos_10_13_py3_build:
  environment:
    BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-build
  macos:
    xcode: "9.0"
  steps:
    # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - checkout
    - run:
        <<: *macos_brew_update
    - run:
        name: Build
        no_output_timeout: "1h"
        command: |
          set -e
          export IN_CIRCLECI=1
          # Install sccache
          sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
          # This IAM user allows write access to S3 bucket for sccache
          # (set +x so the credentials are not echoed into the build log)
          set +x
          export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
          export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
          set -x
          chmod a+x .jenkins/pytorch/macos-build.sh
          unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
          # copy with -a to preserve relative structure (e.g., symlinks), and be recursive
          cp -a ~/project ~/workspace
    - persist_to_workspace:
        root: ~/workspace
        paths:
          - miniconda3
          - project
# macOS 10.13 (Xcode 9) Python 3 test job. Restores the build output that
# pytorch_macos_10_13_py3_build persisted to the workspace, then runs
# macos-test.sh against it.
pytorch_macos_10_13_py3_test:
  environment:
    BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-test
  macos:
    xcode: "9.0"
  steps:
    # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
    # This workspace also carries binaries from the build job
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - run:
        <<: *macos_brew_update
    - run:
        name: Test
        no_output_timeout: "1h"
        command: |
          set -e
          export IN_CIRCLECI=1
          # copy with -a to preserve relative structure (e.g., symlinks), and be recursive
          cp -a ~/workspace/project/. ~/project
          chmod a+x .jenkins/pytorch/macos-test.sh
          unbuffer .jenkins/pytorch/macos-test.sh 2>&1 | ts
# macOS CUDA build: installs the CUDA 9.2 toolkit and cuDNN 7.1 from the
# ossci-macos S3 mirror onto the runner, then builds with macos-build.sh.
# Unlike the plain py3 build job, nothing is persisted to the workspace
# (there is no companion test job for this configuration).
pytorch_macos_10_13_cuda9_2_cudnn7_py3_build:
  environment:
    BUILD_ENVIRONMENT: pytorch-macos-10.13-cuda9.2-cudnn7-py3-build
  macos:
    xcode: "9.0"
  steps:
    # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - checkout
    - run:
        <<: *macos_brew_update
    - run:
        name: Build
        no_output_timeout: "1h"
        command: |
          set -e
          export IN_CIRCLECI=1
          # Install CUDA 9.2
          sudo rm -rf ~/cuda_9.2.64_mac_installer.app || true
          curl https://s3.amazonaws.com/ossci-macos/cuda_9.2.64_mac_installer.zip -o ~/cuda_9.2.64_mac_installer.zip
          unzip ~/cuda_9.2.64_mac_installer.zip -d ~/
          sudo ~/cuda_9.2.64_mac_installer.app/Contents/MacOS/CUDAMacOSXInstaller --accept-eula --no-window
          sudo cp /usr/local/cuda/lib/libcuda.dylib /Developer/NVIDIA/CUDA-9.2/lib/libcuda.dylib
          # Remove the /usr/local/cuda symlink tree; the build uses the
          # /Developer/NVIDIA/CUDA-9.2 install populated above.
          sudo rm -rf /usr/local/cuda || true
          # Install cuDNN 7.1 for CUDA 9.2
          curl https://s3.amazonaws.com/ossci-macos/cudnn-9.2-osx-x64-v7.1.tgz -o ~/cudnn-9.2-osx-x64-v7.1.tgz
          rm -rf ~/cudnn-9.2-osx-x64-v7.1 && mkdir ~/cudnn-9.2-osx-x64-v7.1
          tar -xzvf ~/cudnn-9.2-osx-x64-v7.1.tgz -C ~/cudnn-9.2-osx-x64-v7.1
          sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/include/
          sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/lib/libcudnn* /Developer/NVIDIA/CUDA-9.2/lib/
          sudo chmod a+r /Developer/NVIDIA/CUDA-9.2/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/lib/libcudnn*
          # Install sccache
          sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
          # This IAM user allows write access to S3 bucket for sccache
          # (set +x so the credentials are not echoed into the build log)
          set +x
          export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
          export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
          set -x
          git submodule sync && git submodule update -q --init --recursive
          chmod a+x .jenkins/pytorch/macos-build.sh
          unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
# Assembles the Android AAR from the four per-ABI libtorch builds
# (x86_32, x86_64, arm-v7a, arm-v8a). The three non-x86_32 install trees
# are copied out of their containers and into the x86_32 container, the
# gradle build runs there, and the resulting artifacts.tgz is stored.
pytorch_android_gradle_build:
  environment:
    BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build
    DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339"
    PYTHON_VERSION: "3.6"
  resource_class: large
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - run:
        <<: *setup_linux_system_environment
    - checkout
    - run:
        <<: *setup_ci_environment
    - run:
        name: pytorch android gradle build
        no_output_timeout: "1h"
        command: |
          set -eux
          docker_image_commit=${DOCKER_IMAGE}-${CIRCLE_SHA1}
          docker_image_libtorch_android_x86_32=${docker_image_commit}-android-x86_32
          docker_image_libtorch_android_x86_64=${docker_image_commit}-android-x86_64
          docker_image_libtorch_android_arm_v7a=${docker_image_commit}-android-arm-v7a
          docker_image_libtorch_android_arm_v8a=${docker_image_commit}-android-arm-v8a
          echo "docker_image_commit: "${docker_image_commit}
          echo "docker_image_libtorch_android_x86_32: "${docker_image_libtorch_android_x86_32}
          echo "docker_image_libtorch_android_x86_64: "${docker_image_libtorch_android_x86_64}
          echo "docker_image_libtorch_android_arm_v7a: "${docker_image_libtorch_android_arm_v7a}
          echo "docker_image_libtorch_android_arm_v8a: "${docker_image_libtorch_android_arm_v8a}
          # x86_32
          docker pull ${docker_image_libtorch_android_x86_32} >/dev/null
          export id_x86_32=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32})
          export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_x86_32" bash) 2>&1'
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          # arm-v7a
          docker pull ${docker_image_libtorch_android_arm_v7a} >/dev/null
          export id_arm_v7a=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_arm_v7a})
          export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_arm_v7a" bash) 2>&1'
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          # -p: don't fail if the directory survived in a restored workspace
          mkdir -p ~/workspace/build_android_install_arm_v7a
          docker cp $id_arm_v7a:/var/lib/jenkins/workspace/build_android/install ~/workspace/build_android_install_arm_v7a
          # x86_64
          docker pull ${docker_image_libtorch_android_x86_64} >/dev/null
          export id_x86_64=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_64})
          export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_x86_64" bash) 2>&1'
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          mkdir -p ~/workspace/build_android_install_x86_64
          docker cp $id_x86_64:/var/lib/jenkins/workspace/build_android/install ~/workspace/build_android_install_x86_64
          # arm-v8a
          docker pull ${docker_image_libtorch_android_arm_v8a} >/dev/null
          export id_arm_v8a=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_arm_v8a})
          export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_arm_v8a" bash) 2>&1'
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          mkdir -p ~/workspace/build_android_install_arm_v8a
          docker cp $id_arm_v8a:/var/lib/jenkins/workspace/build_android/install ~/workspace/build_android_install_arm_v8a
          # Stage every ABI's install tree inside the x86_32 container, where
          # the gradle build runs.
          docker cp ~/workspace/build_android_install_arm_v7a $id_x86_32:/var/lib/jenkins/workspace/build_android_install_arm_v7a
          docker cp ~/workspace/build_android_install_x86_64 $id_x86_32:/var/lib/jenkins/workspace/build_android_install_x86_64
          docker cp ~/workspace/build_android_install_arm_v8a $id_x86_32:/var/lib/jenkins/workspace/build_android_install_arm_v8a
          # run gradle buildRelease
          export COMMAND='((echo "source ./workspace/env" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id_x86_32" bash) 2>&1'
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          mkdir -p ~/workspace/build_android_artifacts
          docker cp $id_x86_32:/var/lib/jenkins/workspace/android/artifacts.tgz ~/workspace/build_android_artifacts/
          # Save the post-gradle container for debugging.
          output_image=$docker_image_libtorch_android_x86_32-gradle
          docker commit "$id_x86_32" ${output_image}
          docker push ${output_image}
    - store_artifacts:
        path: ~/workspace/build_android_artifacts/artifacts.tgz
        destination: artifacts.tgz
# PR-only variant of the Android gradle build: builds the AAR for the
# x86_32 ABI alone (the full multi-ABI pytorch_android_gradle_build is
# reserved for non-PR runs).
pytorch_android_gradle_build-x86_32:
  environment:
    BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-only-x86_32
    DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339"
    PYTHON_VERSION: "3.6"
  resource_class: large
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - run:
        name: filter out not PR runs
        no_output_timeout: "5m"
        command: |
          # Halt (skip) this job unless it was triggered from a pull request.
          echo "CIRCLE_PULL_REQUEST: ${CIRCLE_PULL_REQUEST:-}"
          if [ -z "${CIRCLE_PULL_REQUEST:-}" ]; then
            circleci step halt
          fi
    - run:
        <<: *setup_linux_system_environment
    - checkout
    - run:
        <<: *setup_ci_environment
    - run:
        name: pytorch android gradle build only x86_32 (for PR)
        no_output_timeout: "1h"
        command: |
          set -e
          docker_image_libtorch_android_x86_32=${DOCKER_IMAGE}-${CIRCLE_SHA1}-android-x86_32
          echo "docker_image_libtorch_android_x86_32: "${docker_image_libtorch_android_x86_32}
          # x86
          docker pull ${docker_image_libtorch_android_x86_32} >/dev/null
          export id=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32})
          export COMMAND='((echo "source ./workspace/env" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
          echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
          mkdir -p ~/workspace/build_android_x86_32_artifacts
          docker cp $id:/var/lib/jenkins/workspace/android/artifacts.tgz ~/workspace/build_android_x86_32_artifacts/
          # Save the post-gradle container for debugging.
          output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}-android-gradle-x86_32
          docker commit "$id" ${output_image}
          docker push ${output_image}
    - store_artifacts:
        path: ~/workspace/build_android_x86_32_artifacts/artifacts.tgz
        destination: artifacts.tgz
# iOS build (simulator or device, parametrized via *pytorch_ios_params):
# installs miniconda and build dependencies on the macOS runner, then runs
# scripts/build_ios.sh with CMake flags derived from the job parameters
# (IOS_ARCH, IOS_PLATFORM, USE_NNPACK).
pytorch_ios_build:
  <<: *pytorch_ios_params
  macos:
    xcode: "10.2.1"
  steps:
    # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *should_run_job
    - checkout
    - run:
        <<: *ios_brew_update
    - run:
        name: Build
        no_output_timeout: "1h"
        command: |
          set -e
          export IN_CIRCLECI=1
          WORKSPACE=/Users/distiller/workspace
          PROJ_ROOT=/Users/distiller/project
          export TCLLIBPATH="/usr/local/lib"
          # Install conda
          curl -o ~/Downloads/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x ~/Downloads/conda.sh
          /bin/bash ~/Downloads/conda.sh -b -p ~/anaconda
          # NB: tilde is not expanded inside double quotes, so use $HOME here;
          # the previous "~/anaconda/bin" form put a literal ~ on PATH.
          export PATH="$HOME/anaconda/bin:${PATH}"
          source ~/anaconda/bin/activate
          # Install dependencies
          conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests
          # sync submodules
          cd ${PROJ_ROOT}
          git submodule sync
          git submodule update --init --recursive
          # export
          export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
          # run build script
          chmod a+x ${PROJ_ROOT}/scripts/build_ios.sh
          # Build the CMake flag list from the optional job parameters.
          CMAKE_ARGS=()
          if [ -n "${IOS_ARCH:-}" ]; then
            CMAKE_ARGS+=("-DIOS_ARCH=${IOS_ARCH}")
          fi
          if [ -n "${IOS_PLATFORM:-}" ]; then
            CMAKE_ARGS+=("-DIOS_PLATFORM=${IOS_PLATFORM}")
          fi
          if [ -n "${USE_NNPACK:-}" ]; then
            CMAKE_ARGS+=("-DUSE_NNPACK=${USE_NNPACK}")
          fi
          CMAKE_ARGS+=("-DBUILD_CAFFE2_MOBILE=OFF")
          CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
          CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
          # Quote the array expansion so flags containing spaces survive
          # word splitting (the whole point of building an array).
          unbuffer ${PROJ_ROOT}/scripts/build_ios.sh "${CMAKE_ARGS[@]}" 2>&1 | ts
# update_s3_htmls job
# These jobs create an html file for every cpu/cu## folder in s3. Each html
# file just stores the names of all the files in that folder (which are
# binary .whl files). This allows pip to install the latest version in a
# folder without having to know the latest date: pip's -f flag accepts an
# html file listing a bunch of packages, and pip then installs the one with
# the most recent version.
# Anchored base job: regenerates the html index files for the s3 folder
# named by PIP_UPLOAD_FOLDER (supplied by the concrete jobs that merge
# this anchor, e.g. update_s3_htmls_for_nightlies).
update_s3_htmls: &update_s3_htmls
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *setup_linux_system_environment
    - run:
        <<: *binary_checkout
    # N.B. we do not run binary_populate_env. The only variable we need is
    # PIP_UPLOAD_FOLDER (which is 'nightly/' for the nightlies and '' for
    # releases, and sometimes other things for special cases). Instead we
    # expect PIP_UPLOAD_FOLDER to be passed directly in the env. This is
    # because, unlike all the other binary jobs, these jobs only get run once,
    # in a separate workflow. They are not a step in other binary jobs like
    # build, test, upload.
    #
    # You could attach this to every job, or include it in the upload step if
    # you wanted. You would need to add binary_populate_env in this case to
    # make sure it has the same upload folder as the job it's attached to. This
    # function is idempotent, so it won't hurt anything; it's just a little
    # unnecessary.
    - run:
        name: Update s3 htmls
        no_output_timeout: "1h"
        command: |
          # set +x so the AWS credentials are not echoed into the build log
          set +x
          echo "declare -x \"AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}\"" >> /home/circleci/project/env
          echo "declare -x \"AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}\"" >> /home/circleci/project/env
          source /home/circleci/project/env
          set -eux -o pipefail
          # Retry with backoff: 1, 2, 4, 8 seconds
          retry () {
              $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
          }
          # NOTE(review): awscli==1.6 is very old; upload_binary_sizes pins
          # awscli==1.16.46 -- confirm this pin is intentional.
          retry pip install awscli==1.6
          "/home/circleci/project/builder/cron/update_s3_htmls.sh"
# Update s3 htmls for the nightlies
update_s3_htmls_for_nightlies:
  environment:
    # s3 subfolder whose html indices get regenerated
    PIP_UPLOAD_FOLDER: "nightly/"
  <<: *update_s3_htmls
# Update s3 htmls for the nightlies for devtoolset7
update_s3_htmls_for_nightlies_devtoolset7:
  environment:
    # s3 subfolder whose html indices get regenerated
    PIP_UPLOAD_FOLDER: "nightly/devtoolset7/"
  <<: *update_s3_htmls
# upload_binary_sizes job
# The builder hud at pytorch.org/builder shows the sizes of all the binaries
# over time. It gets this info from html files stored in S3, which this job
# populates every day.
# Collects binary sizes and uploads them to S3 for the builder hud, using
# a pinned awscli inside a dedicated conda env.
upload_binary_sizes: &upload_binary_sizes
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - attach_workspace:
        at: ~/workspace
    - run:
        <<: *setup_linux_system_environment
    - run:
        <<: *binary_checkout
    - run:
        <<: *binary_install_miniconda
    - run:
        name: Upload binary sizes
        no_output_timeout: "1h"
        command: |
          # set +x so the AWS credentials are not echoed into the build log
          set +x
          # '>' (not '>>') intentionally starts the env file fresh here
          echo "declare -x \"AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}\"" > /home/circleci/project/env
          echo "declare -x \"AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}\"" >> /home/circleci/project/env
          export DATE="$(date -u +%Y_%m_%d)"
          # Retry with backoff: 1, 2, 4, 8 seconds
          retry () {
              $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
          }
          source /home/circleci/project/env
          set -eux -o pipefail
          # This is hardcoded to match binary_install_miniconda.sh
          export PATH="/home/circleci/project/miniconda/bin:$PATH"
          # Not any awscli will work. Most won't. This one will work
          retry conda create -qyn aws36 python=3.6
          source activate aws36
          pip install awscli==1.16.46
          "/home/circleci/project/builder/cron/upload_binary_sizes.sh"
# TODO: there is currently no testing for libtorch
# binary_linux_libtorch_2.7m_cpu_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cpu"
# resource_class: gpu.medium
# <<: *binary_linux_test
#
# binary_linux_libtorch_2.7m_cu90_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cu90"
# resource_class: gpu.medium
# <<: *binary_linux_test
#
# binary_linux_libtorch_2.7m_cu100_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cu100"
# resource_class: gpu.medium
# <<: *binary_linux_test
##############################################################################
##############################################################################
# Workflows
##############################################################################
##############################################################################
# PR jobs pr builds
workflows:
build:
jobs:
- setup
- pytorch_linux_build:
name: pytorch_linux_xenial_py2_7_9_build
requires:
- setup
build_environment: "pytorch-linux-xenial-py2.7.9-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py2.7.9:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py2_7_9_test
requires:
- setup
- pytorch_linux_xenial_py2_7_9_build
build_environment: "pytorch-linux-xenial-py2.7.9-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py2.7.9:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_py2_7_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py2.7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py2.7:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py2_7_test
requires:
- setup
- pytorch_linux_xenial_py2_7_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py2.7-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py2.7:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_5_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3.5-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.5:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py3_5_test
requires:
- setup
- pytorch_linux_xenial_py3_5_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3.5-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.5:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_pynightly_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-pynightly-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-pynightly:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_pynightly_test
requires:
- setup
- pytorch_linux_xenial_pynightly_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-pynightly-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-pynightly:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_6_gcc4_8_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3.6-gcc4.8-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc4.8:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py3_6_gcc4_8_test
requires:
- setup
- pytorch_linux_xenial_py3_6_gcc4_8_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3.6-gcc4.8-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc4.8:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_6_gcc5_4_build
requires:
- setup
build_environment: "pytorch-linux-xenial-py3.6-gcc5.4-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py3_6_gcc5_4_test
requires:
- setup
- pytorch_linux_xenial_py3_6_gcc5_4_build
build_environment: "pytorch-linux-xenial-py3.6-gcc5.4-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_namedtensor_linux_xenial_py3_6_gcc5_4_build
requires:
- setup
build_environment: "pytorch-namedtensor-linux-xenial-py3.6-gcc5.4-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:339"
- pytorch_linux_test:
name: pytorch_namedtensor_linux_xenial_py3_6_gcc5_4_test
requires:
- setup
- pytorch_namedtensor_linux_xenial_py3_6_gcc5_4_build
build_environment: "pytorch-namedtensor-linux-xenial-py3.6-gcc5.4-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_6_gcc7_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3.6-gcc7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc7:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py3_6_gcc7_test
requires:
- setup
- pytorch_linux_xenial_py3_6_gcc7_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3.6-gcc7-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc7:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_clang5_asan_build
requires:
- setup
build_environment: "pytorch-linux-xenial-py3-clang5-asan-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_py3_clang5_asan_test
requires:
- setup
- pytorch_linux_xenial_py3_clang5_asan_build
build_environment: "pytorch-linux-xenial-py3-clang5-asan-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_namedtensor_linux_xenial_py3_clang5_asan_build
requires:
- setup
build_environment: "pytorch-namedtensor-linux-xenial-py3-clang5-asan-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:339"
- pytorch_linux_test:
name: pytorch_namedtensor_linux_xenial_py3_clang5_asan_test
requires:
- setup
- pytorch_namedtensor_linux_xenial_py3_clang5_asan_build
build_environment: "pytorch-namedtensor-linux-xenial-py3-clang5-asan-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_xla_linux_xenial_py3_6_clang7_build
requires:
- setup
build_environment: "pytorch-xla-linux-xenial-py3.6-clang7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-clang7:339"
- pytorch_linux_test:
name: pytorch_xla_linux_xenial_py3_6_clang7_test
requires:
- setup
- pytorch_xla_linux_xenial_py3_6_clang7_build
build_environment: "pytorch-xla-linux-xenial-py3.6-clang7-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-clang7:339"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_xenial_cuda9_cudnn7_py2_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py2-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py2:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py2_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py2_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py2-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py2:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_build:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_build
requires:
- setup
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_multigpu_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-multigpu-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.large
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_NO_AVX2_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-NO_AVX2-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_NO_AVX_NO_AVX2_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-NO_AVX-NO_AVX2-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_slow_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-slow-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_cudnn7_py3_nogpu_test
requires:
- setup
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
build_environment: "pytorch-linux-xenial-cuda9-cudnn7-py3-nogpu-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:339"
resource_class: large
- pytorch_short_perf_test_gpu:
requires:
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
- pytorch_python_doc_push:
requires:
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
- pytorch_cpp_doc_push:
requires:
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
- pytorch_linux_build:
name: pytorch_namedtensor_linux_xenial_cuda9_cudnn7_py2_build
requires:
- setup
build_environment: "pytorch-namedtensor-linux-xenial-cuda9-cudnn7-py2-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py2:339"
- pytorch_linux_test:
name: pytorch_namedtensor_linux_xenial_cuda9_cudnn7_py2_test
requires:
- setup
- pytorch_namedtensor_linux_xenial_cuda9_cudnn7_py2_build
build_environment: "pytorch-namedtensor-linux-xenial-cuda9-cudnn7-py2-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py2:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_build:
name: pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_test
requires:
- setup
- pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_build:
name: pytorch_linux_xenial_cuda10_cudnn7_py3_gcc7_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7:339"
- pytorch_linux_build:
name: pytorch_linux_xenial_cuda10_1_cudnn7_py3_gcc7_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7:339"
- pytorch_linux_test:
name: pytorch_linux_xenial_cuda10_1_cudnn7_py3_gcc7_test
requires:
- setup
- pytorch_linux_xenial_cuda10_1_cudnn7_py3_gcc7_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7:339"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
requires:
- setup
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339"
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_64-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339"
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-arm-v7a-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339"
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-arm-v8a-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339"
# Warning: indentation here matters!
# Pytorch MacOS builds
- pytorch_macos_10_13_py3_build:
requires:
- setup
- pytorch_macos_10_13_py3_test:
requires:
- setup
- pytorch_macos_10_13_py3_build
- pytorch_macos_10_13_cuda9_2_cudnn7_py3_build:
requires:
- setup
- pytorch_android_gradle_build-x86_32:
requires:
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
- pytorch_android_gradle_build:
requires:
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build
# Pytorch iOS builds
- pytorch_ios_build:
name: pytorch_ios_10_2_1_x86_64_build
build_environment: "pytorch-ios-10.2.1-x86_64_build"
ios_platform: "SIMULATOR"
use_nnpack: "OFF"
requires:
- setup
- pytorch_ios_build:
name: pytorch_ios_10_2_1_arm64_build
build_environment: "pytorch-ios-10.2.1-arm64_build"
ios_arch: "arm64"
requires:
- setup
- caffe2_linux_build:
name: caffe2_py2_gcc4_8_ubuntu14_04_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-gcc4.8-ubuntu14.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:301"
- caffe2_linux_test:
name: caffe2_py2_gcc4_8_ubuntu14_04_test
requires:
- setup
- caffe2_py2_gcc4_8_ubuntu14_04_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-gcc4.8-ubuntu14.04-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:301"
resource_class: large
- caffe2_linux_build:
name: caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:301"
- caffe2_linux_test:
name: caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test
requires:
- setup
- caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-test"
use_cuda_docker_runtime: "1"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:301"
resource_class: gpu.medium
- caffe2_linux_build:
name: caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:301"
- caffe2_linux_test:
name: caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_test
requires:
- setup
- caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build
build_environment: "caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04-test"
use_cuda_docker_runtime: "1"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:301"
resource_class: gpu.medium
- caffe2_linux_build:
name: caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-py2-cuda9.1-cudnn7-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.1-cudnn7-ubuntu16.04:301"
- caffe2_linux_test:
name: caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_test
requires:
- setup
- caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build
build_environment: "caffe2-py2-cuda9.1-cudnn7-ubuntu16.04-test"
use_cuda_docker_runtime: "1"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.1-cudnn7-ubuntu16.04:301"
resource_class: gpu.medium
- caffe2_linux_build:
name: caffe2_py2_mkl_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-py2-mkl-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-mkl-ubuntu16.04:301"
- caffe2_linux_test:
name: caffe2_py2_mkl_ubuntu16_04_test
requires:
- setup
- caffe2_py2_mkl_ubuntu16_04_build
build_environment: "caffe2-py2-mkl-ubuntu16.04-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-mkl-ubuntu16.04:301"
resource_class: large
- caffe2_linux_build:
name: caffe2_onnx_py2_gcc5_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-onnx-py2-gcc5-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc5-ubuntu16.04:301"
- caffe2_linux_test:
name: caffe2_onnx_py2_gcc5_ubuntu16_04_test
requires:
- setup
- caffe2_onnx_py2_gcc5_ubuntu16_04_build
build_environment: "caffe2-onnx-py2-gcc5-ubuntu16.04-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc5-ubuntu16.04:301"
resource_class: large
- caffe2_linux_build:
name: caffe2_py2_clang3_8_ubuntu16_04_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-clang3.8-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang3.8-ubuntu16.04:301"
build_only: "1"
- caffe2_linux_build:
name: caffe2_py2_clang3_9_ubuntu16_04_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-clang3.9-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang3.9-ubuntu16.04:301"
build_only: "1"
- caffe2_linux_build:
name: caffe2_py2_clang7_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-py2-clang7-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang7-ubuntu16.04:301"
build_only: "1"
- caffe2_linux_build:
name: caffe2_onnx_py3_6_clang7_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-onnx-py3.6-clang7-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py3.6-clang7-ubuntu16.04:301"
- caffe2_linux_test:
name: caffe2_onnx_py3_6_clang7_ubuntu16_04_test
requires:
- setup
- caffe2_onnx_py3_6_clang7_ubuntu16_04_build
build_environment: "caffe2-onnx-py3.6-clang7-ubuntu16.04-test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py3.6-clang7-ubuntu16.04:301"
resource_class: large
- caffe2_linux_build:
name: caffe2_py2_android_ubuntu16_04_build
requires:
- setup
build_environment: "caffe2-py2-android-ubuntu16.04-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-android-ubuntu16.04:301"
build_only: "1"
- caffe2_linux_build:
name: caffe2_py2_cuda9_0_cudnn7_centos7_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-cuda9.0-cudnn7-centos7-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-centos7:301"
- caffe2_linux_test:
name: caffe2_py2_cuda9_0_cudnn7_centos7_test
requires:
- setup
- caffe2_py2_cuda9_0_cudnn7_centos7_build
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-cuda9.0-cudnn7-centos7-test"
use_cuda_docker_runtime: "1"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-centos7:301"
resource_class: gpu.medium
- caffe2_macos_build:
name: caffe2_py2_ios_macos10_13_build
requires:
- setup
filters:
branches:
only:
- master
- /ci-all\/.*/
build_environment: "caffe2-py2-ios-macos10.13-build"
build_ios: "1"
- caffe2_macos_build:
name: caffe2_py2_system_macos10_13_build
requires:
- setup
build_environment: "caffe2-py2-system-macos10.13-build"
# Binary builds (subset, to smoke test that they'll work)
#
# NB: If you modify this file, you need to also modify
# the binary_and_smoke_tests_on_pr variable in
# pytorch-ci-hud to adjust the list of whitelisted builds
# at https://github.com/ezyang/pytorch-ci-hud/blob/master/src/BuildHistoryDisplay.js
- binary_linux_build:
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_build
build_environment: "manywheel 2.7mu cpu devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_build
build_environment: "manywheel 3.7m cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_conda_2_7_cpu_devtoolset7_build
build_environment: "conda 2.7 cpu devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
      # This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
# - binary_linux_conda_3_6_cu90_devtoolset7_build
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_build
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
# TODO we should test a libtorch cuda build, but they take too long
# - binary_linux_libtorch_2_7m_cu90_devtoolset7_static-without-deps_build
- binary_mac_build:
name: binary_macos_wheel_3_6_cpu_build
build_environment: "wheel 3.6 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_conda_2_7_cpu_build
build_environment: "conda 2.7 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_libtorch_2_7_cpu_build
build_environment: "libtorch 2.7 cpu"
requires:
- setup
- binary_linux_test:
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_test
build_environment: "manywheel 2.7mu cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cpu_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_test
build_environment: "manywheel 3.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cu100_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_2_7_cpu_devtoolset7_test
build_environment: "conda 2.7 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cpu_devtoolset7_build
docker_image: "soumith/conda-cuda"
      # This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
# - binary_linux_conda_3_6_cu90_devtoolset7_test:
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_test
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
##############################################################################
# Daily smoke test trigger
##############################################################################
  # Cron-triggered workflow (daily, master branch only — never on PRs) that
  # runs the binary smoke-test jobs: manywheel/conda/libtorch on Linux and
  # wheel/conda/libtorch on macOS. Every job requires the shared `setup` job.
  binarysmoketests:
    triggers:
      - schedule:
          # 16:15 UTC every day
          cron: "15 16 * * *"
          filters:
            branches:
              only:
                - master
    jobs:
      - setup
      # --- manywheel, CPU-only, one job per Python ABI -----------------------
      # NOTE(review): CPU jobs reuse the CUDA-enabled manylinux image; they
      # simply do not set use_cuda_docker_runtime.
      - smoke_linux_test:
          name: smoke_linux_manywheel_2_7m_cpu_devtoolset7
          build_environment: "manywheel 2.7m cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_manywheel_2_7mu_cpu_devtoolset7
          build_environment: "manywheel 2.7mu cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_5m_cpu_devtoolset7
          build_environment: "manywheel 3.5m cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_6m_cpu_devtoolset7
          build_environment: "manywheel 3.6m cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_7m_cpu_devtoolset7
          build_environment: "manywheel 3.7m cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
      # --- manywheel, CUDA 9.2 (needs GPU resource class + CUDA runtime) ----
      - smoke_linux_test:
          name: smoke_linux_manywheel_2_7m_cu92_devtoolset7
          build_environment: "manywheel 2.7m cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_2_7mu_cu92_devtoolset7
          build_environment: "manywheel 2.7mu cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_5m_cu92_devtoolset7
          build_environment: "manywheel 3.5m cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_6m_cu92_devtoolset7
          build_environment: "manywheel 3.6m cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_7m_cu92_devtoolset7
          build_environment: "manywheel 3.7m cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- manywheel, CUDA 10.0 ----------------------------------------------
      - smoke_linux_test:
          name: smoke_linux_manywheel_2_7m_cu100_devtoolset7
          build_environment: "manywheel 2.7m cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_2_7mu_cu100_devtoolset7
          build_environment: "manywheel 2.7mu cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_5m_cu100_devtoolset7
          build_environment: "manywheel 3.5m cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_6m_cu100_devtoolset7
          build_environment: "manywheel 3.6m cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_manywheel_3_7m_cu100_devtoolset7
          build_environment: "manywheel 3.7m cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- conda, CPU-only ----------------------------------------------------
      - smoke_linux_test:
          name: smoke_linux_conda_2_7_cpu_devtoolset7
          build_environment: "conda 2.7 cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
      - smoke_linux_test:
          name: smoke_linux_conda_3_5_cpu_devtoolset7
          build_environment: "conda 3.5 cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
      - smoke_linux_test:
          name: smoke_linux_conda_3_6_cpu_devtoolset7
          build_environment: "conda 3.6 cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
      - smoke_linux_test:
          name: smoke_linux_conda_3_7_cpu_devtoolset7
          build_environment: "conda 3.7 cpu devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
      # --- conda, CUDA 9.2 ----------------------------------------------------
      - smoke_linux_test:
          name: smoke_linux_conda_2_7_cu92_devtoolset7
          build_environment: "conda 2.7 cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_conda_3_5_cu92_devtoolset7
          build_environment: "conda 3.5 cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_conda_3_6_cu92_devtoolset7
          build_environment: "conda 3.6 cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_conda_3_7_cu92_devtoolset7
          build_environment: "conda 3.7 cu92 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- conda, CUDA 10.0 ---------------------------------------------------
      - smoke_linux_test:
          name: smoke_linux_conda_2_7_cu100_devtoolset7
          build_environment: "conda 2.7 cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_conda_3_5_cu100_devtoolset7
          build_environment: "conda 3.5 cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_conda_3_6_cu100_devtoolset7
          build_environment: "conda 3.6 cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_conda_3_7_cu100_devtoolset7
          build_environment: "conda 3.7 cu100 devtoolset7"
          requires:
            - setup
          docker_image: "soumith/conda-cuda"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- libtorch, CPU, devtoolset7: all four link/dependency variants ------
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps
          build_environment: "libtorch 2.7m cpu devtoolset7"
          requires:
            - setup
          libtorch_variant: "shared-with-deps"
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_devtoolset7_shared-without-deps
          build_environment: "libtorch 2.7m cpu devtoolset7"
          requires:
            - setup
          libtorch_variant: "shared-without-deps"
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_devtoolset7_static-with-deps
          build_environment: "libtorch 2.7m cpu devtoolset7"
          requires:
            - setup
          libtorch_variant: "static-with-deps"
          docker_image: "soumith/manylinux-cuda100"
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_devtoolset7_static-without-deps
          build_environment: "libtorch 2.7m cpu devtoolset7"
          requires:
            - setup
          libtorch_variant: "static-without-deps"
          docker_image: "soumith/manylinux-cuda100"
      # --- libtorch, CUDA 9.2, devtoolset7 ------------------------------------
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_devtoolset7_shared-with-deps
          build_environment: "libtorch 2.7m cu92 devtoolset7"
          requires:
            - setup
          libtorch_variant: "shared-with-deps"
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_devtoolset7_shared-without-deps
          build_environment: "libtorch 2.7m cu92 devtoolset7"
          requires:
            - setup
          libtorch_variant: "shared-without-deps"
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_devtoolset7_static-with-deps
          build_environment: "libtorch 2.7m cu92 devtoolset7"
          requires:
            - setup
          libtorch_variant: "static-with-deps"
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_devtoolset7_static-without-deps
          build_environment: "libtorch 2.7m cu92 devtoolset7"
          requires:
            - setup
          libtorch_variant: "static-without-deps"
          docker_image: "soumith/manylinux-cuda92"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- libtorch, CUDA 10.0, devtoolset7 -----------------------------------
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_devtoolset7_shared-with-deps
          build_environment: "libtorch 2.7m cu100 devtoolset7"
          requires:
            - setup
          libtorch_variant: "shared-with-deps"
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_devtoolset7_shared-without-deps
          build_environment: "libtorch 2.7m cu100 devtoolset7"
          requires:
            - setup
          libtorch_variant: "shared-without-deps"
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_devtoolset7_static-with-deps
          build_environment: "libtorch 2.7m cu100 devtoolset7"
          requires:
            - setup
          libtorch_variant: "static-with-deps"
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_devtoolset7_static-without-deps
          build_environment: "libtorch 2.7m cu100 devtoolset7"
          requires:
            - setup
          libtorch_variant: "static-without-deps"
          docker_image: "soumith/manylinux-cuda100"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- libtorch, CPU, gcc5.4 cxx11-abi (separate Ubuntu-based image) ------
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps
          build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "shared-with-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-without-deps
          build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "shared-without-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-with-deps
          build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "static-with-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-without-deps
          build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "static-without-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
      # --- libtorch, CUDA 9.2, gcc5.4 cxx11-abi -------------------------------
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-with-deps
          build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "shared-with-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps
          build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "shared-without-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-with-deps
          build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "static-with-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-without-deps
          build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "static-without-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- libtorch, CUDA 10.0, gcc5.4 cxx11-abi ------------------------------
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-with-deps
          build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "shared-with-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-without-deps
          build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "shared-without-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-with-deps
          build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "static-with-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      - smoke_linux_test:
          name: smoke_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-without-deps
          build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
          requires:
            - setup
          libtorch_variant: "static-without-deps"
          docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
          use_cuda_docker_runtime: "1"
          resource_class: gpu.medium
      # --- macOS smoke tests: wheel, conda, libtorch (CPU only) ---------------
      - smoke_mac_test:
          name: smoke_macos_wheel_2_7_cpu
          build_environment: "wheel 2.7 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_wheel_3_5_cpu
          build_environment: "wheel 3.5 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_wheel_3_6_cpu
          build_environment: "wheel 3.6 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_wheel_3_7_cpu
          build_environment: "wheel 3.7 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_conda_2_7_cpu
          build_environment: "conda 2.7 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_conda_3_5_cpu
          build_environment: "conda 3.5 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_conda_3_6_cpu
          build_environment: "conda 3.6 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_conda_3_7_cpu
          build_environment: "conda 3.7 cpu"
          requires:
            - setup
      - smoke_mac_test:
          name: smoke_macos_libtorch_2_7_cpu
          build_environment: "libtorch 2.7 cpu"
          requires:
            - setup
##############################################################################
# Daily binary build trigger
##############################################################################
binarybuilds:
triggers:
- schedule:
cron: "5 5 * * *"
filters:
branches:
only:
- master
jobs:
- setup
- binary_linux_build:
name: binary_linux_manywheel_2_7m_cpu_devtoolset7_build
build_environment: "manywheel 2.7m cpu devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_build
build_environment: "manywheel 2.7mu cpu devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_5m_cpu_devtoolset7_build
build_environment: "manywheel 3.5m cpu devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_6m_cpu_devtoolset7_build
build_environment: "manywheel 3.6m cpu devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_7m_cpu_devtoolset7_build
build_environment: "manywheel 3.7m cpu devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_2_7m_cu92_devtoolset7_build
build_environment: "manywheel 2.7m cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_manywheel_2_7mu_cu92_devtoolset7_build
build_environment: "manywheel 2.7mu cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_manywheel_3_5m_cu92_devtoolset7_build
build_environment: "manywheel 3.5m cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_manywheel_3_6m_cu92_devtoolset7_build
build_environment: "manywheel 3.6m cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_manywheel_3_7m_cu92_devtoolset7_build
build_environment: "manywheel 3.7m cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_manywheel_2_7m_cu100_devtoolset7_build
build_environment: "manywheel 2.7m cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_2_7mu_cu100_devtoolset7_build
build_environment: "manywheel 2.7mu cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_5m_cu100_devtoolset7_build
build_environment: "manywheel 3.5m cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_6m_cu100_devtoolset7_build
build_environment: "manywheel 3.6m cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_build
build_environment: "manywheel 3.7m cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_conda_2_7_cpu_devtoolset7_build
build_environment: "conda 2.7 cpu devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_5_cpu_devtoolset7_build
build_environment: "conda 3.5 cpu devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_6_cpu_devtoolset7_build
build_environment: "conda 3.6 cpu devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_7_cpu_devtoolset7_build
build_environment: "conda 3.7 cpu devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_2_7_cu92_devtoolset7_build
build_environment: "conda 2.7 cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_5_cu92_devtoolset7_build
build_environment: "conda 3.5 cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_6_cu92_devtoolset7_build
build_environment: "conda 3.6 cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_7_cu92_devtoolset7_build
build_environment: "conda 3.7 cu92 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_2_7_cu100_devtoolset7_build
build_environment: "conda 2.7 cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_5_cu100_devtoolset7_build
build_environment: "conda 3.5 cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_6_cu100_devtoolset7_build
build_environment: "conda 3.6 cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_conda_3_7_cu100_devtoolset7_build
build_environment: "conda 3.7 cu100 devtoolset7"
requires:
- setup
docker_image: "soumith/conda-cuda"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_build
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-without-deps_build
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
libtorch_variant: "shared-without-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_static-with-deps_build
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
libtorch_variant: "static-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_static-without-deps_build
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
libtorch_variant: "static-without-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-with-deps_build
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-without-deps_build
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
libtorch_variant: "shared-without-deps"
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_static-with-deps_build
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
libtorch_variant: "static-with-deps"
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_static-without-deps_build
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
libtorch_variant: "static-without-deps"
docker_image: "soumith/manylinux-cuda92"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-with-deps_build
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-without-deps_build
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
libtorch_variant: "shared-without-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_static-with-deps_build
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
libtorch_variant: "static-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_static-without-deps_build
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
libtorch_variant: "static-without-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-without-deps_build
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-with-deps_build
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "static-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-without-deps_build
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "static-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-with-deps_build
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps_build
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-with-deps_build
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "static-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-without-deps_build
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "static-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-with-deps_build
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-without-deps_build
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "shared-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-with-deps_build
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "static-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_build:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-without-deps_build
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
libtorch_variant: "static-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_mac_build:
name: binary_macos_wheel_2_7_cpu_build
build_environment: "wheel 2.7 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_wheel_3_5_cpu_build
build_environment: "wheel 3.5 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_wheel_3_6_cpu_build
build_environment: "wheel 3.6 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_wheel_3_7_cpu_build
build_environment: "wheel 3.7 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_conda_2_7_cpu_build
build_environment: "conda 2.7 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_conda_3_5_cpu_build
build_environment: "conda 3.5 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_conda_3_6_cpu_build
build_environment: "conda 3.6 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_conda_3_7_cpu_build
build_environment: "conda 3.7 cpu"
requires:
- setup
- binary_mac_build:
name: binary_macos_libtorch_2_7_cpu_build
build_environment: "libtorch 2.7 cpu"
requires:
- setup
##############################################################################
# Nightly tests
#
# Each *_test job below requires `setup` plus the correspondingly named
# *_build job above, so tests run against that build's artifact.
##############################################################################
- binary_linux_test:
name: binary_linux_manywheel_2_7m_cpu_devtoolset7_test
build_environment: "manywheel 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7m_cpu_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_test
build_environment: "manywheel 2.7mu cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cpu_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_manywheel_3_5m_cpu_devtoolset7_test
build_environment: "manywheel 3.5m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_5m_cpu_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_manywheel_3_6m_cpu_devtoolset7_test
build_environment: "manywheel 3.6m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_6m_cpu_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_manywheel_3_7m_cpu_devtoolset7_test
build_environment: "manywheel 3.7m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cpu_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_manywheel_2_7m_cu92_devtoolset7_test
build_environment: "manywheel 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7m_cu92_devtoolset7_build
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_2_7mu_cu92_devtoolset7_test
build_environment: "manywheel 2.7mu cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cu92_devtoolset7_build
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_3_5m_cu92_devtoolset7_test
build_environment: "manywheel 3.5m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_5m_cu92_devtoolset7_build
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_3_6m_cu92_devtoolset7_test
build_environment: "manywheel 3.6m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_6m_cu92_devtoolset7_build
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_3_7m_cu92_devtoolset7_test
build_environment: "manywheel 3.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cu92_devtoolset7_build
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_2_7m_cu100_devtoolset7_test
build_environment: "manywheel 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7m_cu100_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_2_7mu_cu100_devtoolset7_test
build_environment: "manywheel 2.7mu cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cu100_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_3_5m_cu100_devtoolset7_test
build_environment: "manywheel 3.5m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_5m_cu100_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_3_6m_cu100_devtoolset7_test
build_environment: "manywheel 3.6m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_6m_cu100_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_test
build_environment: "manywheel 3.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cu100_devtoolset7_build
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_2_7_cpu_devtoolset7_test
build_environment: "conda 2.7 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cpu_devtoolset7_build
docker_image: "soumith/conda-cuda"
- binary_linux_test:
name: binary_linux_conda_3_5_cpu_devtoolset7_test
build_environment: "conda 3.5 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_3_5_cpu_devtoolset7_build
docker_image: "soumith/conda-cuda"
- binary_linux_test:
name: binary_linux_conda_3_6_cpu_devtoolset7_test
build_environment: "conda 3.6 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_3_6_cpu_devtoolset7_build
docker_image: "soumith/conda-cuda"
- binary_linux_test:
name: binary_linux_conda_3_7_cpu_devtoolset7_test
build_environment: "conda 3.7 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_3_7_cpu_devtoolset7_build
docker_image: "soumith/conda-cuda"
- binary_linux_test:
name: binary_linux_conda_2_7_cu92_devtoolset7_test
build_environment: "conda 2.7 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cu92_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_3_5_cu92_devtoolset7_test
build_environment: "conda 3.5 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_3_5_cu92_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_3_6_cu92_devtoolset7_test
build_environment: "conda 3.6 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_3_6_cu92_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_3_7_cu92_devtoolset7_test
build_environment: "conda 3.7 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_3_7_cu92_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_2_7_cu100_devtoolset7_test
build_environment: "conda 2.7 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cu100_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_3_5_cu100_devtoolset7_test
build_environment: "conda 3.5 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_3_5_cu100_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_3_6_cu100_devtoolset7_test
build_environment: "conda 3.6 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_3_6_cu100_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_conda_3_7_cu100_devtoolset7_test
build_environment: "conda 3.7 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_3_7_cu100_devtoolset7_build
docker_image: "soumith/conda-cuda"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_test
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-without-deps_test
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-without-deps_build
libtorch_variant: "shared-without-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_static-with-deps_test
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_static-with-deps_build
libtorch_variant: "static-with-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_static-without-deps_test
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_static-without-deps_build
libtorch_variant: "static-without-deps"
docker_image: "soumith/manylinux-cuda100"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-with-deps_test
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-without-deps_test
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-without-deps_build
libtorch_variant: "shared-without-deps"
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_static-with-deps_test
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_static-with-deps_build
libtorch_variant: "static-with-deps"
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_static-without-deps_test
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_static-without-deps_build
libtorch_variant: "static-without-deps"
docker_image: "soumith/manylinux-cuda92"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-with-deps_test
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-without-deps_test
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-without-deps_build
libtorch_variant: "shared-without-deps"
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_static-with-deps_test
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_static-with-deps_build
libtorch_variant: "static-with-deps"
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_static-without-deps_test
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_static-without-deps_build
libtorch_variant: "static-without-deps"
docker_image: "soumith/manylinux-cuda100"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-without-deps_test
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-without-deps_build
libtorch_variant: "shared-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-with-deps_test
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-with-deps_build
libtorch_variant: "static-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-without-deps_test
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-without-deps_build
libtorch_variant: "static-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-with-deps_test
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps_test
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps_build
libtorch_variant: "shared-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-with-deps_test
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-with-deps_build
libtorch_variant: "static-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-without-deps_test
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-without-deps_build
libtorch_variant: "static-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-with-deps_test
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-with-deps_build
libtorch_variant: "shared-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-without-deps_test
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-without-deps_build
libtorch_variant: "shared-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-with-deps_test
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-with-deps_build
libtorch_variant: "static-with-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
- binary_linux_test:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-without-deps_test
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-without-deps_build
libtorch_variant: "static-without-deps"
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
use_cuda_docker_runtime: "1"
resource_class: gpu.medium
# NOTE(review): the commented-out jobs below use an obsolete naming scheme
# (dotted versions such as "2.7m", and cu90, which appears nowhere else in
# this workflow). Kept for reference only — confirm against the generator in
# cimodel/ before re-enabling.
#- binary_linux_libtorch_2.7m_cpu_test:
#  requires:
#    - binary_linux_libtorch_2.7m_cpu_build
#- binary_linux_libtorch_2.7m_cu90_test:
#  requires:
#    - binary_linux_libtorch_2.7m_cu90_build
#- binary_linux_libtorch_2.7m_cu100_test:
#  requires:
#    - binary_linux_libtorch_2.7m_cu100_build
##############################################################################
# Nightly uploads
##############################################################################
- binary_linux_upload:
name: binary_linux_manywheel_2_7m_cpu_devtoolset7_upload
build_environment: "manywheel 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7m_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_upload
build_environment: "manywheel 2.7mu cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_5m_cpu_devtoolset7_upload
build_environment: "manywheel 3.5m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_5m_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_6m_cpu_devtoolset7_upload
build_environment: "manywheel 3.6m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_6m_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_7m_cpu_devtoolset7_upload
build_environment: "manywheel 3.7m cpu devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_2_7m_cu92_devtoolset7_upload
build_environment: "manywheel 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7m_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_2_7mu_cu92_devtoolset7_upload
build_environment: "manywheel 2.7mu cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_5m_cu92_devtoolset7_upload
build_environment: "manywheel 3.5m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_5m_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_6m_cu92_devtoolset7_upload
build_environment: "manywheel 3.6m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_6m_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_7m_cu92_devtoolset7_upload
build_environment: "manywheel 3.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_2_7m_cu100_devtoolset7_upload
build_environment: "manywheel 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7m_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_2_7mu_cu100_devtoolset7_upload
build_environment: "manywheel 2.7mu cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_2_7mu_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_5m_cu100_devtoolset7_upload
build_environment: "manywheel 3.5m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_5m_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_6m_cu100_devtoolset7_upload
build_environment: "manywheel 3.6m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_6m_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_upload
build_environment: "manywheel 3.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_manywheel_3_7m_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_2_7_cpu_devtoolset7_upload
build_environment: "conda 2.7 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_5_cpu_devtoolset7_upload
build_environment: "conda 3.5 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_3_5_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_6_cpu_devtoolset7_upload
build_environment: "conda 3.6 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_3_6_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_7_cpu_devtoolset7_upload
build_environment: "conda 3.7 cpu devtoolset7"
requires:
- setup
- binary_linux_conda_3_7_cpu_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_2_7_cu92_devtoolset7_upload
build_environment: "conda 2.7 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_5_cu92_devtoolset7_upload
build_environment: "conda 3.5 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_3_5_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_6_cu92_devtoolset7_upload
build_environment: "conda 3.6 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_3_6_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_7_cu92_devtoolset7_upload
build_environment: "conda 3.7 cu92 devtoolset7"
requires:
- setup
- binary_linux_conda_3_7_cu92_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_2_7_cu100_devtoolset7_upload
build_environment: "conda 2.7 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_2_7_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_5_cu100_devtoolset7_upload
build_environment: "conda 3.5 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_3_5_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_6_cu100_devtoolset7_upload
build_environment: "conda 3.6 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_3_6_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_conda_3_7_cu100_devtoolset7_upload
build_environment: "conda 3.7 cu100 devtoolset7"
requires:
- setup
- binary_linux_conda_3_7_cu100_devtoolset7_test
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_upload
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_test
libtorch_variant: "shared-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-without-deps_upload
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-without-deps_test
libtorch_variant: "shared-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_static-with-deps_upload
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_static-with-deps_test
libtorch_variant: "static-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_static-without-deps_upload
build_environment: "libtorch 2.7m cpu devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_devtoolset7_static-without-deps_test
libtorch_variant: "static-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-with-deps_upload
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-with-deps_test
libtorch_variant: "shared-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-without-deps_upload
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_shared-without-deps_test
libtorch_variant: "shared-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_static-with-deps_upload
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_static-with-deps_test
libtorch_variant: "static-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_devtoolset7_static-without-deps_upload
build_environment: "libtorch 2.7m cu92 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_devtoolset7_static-without-deps_test
libtorch_variant: "static-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-with-deps_upload
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-with-deps_test
libtorch_variant: "shared-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-without-deps_upload
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_shared-without-deps_test
libtorch_variant: "shared-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_static-with-deps_upload
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_static-with-deps_test
libtorch_variant: "static-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_devtoolset7_static-without-deps_upload
build_environment: "libtorch 2.7m cu100 devtoolset7"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_devtoolset7_static-without-deps_test
libtorch_variant: "static-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_upload
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test
libtorch_variant: "shared-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-without-deps_upload
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-without-deps_test
libtorch_variant: "shared-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-with-deps_upload
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-with-deps_test
libtorch_variant: "static-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-without-deps_upload
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_static-without-deps_test
libtorch_variant: "static-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-with-deps_upload
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-with-deps_test
libtorch_variant: "shared-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps_upload
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps_test
libtorch_variant: "shared-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-with-deps_upload
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-with-deps_test
libtorch_variant: "static-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-without-deps_upload
build_environment: "libtorch 2.7m cu92 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_static-without-deps_test
libtorch_variant: "static-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-with-deps_upload
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-with-deps_test
libtorch_variant: "shared-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-without-deps_upload
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_shared-without-deps_test
libtorch_variant: "shared-without-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-with-deps_upload
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-with-deps_test
libtorch_variant: "static-with-deps"
context: org-member
- binary_linux_upload:
name: binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-without-deps_upload
build_environment: "libtorch 2.7m cu100 gcc5.4_cxx11-abi"
requires:
- setup
- binary_linux_libtorch_2_7m_cu100_gcc5_4_cxx11-abi_static-without-deps_test
libtorch_variant: "static-without-deps"
context: org-member
- binary_mac_upload:
name: binary_macos_wheel_2_7_cpu_upload
build_environment: "wheel 2.7 cpu"
requires:
- setup
- binary_macos_wheel_2_7_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_wheel_3_5_cpu_upload
build_environment: "wheel 3.5 cpu"
requires:
- setup
- binary_macos_wheel_3_5_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_wheel_3_6_cpu_upload
build_environment: "wheel 3.6 cpu"
requires:
- setup
- binary_macos_wheel_3_6_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_wheel_3_7_cpu_upload
build_environment: "wheel 3.7 cpu"
requires:
- setup
- binary_macos_wheel_3_7_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_conda_2_7_cpu_upload
build_environment: "conda 2.7 cpu"
requires:
- setup
- binary_macos_conda_2_7_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_conda_3_5_cpu_upload
build_environment: "conda 3.5 cpu"
requires:
- setup
- binary_macos_conda_3_5_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_conda_3_6_cpu_upload
build_environment: "conda 3.6 cpu"
requires:
- setup
- binary_macos_conda_3_6_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_conda_3_7_cpu_upload
build_environment: "conda 3.7 cpu"
requires:
- setup
- binary_macos_conda_3_7_cpu_build
context: org-member
- binary_mac_upload:
name: binary_macos_libtorch_2_7_cpu_upload
build_environment: "libtorch 2.7 cpu"
requires:
- setup
- binary_macos_libtorch_2_7_cpu_build
context: org-member
# Scheduled to run 4 hours after the binary jobs start
# These jobs need to run after all the binary jobs finish, regardless of
# whether they succeeded or failed. There's no way to enforce that ordering in
# CircleCI right now, so we just schedule this workflow to run after all the
# binary jobs should have finished.
# These jobs are all idempotent and very lightweight; they just upload HTML
# files that track which binaries are available and what their sizes are.
update_s3_htmls:
triggers:
- schedule:
cron: "0 9 * * *"
filters:
branches:
only:
- master
jobs:
- setup
- update_s3_htmls_for_nightlies:
context: org-member
requires:
- setup
- update_s3_htmls_for_nightlies_devtoolset7:
context: org-member
requires:
- setup
- upload_binary_sizes:
context: org-member
requires:
- setup