# TensorFlow Bazel configuration file.
# This file tries to group and simplify build options for TensorFlow.
#
# ----CONFIG OPTIONS----
# Android options:
# android:
# android_arm:
# android_arm64:
# android_x86:
# android_x86_64:
#
# iOS options:
# ios:
# ios_armv7:
# ios_arm64:
# ios_i386:
# ios_x86_64:
# ios_fat:
#
# macOS options
# darwin_arm64:
#
# Compiler options:
# cuda_clang: Use clang when building CUDA code.
# c++17: Build with C++17 options (links with libc++)
# c++1z: Build with C++17 options (links with libc++)
# c++17_gcc: Build with C++17 options (links with stdlibc++)
# c++1z_gcc: Build with C++17 options (links with stdlibc++)
# avx_linux: Build with avx instruction set on linux.
# avx2_linux: Build with avx2 instruction set on linux.
# native_arch_linux: Build with instruction sets available to the host machine on linux
# avx_win: Build with avx instruction set on windows
# avx2_win: Build with avx2 instruction set on windows
#
# Other build options:
# short_logs: Only log errors during build, skip warnings.
# verbose_logs: Show all compiler warnings during build.
# monolithic: Build all TF C++ code into a single shared object.
# dynamic_kernels: Try to link all kernels dynamically (experimental).
# libc++: Link against libc++ instead of stdlibc++
# asan: Build with the clang address sanitizer
# msan: Build with the clang memory sanitizer
# ubsan: Build with the clang undefined behavior sanitizer
# dbg: Build with debug info
#
#
# TF version options:
# v1: Build TF v1 (without contrib)
# v2: Build TF v2
#
# Feature and third-party library support options:
# xla: Build TF with XLA
# tpu: Build TF with TPU support
# cuda: Build with full CUDA support.
# rocm: Build with AMD GPU support (ROCm).
# mkl: Enable full MKL support.
# tensorrt: Enable TensorRT support.
# numa: Enable NUMA using hwloc.
# noaws: Disable AWS S3 storage support.
# nogcp: Disable GCS support.
# nohdfs: Disable Hadoop HDFS support.
# nonccl: Disable NCCL support.
#
#
# Remote build execution options (only configured to work with TF team projects for now).
# rbe: General RBE options shared by all flavors.
# rbe_linux: General RBE options used on all Linux builds.
# rbe_win: General RBE options used on all Windows builds.
#
# rbe_cpu_linux: RBE options to build with only CPU support.
# rbe_linux_cuda_nvcc_py*: RBE options to build with GPU support using nvcc.
#
# rbe_linux_py2: Linux Python 2 RBE config.
# rbe_linux_py3: Linux Python 3 RBE config.
#
# rbe_win_py37: Windows Python 3.7 RBE config.
# rbe_win_py38: Windows Python 3.8 RBE config.
#
# tensorflow_testing_rbe_linux: RBE options to use RBE with the tensorflow-testing project on Linux.
# tensorflow_testing_rbe_win: RBE options to use RBE with the tensorflow-testing project on Windows.
#
# Embedded Linux options (experimental; currently only tested with the TFLite build).
# elinux: General Embedded Linux options shared by all flavors.
# elinux_aarch64: Embedded Linux options for aarch64 (ARM64) CPU support.
# elinux_armhf: Embedded Linux options for armhf (ARMv7) CPU support.
#
# Release build options (for all operating systems)
# release_base: Common options for all builds on all operating systems.
# release_gpu_base: Common options for GPU builds on Linux and Windows.
# release_cpu_linux: Toolchain and CUDA options for Linux CPU builds.
# release_cpu_macos: Toolchain and CUDA options for macOS CPU builds.
# release_gpu_linux: Toolchain and CUDA options for Linux GPU builds.
# release_cpu_windows: Toolchain and CUDA options for Windows CPU builds.
# release_gpu_windows: Toolchain and CUDA options for Windows GPU builds.

# Default build options. These are applied first and unconditionally.

# For projects which use TensorFlow as part of a Bazel build process, putting
# nothing in a bazelrc will default to a monolithic build. The following line
# opts in to modular op registration support by default.
build --define framework_shared_object=true

# Workaround for https://github.com/bazelbuild/bazel/issues/8772 with Bazel >= 0.29.1
build --java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain
build --host_java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain

build --define=use_fast_cpp_protos=true
build --define=allow_oversize_protos=true

build --spawn_strategy=standalone
build -c opt

# Make Bazel print out all options from rc files.
build --announce_rc

build --define=grpc_no_ares=true

# See https://github.com/bazelbuild/bazel/issues/7362 for information on what
# the --incompatible_remove_legacy_whole_archive flag does.
# This flag is set to true in Bazel 1.0 and newer versions. We tried to migrate
# TensorFlow to the default; however, test coverage wasn't enough to catch the
# errors.
# There is ongoing work on the Bazel team's side to provide support for transitive
# shared libraries. As part of migrating to transitive shared libraries, we
# hope to provide a better mechanism for control over symbol exporting, and
# then tackle this issue again.
#
# TODO: Remove this line once TF doesn't depend on Bazel wrapping all library
# archives in -whole_archive -no_whole_archive.
build --noincompatible_remove_legacy_whole_archive

build --enable_platform_specific_config

# Enable XLA support by default.
build --define=with_xla_support=true

build --config=short_logs

build --config=v2

# Disable AWS/HDFS support by default.
build --define=no_aws_support=true
build --define=no_hdfs_support=true

# Default options should come above this line.

# Allow builds using libc++ as a linker library.
# This is mostly for OSS-Fuzz, so we also pass the relevant flags from the
# environment into the build.
build:libc++ --action_env=CC
build:libc++ --action_env=CXX
build:libc++ --action_env=CXXFLAGS=-stdlib=libc++
build:libc++ --action_env=PATH
build:libc++ --define force_libcpp=enabled
build:libc++ --linkopt -fuse-ld=lld
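
# For reference, a libc++ build would typically set clang as the compiler via
# the environment, e.g. (illustrative invocation; the target is a placeholder):
#   CC=clang CXX=clang++ bazel build --config=libc++ //path/to:target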

# Android configs. Bazel needs to have --cpu and --fat_apk_cpu both set to the
# target CPU to build transitive dependencies correctly. See
# https://docs.bazel.build/versions/master/user-manual.html#flag--fat_apk_cpu
build:android --crosstool_top=//external:android/crosstool
build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:android_arm --config=android
build:android_arm --cpu=armeabi-v7a
build:android_arm --fat_apk_cpu=armeabi-v7a
build:android_arm64 --config=android
build:android_arm64 --cpu=arm64-v8a
build:android_arm64 --fat_apk_cpu=arm64-v8a
build:android_x86 --config=android
build:android_x86 --cpu=x86
build:android_x86 --fat_apk_cpu=x86
build:android_x86_64 --config=android
build:android_x86_64 --cpu=x86_64
build:android_x86_64 --fat_apk_cpu=x86_64
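
# For example (illustrative; assumes the Android NDK/SDK have been configured
# and the target is a placeholder), a 64-bit ARM build could look like:
#   bazel build --config=android_arm64 //path/to:target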

# Sets the default Apple platform to macOS.
build:macos --apple_platform_type=macos

# gRPC on macOS requires this #define.
build:macos --copt=-DGRPC_BAZEL_BUILD

# Settings for macOS on ARM CPUs.
build:macos_arm64 --cpu=darwin_arm64

# iOS configs for each architecture and the fat binary builds.
build:ios --apple_platform_type=ios
build:ios --apple_bitcode=embedded --copt=-fembed-bitcode
build:ios --copt=-Wno-c++11-narrowing
build:ios_armv7 --config=ios
build:ios_armv7 --cpu=ios_armv7
build:ios_arm64 --config=ios
build:ios_arm64 --cpu=ios_arm64
build:ios_i386 --config=ios
build:ios_i386 --cpu=ios_i386
build:ios_x86_64 --config=ios
build:ios_x86_64 --cpu=ios_x86_64
build:ios_fat --config=ios
build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64
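
# As a sketch (placeholder target; assumes building on macOS with Xcode set up),
# a fat iOS binary covering all of the listed architectures could be built with:
#   bazel build --config=ios_fat //path/to:target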

# Config to use a mostly-static build and disable modular op registration
# support (this will revert to loading TensorFlow with RTLD_GLOBAL in Python).
# By default, TensorFlow will build with a dependence on
# //tensorflow:libtensorflow_framework.so.
build:monolithic --define framework_shared_object=false
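
# Illustrative usage (placeholder target): a single-shared-object build can be
# requested with:
#   bazel build --config=monolithic //path/to:target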

# Please note that MKL on macOS or Windows is still not supported.
# If you would like to use a local MKL instead of downloading, please set the
# environment variable "TF_MKL_ROOT" before each build.
build:mkl --define=build_with_mkl=true --define=enable_mkl=true
build:mkl --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl --define=build_with_openmp=true
build:mkl -c opt

# Config to build the oneDNN backend with a user-specified threadpool.
build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_threadpool --define=build_with_mkl_opensource=true
build:mkl_threadpool -c opt
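
# For example (illustrative; the MKL path and target are placeholders), an
# MKL-enabled build using a locally installed MKL might be invoked as:
#   TF_MKL_ROOT=/opt/intel/mkl bazel build --config=mkl //path/to:target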

# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
# This build is for inference only.
build:mkl_aarch64 --define=build_with_mkl_aarch64=true --define=enable_mkl=true
build:mkl_aarch64 --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_aarch64 --define=build_with_mkl_opensource=true
build:mkl_aarch64 --define=build_with_openmp=true
build:mkl_aarch64 -c opt

# This config refers to building CUDA op kernels with nvcc.
build:cuda --repo_env TF_NEED_CUDA=1
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --@local_config_cuda//:enable_cuda

# This config refers to building CUDA op kernels with clang.
build:cuda_clang --config=cuda
build:cuda_clang --repo_env TF_CUDA_CLANG=1
build:cuda_clang --@local_config_cuda//:cuda_compiler=clang
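
# Sketch of a GPU build invocation (placeholder target; assumes a local CUDA
# toolkit and cuDNN are installed):
#   bazel build --config=cuda //path/to:target
# or, to compile the CUDA code with clang instead of nvcc:
#   bazel build --config=cuda_clang //path/to:target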

# Debug config
build:dbg -c dbg
# Only include debug info for files under tensorflow/, excluding kernels, to
# reduce the size of the debug info in the binary. This is because if the debug
# sections in the ELF binary are too large, errors can occur. See
# https://github.com/tensorflow/tensorflow/issues/48919.
# Users can still include debug info for a specific kernel, e.g. with:
# --config=dbg --per_file_copt=+tensorflow/core/kernels/identity_op.*@-g
build:dbg --per_file_copt=+.*,-tensorflow.*@-g0
build:dbg --per_file_copt=+tensorflow/core/kernels.*@-g0
# For now, disable arm_neon. See: https://github.com/tensorflow/tensorflow/issues/33360
build:dbg --cxxopt -DTF_LITE_DISABLE_X86_NEON
# The AWS SDK must be compiled in release mode. See: https://github.com/tensorflow/tensorflow/issues/37498
build:dbg --copt -DDEBUG_BUILD
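
# Illustrative debug invocation (placeholder target), optionally re-enabling
# debug info for a single kernel as described above:
#   bazel build --config=dbg --per_file_copt=+tensorflow/core/kernels/identity_op.*@-g //path/to:target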

# Config to build the TPU backend.
build:tpu --define=with_tpu_support=true

build:tensorrt --repo_env TF_NEED_TENSORRT=1

build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain
build:rocm --define=using_rocm_hipcc=true
build:rocm --repo_env TF_NEED_ROCM=1

# Options extracted from the configure script.
build:numa --define=with_numa_support=true

# Options to disable features that are enabled by default.
build:noaws --define=no_aws_support=true
build:nogcp --define=no_gcp_support=true
build:nohdfs --define=no_hdfs_support=true
build:nonccl --define=no_nccl_support=true
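
# These opt-out configs can be combined on one command line, e.g. (illustrative;
# placeholder target):
#   bazel build --config=noaws --config=nohdfs --config=nonccl //path/to:target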

build:stackdriver_support --define=stackdriver_support=true

# Modular TF build options
build:dynamic_kernels --define=dynamic_loaded_kernels=true
build:dynamic_kernels --copt=-DAUTOLOAD_DYNAMIC_KERNELS

# Build TF with C++17 features.
build:c++17 --cxxopt=-std=c++1z
build:c++17 --cxxopt=-stdlib=libc++
build:c++1z --config=c++17
build:c++17_gcc --cxxopt=-std=c++1z
build:c++1z_gcc --config=c++17_gcc

# Don't trigger --config=<host platform> when cross-compiling.
build:android --noenable_platform_specific_config
build:ios --noenable_platform_specific_config

# Suppress C++ compiler warnings; otherwise build logs become tens of MBs.
build:android --copt=-w
build:ios --copt=-w
build:linux --copt=-w
build:linux --host_copt=-w
build:macos --copt=-w
build:windows --copt=/W0

# TensorFlow uses M_* math constants that only get defined by MSVC headers if
# _USE_MATH_DEFINES is defined.
build:windows --copt=/D_USE_MATH_DEFINES
build:windows --host_copt=/D_USE_MATH_DEFINES

# Default paths for TF_SYSTEM_LIBS
build:linux --define=PREFIX=/usr
build:linux --define=LIBDIR=$(PREFIX)/lib
build:linux --define=INCLUDEDIR=$(PREFIX)/include
build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
build:macos --define=PREFIX=/usr
build:macos --define=LIBDIR=$(PREFIX)/lib
build:macos --define=INCLUDEDIR=$(PREFIX)/include
build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
# TF_SYSTEM_LIBS does not work on Windows.
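
# As an illustrative, unverified example (the library list and target are
# placeholders), distributions that want system-provided libraries typically
# export TF_SYSTEM_LIBS before building:
#   TF_SYSTEM_LIBS="zlib,curl" bazel build //path/to:target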

# By default, build TF in C++14 mode.
build:android --cxxopt=-std=c++14
build:android --host_cxxopt=-std=c++14
build:ios --cxxopt=-std=c++14
build:ios --host_cxxopt=-std=c++14
build:linux --cxxopt=-std=c++14
build:linux --host_cxxopt=-std=c++14
build:macos --cxxopt=-std=c++14
build:macos --host_cxxopt=-std=c++14
build:windows --cxxopt=/std:c++14
build:windows --host_cxxopt=/std:c++14

# On Windows, we still link everything into a single DLL.
build:windows --config=monolithic

# On Linux, we dynamically link a small number of kernels.
build:linux --config=dynamic_kernels

# Make sure to include as little of windows.h as possible.
build:windows --copt=-DWIN32_LEAN_AND_MEAN
build:windows --host_copt=-DWIN32_LEAN_AND_MEAN
build:windows --copt=-DNOGDI
build:windows --host_copt=-DNOGDI

# MSVC (Windows): Standards-conformant preprocessor mode.
# See https://docs.microsoft.com/en-us/cpp/preprocessor/preprocessor-experimental-overview
build:windows --copt=/experimental:preprocessor
build:windows --host_copt=/experimental:preprocessor

# Misc build options we need for Windows.
build:windows --linkopt=/DEBUG
build:windows --host_linkopt=/DEBUG
build:windows --linkopt=/OPT:REF
build:windows --host_linkopt=/OPT:REF
build:windows --linkopt=/OPT:ICF
build:windows --host_linkopt=/OPT:ICF

# Verbose failure logs when something goes wrong.
build:windows --verbose_failures

# On Windows, we never cross-compile.
build:windows --distinct_host_configuration=false
# On Linux, don't cross-compile by default.
build:linux --distinct_host_configuration=false

# Do not risk cache corruption. See:
# https://github.com/bazelbuild/bazel/issues/3360
build:linux --experimental_guard_against_concurrent_changes

# Configure short or long logs.
build:short_logs --output_filter=DONT_MATCH_ANYTHING
build:verbose_logs --output_filter=

# Instruction set optimizations
# TODO(gunan): Create a feature in toolchains for avx/avx2 to
# avoid having to define linux/win separately.
build:avx_linux --copt=-mavx
build:avx_linux --host_copt=-mavx
build:avx2_linux --copt=-mavx2
build:native_arch_linux --copt=-march=native
build:avx_win --copt=/arch:AVX
build:avx2_win --copt=/arch:AVX2
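
# For instance (illustrative; placeholder target), a Linux build tuned for AVX2
# can be requested with:
#   bazel build --config=avx2_linux //path/to:target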

# Options to build TensorFlow 1.x or 2.x.
build:v1 --define=tf_api_version=1 --action_env=TF2_BEHAVIOR=0
build:v2 --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1
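
# Since --config=v2 is already applied by the default options above, --config=v1
# is the one you would pass explicitly, e.g. (illustrative; placeholder target):
#   bazel build --config=v1 //path/to:target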

# Disable XLA on mobile.
build:xla --define=with_xla_support=true # TODO: remove, it's on by default.
build:android --define=with_xla_support=false
build:ios --define=with_xla_support=false

# BEGIN TF REMOTE BUILD EXECUTION OPTIONS
# Options when using remote execution.
# WARNING: THESE OPTIONS WON'T WORK IF YOU DO NOT HAVE PROPER AUTHENTICATION AND PERMISSIONS.

# Flag to enable remote config.
common --experimental_repo_remote_exec

build:rbe --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build:rbe --google_default_credentials
build:rbe --bes_backend=buildeventservice.googleapis.com
build:rbe --bes_results_url="https://source.cloud.google.com/results/invocations"
build:rbe --bes_timeout=600s
build:rbe --define=EXECUTOR=remote
build:rbe --distinct_host_configuration=false
build:rbe --flaky_test_attempts=3
build:rbe --jobs=200
build:rbe --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:rbe --remote_timeout=3600
build:rbe --spawn_strategy=remote,worker,standalone,local
test:rbe --test_env=USER=anon
# Attempt to minimize the amount of data transfer between bazel and the remote
# workers:
build:rbe --remote_download_toplevel

build:rbe_linux --config=rbe
build:rbe_linux --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin"
build:rbe_linux --host_javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8
build:rbe_linux --javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8
build:rbe_linux --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:rbe_linux --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8

# Non-RBE settings we should include because we do not run ./configure.
build:rbe_linux --config=avx_linux
# TODO(gunan): Check why we need this specified in RBE, but not in other builds.
build:rbe_linux --linkopt=-lrt
build:rbe_linux --host_linkopt=-lrt
build:rbe_linux --linkopt=-lm
build:rbe_linux --host_linkopt=-lm

# Use the GPU toolchain until the CPU one is ready.
# https://github.com/bazelbuild/bazel/issues/13623
build:rbe_cpu_linux --config=rbe_linux
build:rbe_cpu_linux --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_cpu_linux --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_cpu_linux --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_cpu_linux --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_cpu_linux --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_cpu_linux --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"

build:rbe_linux_cuda_base --config=rbe_linux
build:rbe_linux_cuda_base --config=cuda
build:rbe_linux_cuda_base --config=tensorrt
build:rbe_linux_cuda_base --action_env=TF_CUDA_VERSION=11
build:rbe_linux_cuda_base --action_env=TF_CUDNN_VERSION=8
build:rbe_linux_cuda_base --repo_env=REMOTE_GPU_TESTING=1
# TensorRT 7 for CUDA 11.1 is compatible with CUDA 11.2, but requires
# libnvrtc.so.11.1. See https://github.com/NVIDIA/TensorRT/issues/1064.
# TODO(b/187962120): Remove when upgrading to TensorRT 8.
test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64"

build:rbe_linux_cuda11.2_nvcc_base --config=rbe_linux_cuda_base
build:rbe_linux_cuda11.2_nvcc_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda11.2_nvcc_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda11.2_nvcc_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_linux_cuda11.2_nvcc_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
build:rbe_linux_cuda11.2_nvcc_py3.6 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6"
build:rbe_linux_cuda11.2_nvcc_py3.7 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
build:rbe_linux_cuda11.2_nvcc_py3.8 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"
build:rbe_linux_cuda11.2_nvcc_py3.9 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"

# Map default to CUDA 11.2.
build:rbe_linux_cuda_nvcc_py36 --config=rbe_linux_cuda11.2_nvcc_py3.6
build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda11.2_nvcc_py3.7
build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.2_nvcc_py3.8
build:rbe_linux_cuda_nvcc_py39 --config=rbe_linux_cuda11.2_nvcc_py3.9

# Deprecated configs that people might still use.
build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py36
build:rbe_gpu_linux --config=rbe_linux_cuda_nvcc

build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base
build:rbe_linux_cuda_clang_base --repo_env TF_CUDA_CLANG=1
build:rbe_linux_cuda_clang_base --@local_config_cuda//:cuda_compiler=clang
build:rbe_linux_cuda_clang_base --crosstool_top="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda_clang_base --extra_toolchains="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_linux_cuda_clang_base --extra_execution_platforms="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --host_platform="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --platforms="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
build:rbe_linux_cuda_clang_py27 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python2.7"
build:rbe_linux_cuda_clang_py35 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.5"
build:rbe_linux_cuda_clang_py36 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6"
build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"

# ROCm
build:rbe_linux_rocm_base --config=rocm
build:rbe_linux_rocm_base --config=rbe_linux
build:rbe_linux_rocm_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain"
build:rbe_linux_rocm_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain-linux-x86_64"
build:rbe_linux_rocm_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --action_env=TF_ROCM_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm"
build:rbe_linux_rocm_py2.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python2.7"
build:rbe_linux_rocm_py3.5 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.5"
build:rbe_linux_rocm_py3.6 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.6"
build:rbe_linux_rocm_py3.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.7"
build:rbe_linux_rocm_py3.8 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.8"

# Linux CPU

build:rbe_linux_py3 --config=rbe_linux
build:rbe_linux_py3 --python_path="/usr/local/bin/python3.9"
build:rbe_linux_py3 --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"

build:rbe_win --config=rbe
build:rbe_win --crosstool_top="@tf_toolchains//toolchains/win/tf_win_06242021:toolchain"
build:rbe_win --extra_toolchains="@tf_toolchains//toolchains/win/tf_win_06242021:cc-toolchain-x64_windows"
build:rbe_win --host_javabase="@tf_toolchains//toolchains/win:windows_jdk8"
build:rbe_win --javabase="@tf_toolchains//toolchains/win:windows_jdk8"
build:rbe_win --extra_execution_platforms="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --host_platform="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --platforms="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
build:rbe_win --experimental_strict_action_env=true

# TODO(gunan): Remove once we use MSVC 2019 with latest patches.
build:rbe_win --define=override_eigen_strong_inline=true
build:rbe_win --jobs=100

# Don't build the Python zip archive in the RBE build.
build:rbe_win --remote_download_minimal
build:rbe_win --enable_runfiles
build:rbe_win --nobuild_python_zip

build:rbe_win_py37 --config=rbe
build:rbe_win_py37 --repo_env=TF_PYTHON_CONFIG_REPO="@windows_py37_config_python"
build:rbe_win_py37 --python_path=C:\\Python37\\python.exe

build:rbe_win_py38 --config=rbe
build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe
build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages
build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=@tf_toolchains//toolchains/win_1803/py38
build:rbe_win_py38 --python_path=C:\\Python38\\python.exe

# You may need to change these for your own GCP project.
build:tensorflow_testing_rbe --project_id=tensorflow-testing
common:tensorflow_testing_rbe_linux --remote_instance_name=projects/tensorflow-testing/instances/default_instance
build:tensorflow_testing_rbe_linux --config=tensorflow_testing_rbe

common:tensorflow_testing_rbe_win --remote_instance_name=projects/tensorflow-testing/instances/windows
build:tensorflow_testing_rbe_win --config=tensorflow_testing_rbe

# TFLite build configs for generic embedded Linux.
build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain
build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:elinux_aarch64 --config=elinux
build:elinux_aarch64 --cpu=aarch64
build:elinux_aarch64 --distinct_host_configuration=true
build:elinux_armhf --config=elinux
build:elinux_armhf --cpu=armhf
build:elinux_armhf --distinct_host_configuration=true
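
# For example (illustrative; placeholder target), a TFLite cross-compile for a
# 64-bit ARM embedded Linux board could be requested with:
#   bazel build --config=elinux_aarch64 //path/to:target
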
# END TF REMOTE BUILD EXECUTION OPTIONS

# Config-specific options should come above this line.

# Load rc file written by ./configure.
try-import %workspace%/.tf_configure.bazelrc

# Load rc file with user-specific options.
try-import %workspace%/.bazelrc.user
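
# The user rc file is optional; as a purely illustrative sketch, .bazelrc.user
# could hold personal defaults such as:
#   build --config=cuda
#   build --jobs=8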

# Bazelrc configs for release builds.
build:release_base --config=v2
build:release_base --distinct_host_configuration=false
test:release_base --flaky_test_attempts=3
test:release_base --test_size_filters=small,medium

build:release_cpu_linux --config=release_base
build:release_cpu_linux --config=avx_linux
build:release_cpu_linux --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
test:release_cpu_linux --test_env=LD_LIBRARY_PATH

build:release_cpu_macos --config=release_base
build:release_cpu_macos --config=avx_linux

build:release_gpu_base --config=cuda
build:release_gpu_base --action_env=TF_CUDA_VERSION="11"
build:release_gpu_base --action_env=TF_CUDNN_VERSION="8"
build:release_gpu_base --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"

build:release_gpu_linux --config=release_cpu_linux
build:release_gpu_linux --config=release_gpu_base
build:release_gpu_linux --config=tensorrt
build:release_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
build:release_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/tensorrt/lib"
build:release_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt7/usr/bin/gcc"
build:release_gpu_linux --crosstool_top=@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain

build:release_cpu_windows --config=release_base
build:release_cpu_windows --config=avx_win
build:release_cpu_windows --define=no_tensorflow_py_deps=true
# First available in VS 16.4. Speeds up Windows compile times significantly. See
# https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
build:release_cpu_windows --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions

build:release_gpu_windows --config=release_cpu_windows
build:release_gpu_windows --config=release_gpu_base
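
# Illustrative release invocation (placeholder target; assumes the matching
# manylinux2010 toolchain containers and CUDA paths above are available):
#   bazel build --config=release_gpu_linux //path/to:target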

# Address sanitizer
# CC=clang bazel build --config asan
build:asan --strip=never
build:asan --copt -fsanitize=address
build:asan --copt -DADDRESS_SANITIZER
build:asan --copt -g
build:asan --copt -O3
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address

# Memory sanitizer
# CC=clang bazel build --config msan
build:msan --strip=never
build:msan --copt -fsanitize=memory
build:msan --copt -DMEMORY_SANITIZER
build:msan --copt -g
build:msan --copt -O3
build:msan --copt -fno-omit-frame-pointer
build:msan --linkopt -fsanitize=memory

# Undefined Behavior Sanitizer
# CC=clang bazel build --config ubsan
build:ubsan --strip=never
build:ubsan --copt -fsanitize=undefined
build:ubsan --copt -DUNDEFINED_BEHAVIOR_SANITIZER
build:ubsan --copt -g
build:ubsan --copt -O3
build:ubsan --copt -fno-omit-frame-pointer
build:ubsan --linkopt -fsanitize=undefined
build:ubsan --linkopt -lubsan

# Exclude TFRT integration for anything but Linux.
build:android --config=no_tfrt
build:macos --config=no_tfrt
build:windows --config=no_tfrt
build:rocm --config=no_tfrt
build:no_tfrt --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/fallback,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils

# Experimental configuration for testing XLA GPU lowering to TFRT BEF thunks.
# bazel test --config=experimental_enable_bef_thunk \
#   //tensorflow/compiler/xla/service/gpu/tests:mlir_gemm_test
build:experimental_enable_bef_thunk --config=cuda
build:experimental_enable_bef_thunk --//tensorflow/compiler/xla/service/gpu:enable_bef_thunk
build:experimental_enable_bef_thunk --@tf_runtime//:enable_gpu
build:experimental_enable_bef_thunk --@rules_cuda//cuda:enable_cuda
build:experimental_enable_bef_thunk --nocheck_visibility
build:experimental_enable_bef_thunk --incompatible_strict_action_env
build:experimental_enable_bef_thunk --config=monolithic