Skip to content

Commit bc456e3

Browse files
martinwicke and tensorflower-gardener
authored and committed
Merge changes from github.
Change: 151046259
1 parent 8ca0714 commit bc456e3

File tree

141 files changed

+4407
-602
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

141 files changed

+4407
-602
lines changed

.gitignore

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
.DS_Store
22
.ipynb_checkpoints
33
node_modules
4+
/.bazelrc
45
/bazel-*
56
/third_party/py/numpy/numpy_include
67
/tools/bazel.rc
@@ -13,4 +14,4 @@ node_modules
1314
*.pyc
1415
__pycache__
1516
*.swp
16-
.vscode/
17+
.vscode/

RELEASE.md

+9
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,10 @@
1+
# Release 1.0.1
2+
3+
## Bug Fixes and Other Changes
4+
* Change GraphConstructor to not increase the version when importing, but instead take the min of all versions.
5+
* Google Cloud Storage fixes.
6+
* Removed `tf.core` and `tf.python` modules from the API. These were never intended to be exposed. Please use the same objects through top-level `tf` module instead.
7+
18
# Release 1.0.0
29

310
## Major Features and Improvements
@@ -88,6 +95,8 @@ To help you upgrade your existing TensorFlow Python code to match the API change
8895
from the tensorflow::ops namespace to tensorflow.
8996
* Change arg order for `{softmax,sparse_softmax,sigmoid}_cross_entropy_with_logits` to be (labels, predictions), and force use of named args.
9097
* tf.nn.rnn_cell.* and most functions in tf.nn.rnn.* (with the exception of dynamic_rnn and raw_rnn) are temporarily in tf.contrib.rnn. They will be moved back into core for TF 1.1.
98+
* `tf.nn.sampled_softmax_loss` and `tf.nn.nce_loss` have both changed their API such that you need to switch the `inputs, labels` to `labels, inputs` parameters.
99+
* The shape keyword argument of the `SparseTensor` constructor changes its name to `dense_shape` between Tensorflow 0.12 and Tensorflow 1.0.
91100

92101
## Bug Fixes and Other Changes
93102
* Numerous C++ API updates.

WORKSPACE

+1-6
Original file line numberDiff line numberDiff line change
@@ -14,12 +14,7 @@ load("@io_bazel_rules_closure//closure:defs.bzl", "closure_repositories")
1414

1515
closure_repositories()
1616

17-
load("//tensorflow:workspace.bzl", "check_version", "tf_workspace")
18-
19-
# We must check the bazel version before trying to parse any other BUILD files,
20-
# in case the parsing of those build files depends on the bazel version we
21-
# require here.
22-
check_version("0.4.2")
17+
load("//tensorflow:workspace.bzl", "tf_workspace")
2318

2419
# Uncomment and update the paths in these entries to build the Android demo.
2520
#android_sdk_repository(

configure

+23-42
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,9 @@ pushd `dirname $0` > /dev/null
88
SOURCE_BASE_DIR=`pwd -P`
99
popd > /dev/null
1010

11+
# This file contains customized config settings.
12+
touch .bazelrc
13+
1114
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
1215

1316
function is_linux() {
@@ -36,15 +39,11 @@ function is_windows() {
3639
}
3740

3841
function bazel_clean_and_fetch() {
39-
# bazel clean --expunge currently doesn't work on Windows
40-
# TODO(pcloudy): Re-enable it after bazel clean --expunge is fixed.
41-
if ! is_windows; then
42-
bazel clean --expunge
43-
fi
4442
if [ -z "$TF_BAZEL_TARGETS" ]; then
45-
TF_BAZEL_TARGETS="//tensorflow/... -//tensorflow/contrib/nccl/... -//tensorflow/examples/android/..."
43+
bazel fetch "//tensorflow/... -//tensorflow/contrib/nccl/... -//tensorflow/examples/android/..."
44+
else
45+
bazel fetch $TF_BAZEL_TARGETS
4646
fi
47-
bazel fetch "$TF_BAZEL_TARGETS"
4847
}
4948

5049
function sed_hyphen_i() {
@@ -102,8 +101,8 @@ if false; then # Disable building with MKL for now
102101

103102
if [ "$TF_NEED_MKL" == "1" ]; then # TF_NEED_MKL
104103
DST=`dirname $0`
105-
ARCHIVE_BASENAME=mklml_lnx_2017.0.2.20170110.tgz
106-
GITHUB_RELEASE_TAG=v0.3
104+
ARCHIVE_BASENAME=mklml_lnx_2017.0.2.20170209.tgz
105+
GITHUB_RELEASE_TAG=v0.5
107106
MKLURL="https://github.com/01org/mkl-dnn/releases/download/$GITHUB_RELEASE_TAG/$ARCHIVE_BASENAME"
108107
if ! [ -e "$DST/third_party/mkl/$ARCHIVE_BASENAME" ]; then
109108
wget --no-check-certificate -P $DST/third_party/mkl/ $MKLURL
@@ -182,13 +181,12 @@ else
182181
TF_NEED_JEMALLOC=0
183182
fi
184183

185-
if [ "$TF_NEED_JEMALLOC" == "1" ]; then
186-
sed_hyphen_i -e "s/WITH_JEMALLOC = False/WITH_JEMALLOC = True/" tensorflow/core/platform/default/build_config.bzl
187-
else
188-
sed_hyphen_i -e "s/WITH_JEMALLOC = True/WITH_JEMALLOC = False/" tensorflow/core/platform/default/build_config.bzl
184+
sed_hyphen_i -e "/with_jemalloc/d" .bazelrc
185+
if [[ "$TF_NEED_JEMALLOC" == "1" ]]; then
186+
echo 'build --define with_jemalloc=true' >>.bazelrc
189187
fi
190188

191-
while [ "$TF_NEED_GCP" == "" ]; do
189+
while [[ "$TF_NEED_GCP" == "" ]]; do
192190
read -p "Do you wish to build TensorFlow with "\
193191
"Google Cloud Platform support? [y/N] " INPUT
194192
case $INPUT in
@@ -202,23 +200,12 @@ while [ "$TF_NEED_GCP" == "" ]; do
202200
esac
203201
done
204202

205-
if [ "$TF_NEED_GCP" == "1" ]; then
206-
## Verify that libcurl header files are available.
207-
# Only check Linux, since on MacOS the header files are installed with XCode.
208-
if is_linux && [[ ! -f "/usr/include/curl/curl.h" ]]; then
209-
echo "ERROR: It appears that the development version of libcurl is not "\
210-
"available. Please install the libcurl3-dev package."
211-
exit 1
212-
fi
213-
214-
# Update Bazel build configuration.
215-
sed_hyphen_i -e "s/WITH_GCP_SUPPORT = False/WITH_GCP_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
216-
else
217-
# Update Bazel build configuration.
218-
sed_hyphen_i -e "s/WITH_GCP_SUPPORT = True/WITH_GCP_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
203+
sed_hyphen_i -e "/with_gcp_support/d" .bazelrc
204+
if [[ "$TF_NEED_GCP" == "1" ]]; then
205+
echo 'build --define with_gcp_support=true' >>.bazelrc
219206
fi
220207

221-
while [ "$TF_NEED_HDFS" == "" ]; do
208+
while [[ "$TF_NEED_HDFS" == "" ]]; do
222209
read -p "Do you wish to build TensorFlow with "\
223210
"Hadoop File System support? [y/N] " INPUT
224211
case $INPUT in
@@ -232,16 +219,13 @@ while [ "$TF_NEED_HDFS" == "" ]; do
232219
esac
233220
done
234221

235-
if [ "$TF_NEED_HDFS" == "1" ]; then
236-
# Update Bazel build configuration.
237-
sed_hyphen_i -e "s/WITH_HDFS_SUPPORT = False/WITH_HDFS_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
238-
else
239-
# Update Bazel build configuration.
240-
sed_hyphen_i -e "s/WITH_HDFS_SUPPORT = True/WITH_HDFS_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
222+
sed_hyphen_i -e "/with_hdfs_support/d" .bazelrc
223+
if [[ "$TF_NEED_HDFS" == "1" ]]; then
224+
echo 'build --define with_hdfs_support=true' >>.bazelrc
241225
fi
242226

243227
## Enable XLA.
244-
while [ "$TF_ENABLE_XLA" == "" ]; do
228+
while [[ "$TF_ENABLE_XLA" == "" ]]; do
245229
read -p "Do you wish to build TensorFlow with the XLA just-in-time compiler (experimental)? [y/N] " INPUT
246230
case $INPUT in
247231
[Yy]* ) echo "XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=1;;
@@ -251,12 +235,9 @@ while [ "$TF_ENABLE_XLA" == "" ]; do
251235
esac
252236
done
253237

254-
if [ "$TF_ENABLE_XLA" == "1" ]; then
255-
# Update Bazel build configuration.
256-
sed_hyphen_i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = True/" tensorflow/core/platform/default/build_config_root.bzl
257-
else
258-
# Update Bazel build configuration.
259-
sed_hyphen_i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = False/" tensorflow/core/platform/default/build_config_root.bzl
238+
sed_hyphen_i -e "/with_xla_support/d" .bazelrc
239+
if [[ "$TF_ENABLE_XLA" == "1" ]]; then
240+
echo 'build --define with_xla_support=true' >>.bazelrc
260241
fi
261242

262243

tensorflow/BUILD

+30
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,34 @@ config_setting(
110110
visibility = ["//visibility:public"],
111111
)
112112

113+
# TODO(jhseu): Enable on other platforms other than Linux.
114+
config_setting(
115+
name = "with_jemalloc",
116+
values = {
117+
"cpu": "k8",
118+
"define": "with_jemalloc=true",
119+
},
120+
visibility = ["//visibility:public"],
121+
)
122+
123+
config_setting(
124+
name = "with_gcp_support",
125+
values = {"define": "with_gcp_support=true"},
126+
visibility = ["//visibility:public"],
127+
)
128+
129+
config_setting(
130+
name = "with_hdfs_support",
131+
values = {"define": "with_hdfs_support=true"},
132+
visibility = ["//visibility:public"],
133+
)
134+
135+
config_setting(
136+
name = "with_xla_support",
137+
values = {"define": "with_xla_support=true"},
138+
visibility = ["//visibility:public"],
139+
)
140+
113141
package_group(
114142
name = "internal",
115143
packages = ["//tensorflow/..."],
@@ -321,6 +349,8 @@ cc_binary(
321349
deps = [
322350
"//tensorflow/c:c_api",
323351
"//tensorflow/cc:cc_ops",
352+
"//tensorflow/cc:client_session",
353+
"//tensorflow/cc:scope",
324354
"//tensorflow/core:tensorflow",
325355
],
326356
)

tensorflow/compiler/xla/service/allocation_tracker.cc

+2-1
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,8 @@ tensorflow::Status AllocationTracker::DeallocateShape(
138138
TF_RET_CHECK(ShapeUtil::TupleElementCount(shape) == elements.size())
139139
<< "tuple has unexpected number of elements: " << elements.size()
140140
<< " != " << ShapeUtil::TupleElementCount(shape);
141-
for (int i = 0; i < elements.size(); ++i) {
141+
for (std::vector<se::DeviceMemoryBase>::size_type i = 0;
142+
i < elements.size(); ++i) {
142143
VLOG(2) << "recursing onto the tuple elements";
143144
TF_RETURN_IF_ERROR(DeallocateShape(backend, device_ordinal, &elements[i],
144145
shape.tuple_shapes(i),

tensorflow/compiler/xla/service/generic_transfer_manager.cc

+2-2
Original file line numberDiff line numberDiff line change
@@ -118,10 +118,10 @@ GenericTransferManager::ShallowCopyTupleFromDevice(
118118

119119
// Create a DeviceMemoryBase from each void* pointer.
120120
std::vector<se::DeviceMemoryBase> destination;
121-
for (int i = 0; i < element_pointers.size(); ++i) {
121+
for (std::vector<void*>::size_type i = 0; i < element_pointers.size(); ++i) {
122122
if (element_pointers[i] == nullptr &&
123123
!ShapeUtil::HasZeroElements(shape.tuple_shapes(i))) {
124-
return FailedPrecondition("tuple contains nullptr at element %d", i);
124+
return FailedPrecondition("tuple contains nullptr at element %lu", i);
125125
}
126126
int64 buffer_size = ShapeUtil::ByteSizeOf(shape.tuple_shapes(i),
127127
/*pointer_size=*/sizeof(void*));

tensorflow/compiler/xla/service/service.cc

+10-6
Original file line numberDiff line numberDiff line change
@@ -256,7 +256,8 @@ StatusOr<std::vector<const Allocation*>> Service::ResolveAndValidateArguments(
256256
tensorflow::gtl::ArraySlice<const GlobalDataHandle*> arguments,
257257
const Backend* backend, int device_ordinal) {
258258
std::vector<const Allocation*> allocations;
259-
for (int i = 0; i < arguments.size(); ++i) {
259+
for (tensorflow::gtl::ArraySlice<const GlobalDataHandle*>::size_type i = 0;
260+
i < arguments.size(); ++i) {
260261
auto allocation_status = allocation_tracker_.Resolve(*arguments[i]);
261262
if (!allocation_status.ok()) {
262263
return Status(allocation_status.status().code(),
@@ -269,7 +270,7 @@ StatusOr<std::vector<const Allocation*>> Service::ResolveAndValidateArguments(
269270
if (allocation->backend() != backend ||
270271
allocation->device_ordinal() != device_ordinal) {
271272
return InvalidArgument(
272-
"argument %d is on device %s but computation will be executed "
273+
"argument %lu is on device %s but computation will be executed "
273274
"on device %s",
274275
i,
275276
allocation->backend()
@@ -295,13 +296,14 @@ StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
295296
program_shape.parameters_size(), arguments.size());
296297
}
297298

298-
for (int i = 0; i < arguments.size(); ++i) {
299+
for (tensorflow::gtl::ArraySlice<const Allocation*>::size_type i = 0;
300+
i < arguments.size(); ++i) {
299301
// Verify that shape of arguments matches the shape of the arguments in the
300302
// ProgramShape.
301303
if (!ShapeUtil::Compatible(arguments[i]->shape(),
302304
program_shape.parameters(i))) {
303305
return InvalidArgument(
304-
"computation expects parameter %d to have shape %s, given shape %s",
306+
"computation expects parameter %lu to have shape %s, given shape %s",
305307
i, ShapeUtil::HumanString(program_shape.parameters(i)).c_str(),
306308
ShapeUtil::HumanString(arguments[i]->shape()).c_str());
307309
}
@@ -383,7 +385,8 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
383385
hlo_dumper, std::move(executors)));
384386

385387
if (!other_directory_path.empty()) {
386-
for (int64 i = 0; i < versioned_handles.size(); ++i) {
388+
for (std::vector<VersionedComputationHandle>::size_type i = 0;
389+
i < versioned_handles.size(); ++i) {
387390
executables[i]->set_session_module(std::move(session_modules[i]));
388391
}
389392
}
@@ -523,7 +526,8 @@ Service::ExecuteParallelAndRegisterResult(
523526

524527
// Asynchronously launch all executables.
525528
std::vector<GlobalDataHandle> result_handles;
526-
for (int64 i = 0; i < executables.size(); i++) {
529+
for (tensorflow::gtl::ArraySlice<Executable*>::size_type i = 0;
530+
i < executables.size(); i++) {
527531
TF_ASSIGN_OR_RETURN(
528532
perftools::gputools::DeviceMemoryBase result,
529533
executables[i]->ExecuteAsyncOnStream(&run_options[i], arguments[i]));

tensorflow/contrib/android/BUILD

+5-1
Original file line numberDiff line numberDiff line change
@@ -72,13 +72,17 @@ LINKER_SCRIPT = "//tensorflow/contrib/android:jni/version_script.lds"
7272
cc_binary(
7373
name = "libtensorflow_inference.so",
7474
srcs = [],
75-
copts = tf_copts(),
75+
copts = tf_copts() + [
76+
"-ffunction-sections",
77+
"-fdata-sections",
78+
],
7679
linkopts = if_android([
7780
"-landroid",
7881
"-llog",
7982
"-lm",
8083
"-z defs",
8184
"-s",
85+
"-Wl,--gc-sections",
8286
"-Wl,--version-script", # This line must be directly followed by LINKER_SCRIPT.
8387
LINKER_SCRIPT,
8488
]),

tensorflow/contrib/cmake/CMakeLists.txt

+3-1
Original file line numberDiff line numberDiff line change
@@ -56,9 +56,10 @@ mark_as_advanced(DOWNLOAD_LOCATION)
5656
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
5757
add_definitions(-DEIGEN_AVOID_STL_ARRAY)
5858
if(WIN32)
59-
add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11 -DCOMPILER_MSVC -D__VERSION__=\"MSVC\")
59+
add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11 -DCOMPILER_MSVC)
6060
add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
6161
add_definitions(-DTENSORFLOW_USE_EIGEN_THREADPOOL -DEIGEN_HAS_C99_MATH -D_ITERATOR_DEBUG_LEVEL=0)
62+
add_definitions(-DTF_COMPILE_LIBRARY)
6263
add_definitions(-DNDEBUG /O2) # Equivalent of -c opt in Bazel.
6364
add_definitions(/bigobj /nologo /EHsc /GF /FC /MP /Gm-)
6465
# Suppress warnings to reduce build log size.
@@ -190,6 +191,7 @@ if (tensorflow_ENABLE_GPU)
190191
${CUDA_TOOLKIT_TARGET_DIR}/include/cuda.h ${CUDA_TOOLKIT_TARGET_DIR}/include/cuComplex.h
191192
${CUDA_TOOLKIT_TARGET_DIR}/include/cublas_v2.h ${CUDNN_HOME}/include/cudnn.h
192193
${CUDA_TOOLKIT_TARGET_DIR}/include/cufft.h ${CUDA_TOOLKIT_TARGET_DIR}/include/curand.h
194+
${CUDA_TOOLKIT_TARGET_DIR}/include/cuda_runtime_api.h
193195
DESTINATION ${tensorflow_source_dir}/third_party/gpus/cuda/include
194196
)
195197
include_directories(${tensorflow_source_dir}/third_party/gpus)

tensorflow/contrib/cmake/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ Linux.
1313
Current Status
1414
--------------
1515

16-
CMake can be used to build TensorFlow on Windows. See the [getting started documentation](https://www.tensorflow.org/get_started/os_setup.html#pip-installation-on-windows)
16+
CMake can be used to build TensorFlow on Windows. See the [getting started documentation](https://www.tensorflow.org/install/install_windows)
1717
for instructions on how to install a pre-built TensorFlow package on Windows.
1818

1919
### Current known limitations

tensorflow/contrib/cmake/tf_cc_ops.cmake

+40
Original file line numberDiff line numberDiff line change
@@ -120,3 +120,43 @@ list(REMOVE_ITEM tf_cc_srcs ${tf_cc_test_srcs})
120120

121121
add_library(tf_cc OBJECT ${tf_cc_srcs})
122122
add_dependencies(tf_cc tf_cc_framework tf_cc_ops)
123+
124+
set (pywrap_tensorflow_lib "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/pywrap_tensorflow_internal.lib")
125+
add_custom_target(tf_extension_ops)
126+
127+
function(AddUserOps)
128+
cmake_parse_arguments(_AT "" "" "TARGET;SOURCES;GPUSOURCES;DEPENDS;DISTCOPY" ${ARGN})
129+
if (tensorflow_ENABLE_GPU AND _AT_GPUSOURCES)
130+
# if gpu build is enabled and we have gpu specific code,
131+
# hint to cmake that this needs to go to nvcc
132+
set (gpu_source ${_AT_GPUSOURCES})
133+
set (gpu_lib "${_AT_TARGET}_gpu")
134+
set_source_files_properties(${gpu_source} PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ)
135+
cuda_compile(gpu_lib ${gpu_source})
136+
endif()
137+
# create shared library from source and cuda obj
138+
add_library(${_AT_TARGET} SHARED ${_AT_SOURCES} ${gpu_lib})
139+
target_link_libraries(${_AT_TARGET} ${pywrap_tensorflow_lib})
140+
if(WIN32)
141+
if (tensorflow_ENABLE_GPU AND _AT_GPUSOURCES)
142+
# some ops call out to cuda directly; need to link libs for the cuda dlls
143+
target_link_libraries(${_AT_TARGET} ${CUDA_LIBRARIES})
144+
endif()
145+
if (_AT_DISTCOPY)
146+
add_custom_command(TARGET ${_AT_TARGET} POST_BUILD
147+
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:${_AT_TARGET}> ${_AT_DISTCOPY}/)
148+
endif()
149+
endif()
150+
if (_AT_DEPENDS)
151+
add_dependencies(${_AT_TARGET} ${_AT_DEPENDS})
152+
endif()
153+
# make sure TF_COMPILE_LIBRARY is not defined for this target
154+
get_target_property(target_compile_flags ${_AT_TARGET} COMPILE_FLAGS)
155+
if(target_compile_flags STREQUAL "target_compile_flags-NOTFOUND")
156+
set(target_compile_flags "/UTF_COMPILE_LIBRARY")
157+
else()
158+
set(target_compile_flags "${target_compile_flags} /UTF_COMPILE_LIBRARY")
159+
endif()
160+
set_target_properties(${_AT_TARGET} PROPERTIES COMPILE_FLAGS ${target_compile_flags})
161+
add_dependencies(tf_extension_ops ${_AT_TARGET})
162+
endfunction(AddUserOps)

0 commit comments

Comments
 (0)