TensorFlow-Raspberry-Pi_64-bit
Not an issue, but could you also provide TensorFlow Addons?
I am struggling as I need Addons, but the standard GitHub install doesn't work with 64-bit, although it does with armv7. I guess it's about library locations, but it's probably above my head.
I already looked at it at the time. Turns out to be tricky. Try again next week. Stay tuned.
I seem to have got TF 2.4.0 & TensorFlow Addons v0.12.0 working on armv7 (just testing now).
After installing Bazel 3.1.0 and TF 2.4, I just mangled configure.py, starting from https://github.com/koenvervloesem/tensorflow-addons-on-arm/blob/master/scripts/build_tfa.sh (my version is pasted further down), but used the standard
bazel build build_pip_pkg
bazel-bin/build_pip_pkg artifacts
pip install artifacts/tensorflow_addons-*.whl
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Usage: python configure.py
#
import os
import pathlib
import platform
import logging

import tensorflow as tf

_TFA_BAZELRC = ".bazelrc"


# Writes variables to bazelrc file
def write(line):
    with open(_TFA_BAZELRC, "a") as f:
        f.write(line + "\n")


def write_action_env(var_name, var):
    write('build --action_env {}="{}"'.format(var_name, var))


def is_macos():
    return platform.system() == "Darwin"


def is_windows():
    return platform.system() == "Windows"


def is_linux():
    return platform.system() == "Linux"


def is_raspi_arm():
    return os.uname()[4] == "armv7l"


def get_tf_header_dir():
    import tensorflow as tf

    tf_header_dir = tf.sysconfig.get_compile_flags()[0][2:]
    if is_windows():
        tf_header_dir = tf_header_dir.replace("\\", "/")
    return tf_header_dir


def get_tf_shared_lib_dir():
    import tensorflow as tf

    # OS Specific parsing
    if is_windows():
        tf_shared_lib_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
        return tf_shared_lib_dir.replace("\\", "/")
    elif is_raspi_arm():
        return tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
    else:
        return tf.sysconfig.get_link_flags()[0][2:]


# Converts the linkflag namespec to the full shared library name
def get_shared_lib_name():
    import tensorflow as tf

    namespec = tf.sysconfig.get_link_flags()
    if is_macos():
        # MacOS
        return "lib" + namespec[1][2:] + ".dylib"
    elif is_windows():
        # Windows
        return "_pywrap_tensorflow_internal.lib"
    elif is_raspi_arm():
        # The below command for linux would return an empty list
        return "_pywrap_tensorflow_internal.so"
    else:
        # Linux
        return namespec[1][3:]


def create_build_configuration():
    print()
    print("Configuring TensorFlow Addons to be built from source...")

    if os.path.isfile(_TFA_BAZELRC):
        os.remove(_TFA_BAZELRC)

    logging.disable(logging.WARNING)

    write_action_env("TF_HEADER_DIR", get_tf_header_dir())
    write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
    write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
    write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)

    write("build --spawn_strategy=standalone")
    write("build --strategy=Genrule=standalone")
    write("build -c opt")

    if is_windows():
        write("build --config=windows")
        write("build:windows --enable_runfiles")
        write("build:windows --copt=/experimental:preprocessor")
        write("build:windows --host_copt=/experimental:preprocessor")
        write("build:windows --copt=/arch=AVX2")

    if is_macos() or is_linux():
        #write("build --copt=-mavx2")
        write("build --copt=-mfpu=neon-vfpv4")
        write("build --copt=-funsafe-math-optimizations")
        write("build --copt=-ftree-vectorize")
        write("build --copt=-fomit-frame-pointer")

    if os.getenv("TF_NEED_CUDA", "0") == "1":
        print("> Building GPU & CPU ops")
        configure_cuda()
    else:
        print("> Building only CPU ops")

    print()
    print("Build configurations successfully written to", _TFA_BAZELRC, ":\n")
    print(pathlib.Path(_TFA_BAZELRC).read_text())


def configure_cuda():
    write_action_env("TF_NEED_CUDA", "1")
    write_action_env(
        "CUDA_TOOLKIT_PATH", os.getenv("CUDA_TOOLKIT_PATH", "/usr/local/cuda")
    )
    write_action_env(
        "CUDNN_INSTALL_PATH",
        os.getenv("CUDNN_INSTALL_PATH", "/usr/lib/x86_64-linux-gnu"),
    )
    write_action_env("TF_CUDA_VERSION", os.getenv("TF_CUDA_VERSION", "11"))
    write_action_env("TF_CUDNN_VERSION", os.getenv("TF_CUDNN_VERSION", "8"))

    write("test --config=cuda")
    write("build --config=cuda")
    write("build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true")
    write("build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain")


if __name__ == "__main__":
    create_build_configuration()
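For anyone wondering where the raspi branches above come from: on the ARM wheels tf.sysconfig.get_link_flags() can come back empty, so the script falls back to slicing the include path instead. A small diagnostic you can run on your own install to see the raw values being sliced (the printed paths will obviously differ per wheel):

# Print what tf.sysconfig reports; configure.py above slices these strings
# to produce TF_HEADER_DIR and TF_SHARED_LIBRARY_DIR.
import tensorflow as tf

compile_flags = tf.sysconfig.get_compile_flags()
link_flags = tf.sysconfig.get_link_flags()
print("compile flags:", compile_flags)  # first entry is '-I<...>/tensorflow/include'
print("link flags:", link_flags)        # may be empty on some ARM builds

header_dir = compile_flags[0][2:]                   # strip the leading '-I'
shared_lib_dir = compile_flags[0][2:-7] + "python"  # strip '-I' and trailing 'include', append 'python'
print("TF_HEADER_DIR =", header_dir)
print("TF_SHARED_LIBRARY_DIR =", shared_lib_dir)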
The missing _pywrap_tensorflow_internal.so I handled with the usual "if in doubt, symlink it". I actually made two symlinks, because sudo ld -l_pywrap_tensorflow_internal --verbose reported the wrong search paths (the linker in use is actually ld.gold), so I created lib_pywrap_tensorflow_internal.so in both /usr/lib and /usr/local/lib.
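If you would rather script that workaround than hunt for the file by hand, something along these lines should do it (a rough sketch only: it assumes the .so lives under tensorflow/python in site-packages, and it needs root to write into /usr/lib and /usr/local/lib):

# Sketch: symlink the TensorFlow _pywrap library into standard linker paths.
# The target directories and the lib_ prefix mirror what was done by hand above.
import glob
import os
import tensorflow as tf

# tensorflow/python inside the installed wheel is where the .so normally lives
tf_python_dir = tf.sysconfig.get_compile_flags()[0][2:-7] + "python"
matches = glob.glob(os.path.join(tf_python_dir, "_pywrap_tensorflow_internal*.so"))
if not matches:
    raise SystemExit("no _pywrap_tensorflow_internal*.so found in " + tf_python_dir)

src = matches[0]
for lib_dir in ("/usr/lib", "/usr/local/lib"):
    dst = os.path.join(lib_dir, "lib_pywrap_tensorflow_internal.so")
    if not os.path.exists(dst):
        os.symlink(src, dst)  # run as root
        print("linked", dst, "->", src)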
64-bit would be great, but I am also going to try with 2.4.1. I am running the Google Research streaming KWS; 2.4 runs, but the results show all is not well. It works great on x86_64, yet it is so problematic getting Addons to run with TensorFlow on ARM.
https://github.com/google-research/google-research/tree/master/kws_streaming is actually such a good test, as it's sort of perfect and probably much of the rationale for the current TF & TFL additions.
You can dive in with the README of https://github.com/google-research/google-research/blob/master/kws_streaming/experiments/kws_experiments_paper_12_labels.md
Addons is used and seems to work for me, so it is likely a very good test for Addons, but the problem is that a particular 'addon' might not be working, and testing them all is a little confusing.
Using the above is likely a good test of TF / TFL / Addons and XNNPACK, as it uses them all.
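As a much quicker sanity check than the full KWS pipeline, importing the freshly built wheel and forcing the custom-op shared objects to load catches most build problems. A minimal smoke test, assuming a reasonably recent Addons release (register_all has been available since roughly 0.9):

# Minimal smoke test for a freshly built tensorflow_addons wheel.
import tensorflow as tf
import tensorflow_addons as tfa

print("TF :", tf.__version__)
print("TFA:", tfa.__version__)

# register_all() loads the compiled custom-op libraries, so a bad
# TF_SHARED_LIBRARY_DIR or missing _pywrap symlink tends to fail here.
tfa.register_all()

# Exercise one op end to end.
x = tf.constant([-1.0, 0.0, 1.0])
print(tfa.activations.mish(x).numpy())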