From 58c4cc4ebbddd34af4acccda52cd05840a0e15d7 Mon Sep 17 00:00:00 2001
From: jagiella
Date: Wed, 16 Oct 2024 16:46:27 +0200
Subject: [PATCH 1/2] Fix Makefile for tf2.16.1 support

---
 README.md                | 32 ++++++++++++++++++++++++++++----
 makefile_build/Makefile  | 29 ++++++++++++++++++++---------
 makefile_build/README.md | 32 ++++++++++++++++++++++++++++----
 3 files changed, 76 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index a8f85cc..e2b14ae 100644
--- a/README.md
+++ b/README.md
@@ -77,12 +77,29 @@ Repeat compilation.
 
 If only building for native systems, it is possible to significantly reduce the complexity of the build by removing Bazel (and Docker). This simple approach builds only what is needed, removes build-time depenency fetching, increases the speed, and uses upstream Debian packages.
 
-To prepare your system, you'll need the following packages (both available on Debian Bookworm, Bullseye or Buster-Backports):
+To prepare your system, you'll need the following packages (both available on Debian Bookworm / Ubuntu 24.04):
 ```
-sudo apt install libabsl-dev libflatbuffers-dev
+sudo apt install libabsl-dev libusb-1.0-0-dev xxd
 ```
-Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.15, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/tree/r2.15) get the latest commit for that stable release and then checkout that address:
+Next, build [FlatBuffers](https://github.com/google/flatbuffers) v23.5.26 required by TensorFlow v2.16.1 from source:
+
+```
+git clone https://github.com/google/flatbuffers.git
+cd flatbuffers/
+git checkout v23.5.26
+mkdir build && cd build
+cmake .. \
+  -DFLATBUFFERS_BUILD_SHAREDLIB=ON \
+  -DFLATBUFFERS_BUILD_TESTS=OFF \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DCMAKE_INSTALL_PREFIX=/usr/local
+make -j$(nproc)
+sudo make install
+```
+
+Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.16.1, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/tree/v2.16.1) and then checkout that address:
+Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.5, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/commit/a4dfb8d1a71385bd6d122e4f27f86dcebb96712d) and then checkout that address:
 ```
 git clone https://github.com/tensorflow/tensorflow
 git checkout v2.16.1
 ```
@@ -90,7 +107,14 @@ git checkout v2.16.1
 
 To build the library:
 ```
-TFROOT= make -f makefile_build/Makefile -j$(nproc) libedgetpu
+git clone https://github.com/google-coral/libedgetpu.git
+cd libedgetpu/makefile_build
+TFROOT=../../tensorflow make -j$(nproc)
+```
+
+To build packages for Debian/Ubuntu:
+```
+debuild -us -uc -tc -b -d
 ```
 
 ## Support
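Before building FlatBuffers, it can be worth confirming which version the chosen TensorFlow checkout actually pins, rather than relying on the value quoted above. The snippet below is only a sketch: it assumes the pin still lives in `tensorflow/workspace2.bzl` (the location has moved between TF releases) and that `flatc` lands on the default `PATH` after `sudo make install`.

```
# Show the FlatBuffers archive pinned by the TensorFlow checkout
# (file path assumed; adjust if the workspace layout differs).
grep -n "flatbuffers" tensorflow/tensorflow/workspace2.bzl | head

# After installing FlatBuffers from source, the compiler should report
# the matching release (expected: 23.5.26 for TF v2.16.1).
flatc --version

# Refresh the linker cache if the shared library went to /usr/local/lib.
sudo ldconfig
```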
diff --git a/makefile_build/Makefile b/makefile_build/Makefile
index ef7d290..80eee4c 100644
--- a/makefile_build/Makefile
+++ b/makefile_build/Makefile
@@ -14,6 +14,16 @@ CC=gcc
 CXX=g++
 FLATC=flatc
 
+ARCH := $(shell uname -m)
+ifeq ($(ARCH),armv7)
+  CPU := armv7a
+else ifeq ($(ARCH),aarch64)
+  CPU := aarch64
+else ifeq ($(ARCH),x86_64)
+  CPU := k8
+endif
+
+
 LIBEDGETPU_CFLAGS := \
 	-fPIC \
 	-Wall \
@@ -22,7 +32,7 @@ LIBEDGETPU_CFLAGS := \
 LIBEDGETPU_CXXFLAGS := \
 	-fPIC \
 	-Wall \
-	-std=c++14 \
+	-std=c++17 \
 	-DDARWINN_PORT_DEFAULT
 
 LIBEDGETPU_LDFLAGS := \
@@ -32,7 +42,7 @@ LIBEDGETPU_LDFLAGS := \
 	-Wl,--version-script=$(BUILDROOT)/tflite/public/libedgetpu.lds \
 	-fuse-ld=gold \
 	-lflatbuffers \
-	-labsl_flags \
+	-labsl_flags_usage \
 	-labsl_flags_internal \
 	-labsl_flags_reflection \
 	-labsl_flags_marshalling \
@@ -59,7 +69,6 @@ LIBEDGETPU_INCLUDES := \
 	$(BUILDDIR)/$(BUILDROOT)
 LIBEDGETPU_INCLUDES := $(addprefix -I,$(LIBEDGETPU_INCLUDES))
 
-LIBEDGETPU_CSRCS := $(TFROOT)/tensorflow/lite/c/common.c
 LIBEDGETPU_COBJS := $(call TOBUILDDIR,$(patsubst %.c,%.o,$(LIBEDGETPU_CSRCS)))
 
 LIBEDGETPU_CCSRCS := \
@@ -140,6 +149,8 @@ LIBEDGETPU_CCSRCS := \
 	$(BUILDROOT)/tflite/edgetpu_c.cc \
 	$(BUILDROOT)/tflite/edgetpu_delegate_for_custom_op.cc \
 	$(BUILDROOT)/tflite/edgetpu_delegate_for_custom_op_tflite_plugin.cc \
+	$(TFROOT)/tensorflow/lite/c/common_internal.cc \
+	$(TFROOT)/tensorflow/lite/array.cc \
 	$(TFROOT)/tensorflow/lite/util.cc
 
 LIBEDGETPU_CCOBJS := $(call TOBUILDDIR,$(patsubst %.cc,%.o,$(LIBEDGETPU_CCSRCS)))
@@ -202,13 +213,13 @@ $(LIBEDGETPU_STD_CCOBJS) : $(BUILDDIR)/%-throttled.o: %.cc
 	@$(CXX) -DTHROTTLE_EDGE_TPU $(LIBEDGETPU_CXXFLAGS) $(LIBEDGETPU_INCLUDES) -c $< -MD -MT $@ -MF $(@:%o=%d) -o $@
 
 libedgetpu: | firmware $(LIBEDGETPU_FLATC_OBJS) $(LIBEDGETPU_COBJS) $(LIBEDGETPU_CCOBJS) $(LIBEDGETPU_MAX_CCOBJS)
-	@mkdir -p $(BUILDDIR)/direct/k8
+	@mkdir -p $(BUILDDIR)/direct/$(CPU)
 	@echo "Building libedgetpu.so"
-	@$(CXX) $(LIBEDGETPU_CCFLAGS) $(LIBEDGETPU_LDFLAGS) $(LIBEDGETPU_COBJS) $(LIBEDGETPU_CCOBJS) $(LIBEDGETPU_MAX_CCOBJS) -o $(BUILDDIR)/direct/k8/libedgetpu.so.1.0
-	@ln -sf $(BUILDDIR)/direct/k8/libedgetpu.so.1.0 $(BUILDDIR)/direct/k8/libedgetpu.so.1
+	@$(CXX) $(LIBEDGETPU_CCFLAGS) $(LIBEDGETPU_LDFLAGS) $(LIBEDGETPU_COBJS) $(LIBEDGETPU_CCOBJS) $(LIBEDGETPU_MAX_CCOBJS) -o $(BUILDDIR)/direct/$(CPU)/libedgetpu.so.1.0
+	@ln -sf $(BUILDDIR)/direct/$(CPU)/libedgetpu.so.1.0 $(BUILDDIR)/direct/$(CPU)/libedgetpu.so.1
 
 libedgetpu-throttled: | firmware $(LIBEDGETPU_FLATC_OBJS) $(LIBEDGETPU_COBJS) $(LIBEDGETPU_CCOBJS) $(LIBEDGETPU_STD_CCOBJS)
-	@mkdir -p $(BUILDDIR)/throttled/k8
+	@mkdir -p $(BUILDDIR)/throttled/$(CPU)
 	@echo "Building throttled libedgetpu.so"
-	@$(CXX) $(LIBEDGETPU_CCFLAGS) $(LIBEDGETPU_LDFLAGS) $(LIBEDGETPU_COBJS) $(LIBEDGETPU_CCOBJS) $(LIBEDGETPU_STD_CCOBJS) -o $(BUILDDIR)/throttled/k8/libedgetpu.so.1.0
-	@ln -sf $(BUILDDIR)/throttled/k8/libedgetpu.so.1.0 $(BUILDDIR)/throttled/k8/libedgetpu.so.1
+	@$(CXX) $(LIBEDGETPU_CCFLAGS) $(LIBEDGETPU_LDFLAGS) $(LIBEDGETPU_COBJS) $(LIBEDGETPU_CCOBJS) $(LIBEDGETPU_STD_CCOBJS) -o $(BUILDDIR)/throttled/$(CPU)/libedgetpu.so.1.0
+	@ln -sf $(BUILDDIR)/throttled/$(CPU)/libedgetpu.so.1.0 $(BUILDDIR)/throttled/$(CPU)/libedgetpu.so.1
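The `ARCH`/`CPU` block added above decides which subdirectory of `$(BUILDDIR)` the library is written to. The plain-shell mirror of that mapping below can be used to predict the output path on the current host before running make; it is only a sketch of the patched logic, and note that many 32-bit ARM kernels report `armv7l` from `uname -m`, which the `armv7` match above would not catch.

```
# Mirror of the uname -m -> CPU mapping added to makefile_build/Makefile.
case "$(uname -m)" in
  armv7)   cpu=armv7a ;;   # many 32-bit ARM systems report armv7l instead
  aarch64) cpu=aarch64 ;;
  x86_64)  cpu=k8 ;;
  *)       cpu="(unmapped: CPU would be left empty)" ;;
esac
echo "libedgetpu.so.1.0 would be written to \$(BUILDDIR)/direct/${cpu}/"
```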
diff --git a/makefile_build/README.md b/makefile_build/README.md
index a0ed003..12338a1 100644
--- a/makefile_build/README.md
+++ b/makefile_build/README.md
@@ -2,18 +2,42 @@
 
 If only building for native systems, it is possible to significantly reduce the complexity of the build by removing Bazel (and Docker). This simple approach builds only what is needed, removes build-time depenency fetching, increases the speed, and uses upstream Debian packages.
 
-To prepare your system, you'll need the following packages (both available on Debian Bullseye):
+To prepare your system, you'll need the following packages (both available on Debian Bookworm / Ubuntu 24.04):
 ```
-sudo apt install libabsl-dev libflatbuffers-dev
+sudo apt install libabsl-dev libusb-1.0-0-dev xxd
 ```
 
+Next, build [FlatBuffers](https://github.com/google/flatbuffers) v23.5.26 required by TensorFlow v2.16.1 from source:
+
+```
+git clone https://github.com/google/flatbuffers.git
+cd flatbuffers/
+git checkout v23.5.26
+mkdir build && cd build
+cmake .. \
+  -DFLATBUFFERS_BUILD_SHAREDLIB=ON \
+  -DFLATBUFFERS_BUILD_TESTS=OFF \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DCMAKE_INSTALL_PREFIX=/usr/local
+make -j$(nproc)
+sudo make install
+```
+
+Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.16.1, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/tree/v2.16.1) and then checkout that address:
 Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.5, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/commit/a4dfb8d1a71385bd6d122e4f27f86dcebb96712d) and then checkout that address:
 ```
 git clone https://github.com/tensorflow/tensorflow
-git checkout a4dfb8d1a71385bd6d122e4f27f86dcebb96712d -b tf2.5
+git checkout v2.16.1
 ```
 
 To build the library:
 ```
-TFROOT= make -j$(nproc) libedgetpu
+git clone https://github.com/google-coral/libedgetpu.git
+cd libedgetpu/makefile_build
+TFROOT=../../tensorflow make -j$(nproc)
+```
+
+To build packages for Debian/Ubuntu:
+```
+debuild -us -uc -tc -b -d
 ```
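After a successful make run it is worth checking that the freshly linked library actually resolves its Abseil and FlatBuffers dependencies before moving on to packaging. The commands below are a sketch only: they assume the makefile build places its output under `out/` inside the libedgetpu tree and that the host maps to the `k8` (x86_64) directory, so adjust the path for your CPU.

```
# Inspect the build output (layout assumed: out/direct/<cpu>/).
ls -l out/direct/k8/libedgetpu.so.1.0 out/direct/k8/libedgetpu.so.1

# Every dependency should resolve; any "not found" entry here (for the
# libabsl_* libraries or libflatbuffers) means the setup from the steps
# above is incomplete.
ldd out/direct/k8/libedgetpu.so.1.0
```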
From e53aa9baf05ae70e8d7df4836417cd6cf027b4d7 Mon Sep 17 00:00:00 2001
From: jagiella
Date: Wed, 16 Oct 2024 17:05:58 +0200
Subject: [PATCH 2/2] Adapt readme

---
 README.md                | 10 ++++------
 makefile_build/README.md | 10 ++++------
 2 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index e2b14ae..bf852c9 100644
--- a/README.md
+++ b/README.md
@@ -85,9 +85,8 @@ sudo apt install libabsl-dev libusb-1.0-0-dev xxd
 Next, build [FlatBuffers](https://github.com/google/flatbuffers) v23.5.26 required by TensorFlow v2.16.1 from source:
 
 ```
-git clone https://github.com/google/flatbuffers.git
+git clone --depth 1 --branch v23.5.26 https://github.com/google/flatbuffers.git
 cd flatbuffers/
-git checkout v23.5.26
 mkdir build && cd build
 cmake .. \
   -DFLATBUFFERS_BUILD_SHAREDLIB=ON \
@@ -101,15 +100,14 @@
 Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.16.1, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/tree/v2.16.1) and then checkout that address:
 Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.5, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/commit/a4dfb8d1a71385bd6d122e4f27f86dcebb96712d) and then checkout that address:
 ```
-git clone https://github.com/tensorflow/tensorflow
-git checkout v2.16.1
+git clone --depth 1 --branch v2.16.1 https://github.com/tensorflow/tensorflow
 ```
 
 To build the library:
 ```
 git clone https://github.com/google-coral/libedgetpu.git
-cd libedgetpu/makefile_build
-TFROOT=../../tensorflow make -j$(nproc)
+cd libedgetpu
+TFROOT= make -f makefile_build/Makefile -j$(nproc)
 ```
 
 To build packages for Debian/Ubuntu:
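Putting the patch-2 instructions together, a complete invocation could look like the sketch below. The side-by-side directory layout and the absolute `TFROOT` value are assumptions rather than something the patch prescribes, and the trailing `libedgetpu` argument explicitly selects the `libedgetpu` target defined in `makefile_build/Makefile`.

```
# Assumed layout: tensorflow/ and libedgetpu/ cloned side by side.
git clone --depth 1 --branch v2.16.1 https://github.com/tensorflow/tensorflow
git clone https://github.com/google-coral/libedgetpu.git
cd libedgetpu

# Point TFROOT at the TensorFlow checkout; an absolute path avoids any
# ambiguity about what it is resolved against.
TFROOT="$(pwd)/../tensorflow" make -f makefile_build/Makefile -j"$(nproc)" libedgetpu
```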
diff --git a/makefile_build/README.md b/makefile_build/README.md
index 12338a1..c11ed6c 100644
--- a/makefile_build/README.md
+++ b/makefile_build/README.md
@@ -10,9 +10,8 @@ sudo apt install libabsl-dev libusb-1.0-0-dev xxd
 Next, build [FlatBuffers](https://github.com/google/flatbuffers) v23.5.26 required by TensorFlow v2.16.1 from source:
 
 ```
-git clone https://github.com/google/flatbuffers.git
+git clone --depth 1 --branch v23.5.26 https://github.com/google/flatbuffers.git
 cd flatbuffers/
-git checkout v23.5.26
 mkdir build && cd build
 cmake .. \
   -DFLATBUFFERS_BUILD_SHAREDLIB=ON \
@@ -26,15 +25,14 @@
 Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.16.1, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/tree/v2.16.1) and then checkout that address:
 Next, you'll need to clone the [Tensorflow Repo](https://github.com/tensorflow/tensorflow) at the desired checkout (using TF head isn't advised). If you are planning to use libcoral or pycoral libraries, this should match the ones in those repos' WORKSPACE files. For example, if you are using TF2.5, we can check that [tag in the TF Repo](https://github.com/tensorflow/tensorflow/commit/a4dfb8d1a71385bd6d122e4f27f86dcebb96712d) and then checkout that address:
 ```
-git clone https://github.com/tensorflow/tensorflow
-git checkout v2.16.1
+git clone --depth 1 --branch v2.16.1 https://github.com/tensorflow/tensorflow
 ```
 
 To build the library:
 ```
 git clone https://github.com/google-coral/libedgetpu.git
-cd libedgetpu/makefile_build
-TFROOT=../../tensorflow make -j$(nproc)
+cd libedgetpu
+TFROOT= make -f makefile_build/Makefile -j$(nproc)
 ```
 
 To build packages for Debian/Ubuntu:
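Finally, a sketch of what installing the result might look like, either by hand or from the packages that `debuild` leaves in the parent directory. The destination under `/usr/local/lib`, the `k8` output directory and the `libedgetpu1-std` package-name pattern are assumptions that may need adjusting for your distribution and architecture.

```
# Option 1: install the library manually (paths assumed).
sudo install -m 0644 out/direct/k8/libedgetpu.so.1.0 /usr/local/lib/
sudo ln -sf /usr/local/lib/libedgetpu.so.1.0 /usr/local/lib/libedgetpu.so.1
sudo ldconfig
ldconfig -p | grep edgetpu        # should now list libedgetpu.so.1

# Option 2: debuild writes its .deb packages one directory above the
# source tree; inspect them and install the variant you want
# (package names assumed to follow the usual libedgetpu1-std / -max scheme).
ls ../*.deb
sudo apt install ../libedgetpu1-std_*.deb
```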