diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index b52e2d04b646..67cb5ce7d0e5 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -944,15 +944,17 @@ if(LLVM_INCLUDE_TESTS)
   get_errc_messages(LLVM_LIT_ERRC_MESSAGES)
 endif()
 
-# For up-to-date instructions for installing the Tensorflow dependency, refer to
+# For up-to-date instructions for installing the TFLite dependency, refer to
 # the bot setup script: https://github.com/google/ml-compiler-opt/blob/main/buildbot/buildbot_init.sh
-# In this case, the latest C API library is available for download from
-# https://www.tensorflow.org/install/lang_c.
-# We will expose the conditional compilation variable,
-# LLVM_HAVE_TF_API, through llvm-config.h, so that a user of the LLVM library may
-# also leverage the dependency.
 set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
-if (TENSORFLOW_C_LIB_PATH)
+set(LLVM_HAVE_TFLITE "" CACHE BOOL "Use tflite")
+if (LLVM_HAVE_TFLITE)
+  find_package(protobuf REQUIRED)
+  find_package(tensorflow-lite REQUIRED)
+  set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
+  set(LLVM_PROTOBUF_OUT_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/protobuf_gen)
+  include_directories(${LLVM_PROTOBUF_OUT_DIR})
+elseif (TENSORFLOW_C_LIB_PATH)
   find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib NO_DEFAULT_PATH REQUIRED)
   # Currently, the protobuf headers are distributed with the pip package that corresponds to the version
   # of the C API library.
@@ -989,7 +991,12 @@ if (NOT TENSORFLOW_AOT_PATH STREQUAL "")
   set(TENSORFLOW_AOT_COMPILER
     "${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
     CACHE PATH "Path to the Tensorflow AOT compiler")
-  include_directories(${TENSORFLOW_AOT_PATH}/include)
+  # Copy the headers to avoid clashing protobuf codegen when building both AOT and development mode.
+  # We plan to replace protobuf with a simpler alternative, so this will go away.
+  file(COPY ${TENSORFLOW_AOT_PATH}/include DESTINATION ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/tensorflow
+    PATTERN "google/*" EXCLUDE
+    PATTERN "*.pb.h" EXCLUDE)
+  include_directories(${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/tensorflow/include)
 
   add_subdirectory(${TENSORFLOW_AOT_PATH}/xla_aot_runtime_src
     ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)
   install(TARGETS tf_xla_runtime EXPORT LLVMExports
diff --git a/llvm/cmake/modules/TensorFlowCompile.cmake b/llvm/cmake/modules/TensorFlowCompile.cmake
index d5ff44c9330d..45ec3fe7c327 100644
--- a/llvm/cmake/modules/TensorFlowCompile.cmake
+++ b/llvm/cmake/modules/TensorFlowCompile.cmake
@@ -116,3 +116,18 @@ function(tf_find_and_compile model default_url default_path test_model_generator
   set(MLLinkDeps ${MLLinkDeps} tf_xla_runtime PARENT_SCOPE)
   add_definitions(-DLLVM_HAVE_TF_AOT_${fname_allcaps})
 endfunction()
+
+function(build_proto)
+  foreach (P ${ARGV})
+    set(PB_SRCS ${PB_SRCS} ${LLVM_PROTOBUF_OUT_DIR}/${P}.pb.cc)
+    set(PB_HDRS ${PB_HDRS} ${LLVM_PROTOBUF_OUT_DIR}/${P}.pb.h)
+    set(PBS ${PBS} ${TENSORFLOW_SRC_DIR}/${P}.proto)
+  endforeach()
+  add_custom_command(OUTPUT ${PB_SRCS} ${PB_HDRS}
+    COMMAND protobuf::protoc
+    ARGS --proto_path=${TENSORFLOW_SRC_DIR} --cpp_out=${LLVM_PROTOBUF_OUT_DIR} ${PBS})
+  set_source_files_properties(${PB_SRCS} PROPERTIES
+    GENERATED 1)
+  set(GeneratedMLSources ${GeneratedMLSources} ${PB_SRCS} PARENT_SCOPE)
+  set(MLDeps ${MLDeps} PARENT_SCOPE)
+endfunction()
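For orientation (not part of the patch): `build_proto()` above only runs `protoc`; the generated headers land under `LLVM_PROTOBUF_OUT_DIR`, which the `include_directories()` call in llvm/CMakeLists.txt puts on the include path. A hypothetical consumer of the generated API, assuming the three protos compiled by the `build_proto` call in lib/Analysis/CMakeLists.txt further down; the `"reward"` key is made up for illustration:

```cpp
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"

#include <cstdint>

// Build a tensorflow.Example holding one int64 feature. These accessors are
// the ordinary protobuf-generated API for the messages in example.proto and
// feature.proto.
tensorflow::Example makeExample(int64_t Reward) {
  tensorflow::Example E;
  auto &FeatureMap = *E.mutable_features()->mutable_feature();
  FeatureMap["reward"].mutable_int64_list()->add_value(Reward);
  return E;
}
```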
diff --git a/llvm/include/llvm/Config/llvm-config.h.cmake b/llvm/include/llvm/Config/llvm-config.h.cmake
index 96fbe6f771d2..b289db1d46ec 100644
--- a/llvm/include/llvm/Config/llvm-config.h.cmake
+++ b/llvm/include/llvm/Config/llvm-config.h.cmake
@@ -101,6 +101,9 @@
 /* Define if LLVM was built with a dependency to the libtensorflow dynamic library */
 #cmakedefine LLVM_HAVE_TF_API
 
+/* Define if LLVM is using tflite instead of libtensorflow */
+#cmakedefine LLVM_HAVE_TFLITE
+
 /* Define to 1 if you have the <sysexits.h> header file. */
 #cmakedefine HAVE_SYSEXITS_H ${HAVE_SYSEXITS_H}
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index a122851e5495..4bf14a86225d 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -18,7 +18,17 @@ if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)
   endif()
 
   if (DEFINED LLVM_HAVE_TF_API)
-    list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
+    if (DEFINED LLVM_HAVE_TFLITE)
+      build_proto(
+        tensorflow/core/protobuf/error_codes
+        tensorflow/core/example/feature
+        tensorflow/core/example/example)
+      list(APPEND MLLinkDeps
+        tensorflow-lite::tensorflow-lite
+        protobuf::libprotobuf)
+    else()
+      list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
+    endif()
   endif()
 endif()
 
@@ -130,6 +140,7 @@ add_llvm_component_library(LLVMAnalysis
   SyncDependenceAnalysis.cpp
   SyntheticCountsUtils.cpp
   TFUtils.cpp
+  TFLiteUtils.cpp
   TargetLibraryInfo.cpp
   TargetTransformInfo.cpp
   TensorSpec.cpp
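For orientation (not part of the patch): both copies of llvm-config.h.cmake (the Bazel one appears at the end of this patch) now carry the pair of macros, and the CMake branch above force-sets `LLVM_HAVE_TF_API` whenever `LLVM_HAVE_TFLITE` is chosen, so TFLite implies the TF API. A minimal sketch of the resulting guard pattern in a hypothetical consumer, mirroring the guards used in TFUtils.cpp and TFLiteUtils.cpp below:

```cpp
#include "llvm/Config/llvm-config.h"

#if defined(LLVM_HAVE_TF_API) && !defined(LLVM_HAVE_TFLITE)
// Built against the libtensorflow C API: TFUtils.cpp provides the
// TFModelEvaluator implementation.
#elif defined(LLVM_HAVE_TFLITE)
// Built against TFLite: TFLiteUtils.cpp provides the same interface.
#endif
```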
diff --git a/llvm/lib/Analysis/TFLiteUtils.cpp b/llvm/lib/Analysis/TFLiteUtils.cpp
new file mode 100644
index 000000000000..96eabb2b2c38
--- /dev/null
+++ b/llvm/lib/Analysis/TFLiteUtils.cpp
@@ -0,0 +1,232 @@
+//===- TFLiteUtils.cpp - TFLite-based evaluation utilities ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements utilities for interfacing with TFLite.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Config/config.h"
+#if defined(LLVM_HAVE_TFLITE)
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Support/Base64.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#include "tensorflow/lite/model_builder.h"
+#include "tensorflow/lite/op_resolver.h"
+
+#include <cassert>
+#include <cstring>
+#include <numeric>
+
+using namespace llvm;
+
+namespace llvm {
+class EvaluationResultImpl {
+public:
+  EvaluationResultImpl(const std::vector<const TfLiteTensor *> &Outputs)
+      : Outputs(Outputs) {}
+
+  const TfLiteTensor *getOutput(size_t I) { return Outputs[I]; }
+
+  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
+  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
+
+private:
+  const std::vector<const TfLiteTensor *> Outputs;
+};
+
+class TFModelEvaluatorImpl {
+public:
+  TFModelEvaluatorImpl(StringRef SavedModelPath,
+                       const std::vector<TensorSpec> &InputSpecs,
+                       function_ref<TensorSpec(size_t)> GetOutputSpecs,
+                       size_t OutputSpecsSize, const char *Tags);
+
+  bool isValid() const { return IsValid; }
+  size_t outputSize() const { return Output.size(); }
+
+  std::unique_ptr<EvaluationResultImpl> evaluate() {
+    Interpreter->Invoke();
+    return std::make_unique<EvaluationResultImpl>(Output);
+  }
+
+  const std::vector<TfLiteTensor *> &getInput() const { return Input; }
+
+  ~TFModelEvaluatorImpl();
+
+private:
+  std::unique_ptr<tflite::FlatBufferModel> Model;
+
+  /// The objects necessary for carrying out an evaluation of the SavedModel.
+  /// They are expensive to set up, and we maintain them across all the
+  /// evaluations of the model.
+  std::unique_ptr<tflite::Interpreter> Interpreter;
+
+  /// The input tensors. We set up the tensors once and just mutate their
+  /// scalars before each evaluation. The input tensors keep their value after
+  /// an evaluation.
+  std::vector<TfLiteTensor *> Input;
+
+  /// The output nodes.
+  std::vector<const TfLiteTensor *> Output;
+
+  void invalidate() { IsValid = false; }
+
+  bool IsValid = true;
+
+  /// Reusable utility for ensuring we can bind the requested Name to a node in
+  /// the SavedModel Graph.
+  bool checkReportAndInvalidate(const TfLiteTensor *Tensor,
+                                const TensorSpec &Spec);
+};
+
+} // namespace llvm
+
+TFModelEvaluatorImpl::TFModelEvaluatorImpl(
+    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
+    const char *Tags = "serve")
+    : Input(InputSpecs.size()), Output(OutputSpecsSize) {
+  // FIXME: make ErrorReporter a member (may also need subclassing
+  // StatefulErrorReporter) to easily get the latest error status, for
+  // debugging.
+  tflite::StderrReporter ErrorReporter;
+  SmallVector<char, 128> TFLitePathBuff;
+  llvm::sys::path::append(TFLitePathBuff, SavedModelPath, "model.tflite");
+  StringRef TFLitePath(TFLitePathBuff.data(), TFLitePathBuff.size());
+  Model = tflite::FlatBufferModel::BuildFromFile(TFLitePath.str().c_str(),
+                                                 &ErrorReporter);
+  if (!Model) {
+    invalidate();
+    return;
+  }
+
+  tflite::ops::builtin::BuiltinOpResolver Resolver;
+  tflite::InterpreterBuilder Builder(*Model, Resolver);
+  Builder(&Interpreter);
+
+  if (!Interpreter ||
+      Interpreter->AllocateTensors() != TfLiteStatus::kTfLiteOk) {
+    invalidate();
+    return;
+  }
+  // Known inputs and outputs
+  StringMap<int> InputsMap;
+  StringMap<int> OutputsMap;
+  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
+    InputsMap[Interpreter->GetInputName(I)] = I;
+  for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
+    OutputsMap[Interpreter->GetOutputName(I)] = I;
+
+  for (size_t I = 0; I < InputSpecs.size(); ++I) {
+    auto &InputSpec = InputSpecs[I];
+    auto MapI = InputsMap.find(InputSpec.name() + ":" +
+                               std::to_string(InputSpec.port()));
+    if (MapI == InputsMap.end()) {
+      Input[I] = nullptr;
+      continue;
+    }
+    Input[I] = Interpreter->tensor(MapI->second);
+    if (!checkReportAndInvalidate(Input[I], InputSpec))
+      return;
+    std::memset(Input[I]->data.data, 0,
+                InputSpecs[I].getTotalTensorBufferSize());
+  }
+
+  for (size_t I = 0; I < OutputSpecsSize; ++I) {
+    auto OutputSpec = GetOutputSpecs(I);
+    Output[I] = Interpreter->output_tensor(
+        OutputsMap[OutputSpec.name() + ":" +
+                   std::to_string(OutputSpec.port())]);
+    if (!checkReportAndInvalidate(Output[I], OutputSpec))
+      return;
+  }
+}
+
+TFModelEvaluator::TFModelEvaluator(
+    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
+    const char *Tags)
+    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
+                                    OutputSpecsSize, Tags)) {
+  if (!Impl->isValid())
+    Impl.reset();
+}
+
+TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
+                                   const std::vector<TensorSpec> &InputSpecs,
+                                   const std::vector<TensorSpec> &OutputSpecs,
+                                   const char *Tags)
+    : TFModelEvaluator(
+          SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
+          OutputSpecs.size(), Tags) {}
+
+TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}
+
+bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
+                                                    const TensorSpec &Spec) {
+  if (!Tensor) {
+    errs() << "Could not find TF_Output named: " + Spec.name();
+    IsValid = false;
+    // Don't dereference a missing tensor below.
+    return IsValid;
+  }
+  if (Spec.getTotalTensorBufferSize() != Tensor->bytes)
+    IsValid = false;
+
+  // If the total sizes match, there could still be a mismatch in the shape.
+  // We ignore that for now.
+
+  return IsValid;
+}
+
+Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
+  if (!isValid())
+    return None;
+  return EvaluationResult(Impl->evaluate());
+}
+
+void *TFModelEvaluator::getUntypedInput(size_t Index) {
+  TfLiteTensor *T = Impl->getInput()[Index];
+  if (!T)
+    return nullptr;
+  return T->data.data;
+}
+
+TFModelEvaluator::EvaluationResult::EvaluationResult(
+    std::unique_ptr<EvaluationResultImpl> Impl)
+    : Impl(std::move(Impl)) {}
+
+TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
+    : Impl(std::move(Other.Impl)) {}
+
+TFModelEvaluator::EvaluationResult &
+TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
+  Impl = std::move(Other.Impl);
+  return *this;
+}
+
+void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
+  return Impl->getOutput(Index)->data.data;
+}
+
+const void *
+TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
+  return Impl->getOutput(Index)->data.data;
+}
+
+TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
+TFModelEvaluator::~TFModelEvaluator() {}
+
+#endif // defined(LLVM_HAVE_TFLITE)
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index de5dde8d2ea4..980ee5908f98 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -10,7 +10,7 @@
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
+#if defined(LLVM_HAVE_TF_API) && !defined(LLVM_HAVE_TFLITE)
 
 #include "llvm/ADT/Twine.h"
 #include "llvm/Analysis/Utils/TFUtils.h"
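For context (not part of the patch): a minimal sketch of how a client drives `TFModelEvaluator`, whose interface is declared in llvm/Analysis/Utils/TFUtils.h and is shared by both backends. The directory and tensor names below are made up; with `LLVM_HAVE_TFLITE`, the constructor loads `<dir>/model.tflite` and resolves tensors as `"name:port"`, as implemented above:

```cpp
#include "llvm/Analysis/Utils/TFUtils.h"
#include <vector>

using namespace llvm;

void runOnce() {
  // Hypothetical specs; names must match the model's input/output tensors.
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int32_t>("serving_default_input_1", {1})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<int32_t>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator("model_dir", InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return; // construction failed: missing model or mismatched specs

  // Input buffers are zero-initialized and persist across evaluations.
  *Evaluator.getInput<int32_t>(0) = 42;
  if (auto ER = Evaluator.evaluate()) {
    int32_t Result = *ER->getTensorValue<int32_t>(0);
    (void)Result;
  }
}
```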
+""" + +import tensorflow as tf +import os +import sys +from tf_agents.policies import greedy_policy + + +def main(argv): + assert len(argv) == 3 + sm_dir = argv[1] + tfl_dir = argv[2] + tf.io.gfile.makedirs(tfl_dir) + tfl_path = os.path.join(tfl_dir, 'model.tflite') + converter = tf.lite.TFLiteConverter.from_saved_model(sm_dir) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + ] + tfl_model = converter.convert() + with tf.io.gfile.GFile(tfl_path, 'wb') as f: + f.write(tfl_model) + + json_file = 'output_spec.json' + src_json = os.path.join(sm_dir, json_file) + if tf.io.gfile.exists(src_json): + tf.io.gfile.copy(src_json, + os.path.join(tfl_dir, json_file)) + +if __name__ == '__main__': + main(sys.argv) diff --git a/llvm/test/CodeGen/MLRegalloc/dev-mode-log-2-fcts.ll b/llvm/test/CodeGen/MLRegalloc/dev-mode-log-2-fcts.ll index c22d95d698a4..14183ca8a9b6 100644 --- a/llvm/test/CodeGen/MLRegalloc/dev-mode-log-2-fcts.ll +++ b/llvm/test/CodeGen/MLRegalloc/dev-mode-log-2-fcts.ll @@ -11,8 +11,9 @@ ; RUN: sed -i 's/\\n/ /g' %t1 ; RUN: FileCheck --input-file %t1 %s -; RUN: rm -rf %t && mkdir %t -; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t +; RUN: rm -rf %t %t_savedmodel +; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel +; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t ; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=development \ ; RUN: -regalloc-training-log=%t2 -tfutils-text-log -regalloc-model=%t < %s ; RUN: sed -i 's/ \+/ /g' %t2 diff --git a/llvm/test/CodeGen/MLRegalloc/dev-mode-logging.ll b/llvm/test/CodeGen/MLRegalloc/dev-mode-logging.ll index 351d46376cb8..e1a3dada665e 100644 --- a/llvm/test/CodeGen/MLRegalloc/dev-mode-logging.ll +++ b/llvm/test/CodeGen/MLRegalloc/dev-mode-logging.ll @@ -12,8 +12,9 @@ ; RUN: FileCheck --input-file %t1 %s --check-prefixes=CHECK,NOML ; RUN: diff %t1 %S/Inputs/reference-log-noml.txt -; RUN: rm -rf %t && mkdir %t -; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t +; RUN: rm -rf %t_savedmodel %t +; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel +; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t ; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=development \ ; RUN: -regalloc-training-log=%t2 -tfutils-text-log -regalloc-model=%t < %S/Inputs/input.ll ; RUN: sed -i 's/ \+/ /g' %t2 diff --git a/llvm/test/CodeGen/MLRegalloc/dev-rel-equivalence.ll b/llvm/test/CodeGen/MLRegalloc/dev-rel-equivalence.ll index 6f1b265480b4..43c363750f93 100644 --- a/llvm/test/CodeGen/MLRegalloc/dev-rel-equivalence.ll +++ b/llvm/test/CodeGen/MLRegalloc/dev-rel-equivalence.ll @@ -11,7 +11,8 @@ ; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=release \ ; RUN: %S/Inputs/input.ll -o %t.release -; RUN: rm -rf %t && mkdir %t +; RUN: rm -rf %t %t_savedmodel +; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t ; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t ; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-advisor=development \ ; RUN: -regalloc-model=%t %S/Inputs/input.ll -o %t.development diff --git a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll index 
diff --git a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
index 48c2e0a301a9..d272bb0f7a83 100644
--- a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
+++ b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
@@ -8,7 +8,9 @@
 ;
 ; Generate mock model
 ; RUN: rm -rf %t
-; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
+; RUN: rm -rf %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
 ;
 ; When the bounds are very wide ("no bounds"), all inlinings happen.
 ; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -tfutils-text-log -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
diff --git a/llvm/test/Transforms/Inline/ML/development-training-log.ll b/llvm/test/Transforms/Inline/ML/development-training-log.ll
index 7d3b59207582..a0b8d730a19b 100644
--- a/llvm/test/Transforms/Inline/ML/development-training-log.ll
+++ b/llvm/test/Transforms/Inline/ML/development-training-log.ll
@@ -1,8 +1,9 @@
 ; Test that we can produce a log if we have or do not have a model, in development mode.
 ; REQUIRES: have_tf_api
 ; Generate mock model
-; RUN: rm -rf %t
-; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
+; RUN: rm -rf %t_savedmodel %t
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
 ;
 ; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
 ; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
diff --git a/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll b/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
index d902b9e4c778..3bfb9b628949 100644
--- a/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
+++ b/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
@@ -6,7 +6,9 @@
 ; for the 'release' mode.
 ;
 ; REQUIRES: have_tf_api
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
+; RUN: rm -rf %t
+; RUN: rm -rf %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t_savedmodel
+; RUN: %python %S/../../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
 ; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=default -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=DEFAULT
 ; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -ml-inliner-model-under-training=%t -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK
diff --git a/llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/model.tflite b/llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/model.tflite
new file mode 100644
index 000000000000..7d4097256911
Binary files /dev/null and b/llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/model.tflite differ
diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
index 6d70ab08636e..fe3b115822be 100644
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -89,15 +89,6 @@ TEST(TFUtilsTest, EvalError) {
       TensorSpec::createSpec<int32_t>("StatefulPartitionedCall", {1})};
 
   TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
-  EXPECT_TRUE(Evaluator.isValid());
-
-  int32_t *V = Evaluator.getInput<int32_t>(0);
-  // Fill it up with 1's, we know the output.
-  for (auto I = 0; I < KnownSize; ++I) {
-    V[I] = 1;
-  }
-  auto ER = Evaluator.evaluate();
-  EXPECT_FALSE(ER.hasValue());
   EXPECT_FALSE(Evaluator.isValid());
 }
diff --git a/utils/bazel/llvm_configs/llvm-config.h.cmake b/utils/bazel/llvm_configs/llvm-config.h.cmake
index 96fbe6f771d2..b289db1d46ec 100644
--- a/utils/bazel/llvm_configs/llvm-config.h.cmake
+++ b/utils/bazel/llvm_configs/llvm-config.h.cmake
@@ -101,6 +101,9 @@
 /* Define if LLVM was built with a dependency to the libtensorflow dynamic library */
 #cmakedefine LLVM_HAVE_TF_API
 
+/* Define if LLVM is using tflite instead of libtensorflow */
+#cmakedefine LLVM_HAVE_TFLITE
+
 /* Define to 1 if you have the <sysexits.h> header file. */
 #cmakedefine HAVE_SYSEXITS_H ${HAVE_SYSEXITS_H}
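A note on the TFUtilsTest.cpp trimming above: under TFLite, the buffer-size mismatch that `EvalError` sets up is caught by `checkReportAndInvalidate()` at construction time, so the evaluator never becomes valid and there is no evaluation step left to exercise. A sketch of the surviving expectation, with the fixture details elided:

```cpp
// EvalError, reduced: the specs request a shape whose total buffer size does
// not match the model's tensor, so construction itself invalidates the
// evaluator (no evaluate() call is needed to observe the failure).
TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
EXPECT_FALSE(Evaluator.isValid());
```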