From 8914dfcd31aefd0e2d8f45f24e82493761027dd1 Mon Sep 17 00:00:00 2001 From: Adrian Tobiszewski Date: Mon, 27 Apr 2026 13:26:50 +0200 Subject: [PATCH 1/5] Split test_utils.hpp into focused headers (Phase 1+2) Extract from monolithic test_utils.hpp (849 lines) into: - test_request_utils_tfs.hpp/.cpp: TFS request helpers - test_request_utils_kfs.hpp/.cpp: KFS request helpers - test_request_utils_capi.hpp/.cpp: CAPI request helpers - test_server_utils.hpp/.cpp: Server start/stop, port randomization - test_model_manager_utils.hpp/.cpp: waitForOVMSConfigReload - test_predict_validation_utils.hpp: MockedMetadataModelIns - test_config_utils.hpp: ConstructorEnabledConfig - test_mediapipe_utils.hpp: DummyMediapipeGraphDefinition Backward-compat re-exports in test_utils.hpp preserved. Removed dead code: waitForOVMSResourcesCleanup (zero callers). All files folded into test_utils cc_library (no circular deps). Build + 235 tests pass (--config=mp_on_py_on). --- src/BUILD | 13 + src/test/deserialization_tests.cpp | 8 +- src/test/predict_validation_test.cpp | 2 +- src/test/stress_test_utils.hpp | 1 + src/test/test_config_utils.hpp | 23 + src/test/test_mediapipe_utils.hpp | 72 ++ src/test/test_model_manager_utils.cpp | 33 + src/test/test_model_manager_utils.hpp | 20 + src/test/test_predict_validation_utils.hpp | 55 ++ src/test/test_request_utils_capi.cpp | 78 ++ src/test/test_request_utils_capi.hpp | 90 +++ src/test/test_request_utils_kfs.cpp | 271 +++++++ src/test/test_request_utils_kfs.hpp | 369 ++++++++++ src/test/test_request_utils_tfs.cpp | 234 ++++++ src/test/test_request_utils_tfs.hpp | 131 ++++ src/test/test_server_utils.cpp | 212 ++++++ src/test/test_server_utils.hpp | 40 ++ src/test/test_utils.cpp | 770 +------------------- src/test/test_utils.hpp | 795 +-------------------- 19 files changed, 1702 insertions(+), 1515 deletions(-) create mode 100644 src/test/test_config_utils.hpp create mode 100644 src/test/test_mediapipe_utils.hpp create mode 100644 
src/test/test_model_manager_utils.cpp create mode 100644 src/test/test_model_manager_utils.hpp create mode 100644 src/test/test_predict_validation_utils.hpp create mode 100644 src/test/test_request_utils_capi.cpp create mode 100644 src/test/test_request_utils_capi.hpp create mode 100644 src/test/test_request_utils_kfs.cpp create mode 100644 src/test/test_request_utils_kfs.hpp create mode 100644 src/test/test_request_utils_tfs.cpp create mode 100644 src/test/test_request_utils_tfs.hpp create mode 100644 src/test/test_server_utils.cpp create mode 100644 src/test/test_server_utils.hpp diff --git a/src/BUILD b/src/BUILD index b9810a0e07..f29dc6c181 100644 --- a/src/BUILD +++ b/src/BUILD @@ -2647,10 +2647,23 @@ cc_library( hdrs = [ "test/c_api_test_utils.hpp", "test/test_utils.hpp", + "test/test_request_utils_tfs.hpp", + "test/test_request_utils_kfs.hpp", + "test/test_request_utils_capi.hpp", + "test/test_server_utils.hpp", + "test/test_mediapipe_utils.hpp", + "test/test_predict_validation_utils.hpp", + "test/test_config_utils.hpp", + "test/test_model_manager_utils.hpp", "test/stress_test_utils.hpp", ], srcs = [ "test/c_api_test_utils.cpp", + "test/test_request_utils_tfs.cpp", + "test/test_request_utils_kfs.cpp", + "test/test_request_utils_capi.cpp", + "test/test_server_utils.cpp", + "test/test_model_manager_utils.cpp", "test/test_utils.cpp", ], deps = [ diff --git a/src/test/deserialization_tests.cpp b/src/test/deserialization_tests.cpp index 1983aa872d..3136ee3a3d 100644 --- a/src/test/deserialization_tests.cpp +++ b/src/test/deserialization_tests.cpp @@ -519,14 +519,14 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( TestDeserialize, GRPCPredictRequestNegative, - ::testing::ValuesIn(UNSUPPORTED_INPUT_PRECISIONS), + ::testing::ValuesIn(UNSUPPORTED_TFS_INPUT_PRECISIONS), [](const ::testing::TestParamInfo& info) { return toString(info.param); }); INSTANTIATE_TEST_SUITE_P( TestDeserialize, GRPCPredictRequest, - ::testing::ValuesIn(SUPPORTED_INPUT_PRECISIONS), + 
::testing::ValuesIn(SUPPORTED_TFS_INPUT_PRECISIONS), [](const ::testing::TestParamInfo& info) { return toString(info.param); }); @@ -534,7 +534,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( Test, DeserializeTFTensorProtoNegative, - ::testing::ValuesIn(UNSUPPORTED_INPUT_PRECISIONS), + ::testing::ValuesIn(UNSUPPORTED_TFS_INPUT_PRECISIONS), [](const ::testing::TestParamInfo& info) { return toString(info.param); }); @@ -550,7 +550,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( Test, DeserializeTFTensorProto, - ::testing::ValuesIn(SUPPORTED_INPUT_PRECISIONS), + ::testing::ValuesIn(SUPPORTED_TFS_INPUT_PRECISIONS), [](const ::testing::TestParamInfo& info) { return toString(info.param); }); diff --git a/src/test/predict_validation_test.cpp b/src/test/predict_validation_test.cpp index 92e6ca447e..2fb46140e8 100644 --- a/src/test/predict_validation_test.cpp +++ b/src/test/predict_validation_test.cpp @@ -857,7 +857,7 @@ TEST_P(TfsPredictValidationPrecision, ValidPrecisions) { INSTANTIATE_TEST_SUITE_P( Test, TfsPredictValidationPrecision, - ::testing::ValuesIn(SUPPORTED_INPUT_PRECISIONS), + ::testing::ValuesIn(SUPPORTED_TFS_INPUT_PRECISIONS), [](const ::testing::TestParamInfo& info) { return toString(info.param); }); diff --git a/src/test/stress_test_utils.hpp b/src/test/stress_test_utils.hpp index 172056101e..15caaae3b3 100644 --- a/src/test/stress_test_utils.hpp +++ b/src/test/stress_test_utils.hpp @@ -41,6 +41,7 @@ #include "../get_model_metadata_impl.hpp" #include "../kfs_frontend/kfs_utils.hpp" #include "src/metrics/metric_config.hpp" +#include "src/metrics/metric_registry.hpp" #include "src/filesystem/localfilesystem.hpp" #include "../logging.hpp" #include "../model_service.hpp" diff --git a/src/test/test_config_utils.hpp b/src/test/test_config_utils.hpp new file mode 100644 index 0000000000..725262c8ab --- /dev/null +++ b/src/test/test_config_utils.hpp @@ -0,0 +1,23 @@ +//***************************************************************************** 
+// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#pragma once + +#include "src/config.hpp" + +class ConstructorEnabledConfig : public ovms::Config { +public: + ConstructorEnabledConfig() {} +}; diff --git a/src/test/test_mediapipe_utils.hpp b/src/test/test_mediapipe_utils.hpp new file mode 100644 index 0000000000..531635589d --- /dev/null +++ b/src/test/test_mediapipe_utils.hpp @@ -0,0 +1,72 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** +#pragma once + +#if (MEDIAPIPE_DISABLE == 0) + +#include + +#include "src/mediapipe_internal/mediapipegraphdefinition.hpp" +#include "src/mediapipe_internal/mediapipegraphexecutor.hpp" +#include "src/status.hpp" + +#if (PYTHON_DISABLE == 0) +#include "src/python/pythonnoderesources.hpp" +#endif + +class DummyMediapipeGraphDefinition : public ovms::MediapipeGraphDefinition { +public: + std::string inputConfig; +#if (PYTHON_DISABLE == 0) + ovms::PythonNodeResources* getPythonNodeResources(const std::string& nodeName) { + auto it = this->sidePacketMaps.pythonNodeResourcesMap.find(nodeName); + if (it == std::end(this->sidePacketMaps.pythonNodeResourcesMap)) { + return nullptr; + } else { + return it->second.get(); + } + } +#endif + + ovms::GenAiServable* getGenAiServable(const std::string& nodeName) { + auto it = this->sidePacketMaps.genAiServableMap.find(nodeName); + if (it == std::end(this->sidePacketMaps.genAiServableMap)) { + return nullptr; + } else { + return it->second.get(); + } + } + + ovms::Status validateForConfigLoadablenessPublic() { + return this->validateForConfigLoadableness(); + } + + ovms::GenAiServableMap& getGenAiServableMap() { return this->sidePacketMaps.genAiServableMap; } + + DummyMediapipeGraphDefinition(const std::string name, + const ovms::MediapipeGraphConfig& config, + std::string inputConfig, + ovms::PythonBackend* pythonBackend = nullptr) : + ovms::MediapipeGraphDefinition(name, config, nullptr, nullptr, pythonBackend) { this->inputConfig = inputConfig; } + + // Do not read from path - use predefined config contents + ovms::Status validateForConfigFileExistence() override { + this->chosenConfig = this->inputConfig; + return ovms::StatusCode::OK; + } +}; + +#endif // MEDIAPIPE_DISABLE == 0 diff --git a/src/test/test_model_manager_utils.cpp b/src/test/test_model_manager_utils.cpp new file mode 100644 index 0000000000..9f13575a8b --- /dev/null +++ 
b/src/test/test_model_manager_utils.cpp @@ -0,0 +1,33 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#include "test_model_manager_utils.hpp" + +#include +#include + +void waitForOVMSConfigReload(ovms::ModelManager& manager) { + const float WAIT_MULTIPLIER_FACTOR = 5; + const uint32_t waitTime = WAIT_MULTIPLIER_FACTOR * manager.getWatcherIntervalMillisec() * 1000; + bool reloadIsNeeded = true; + int timestepMs = 10; + + auto start = std::chrono::high_resolution_clock::now(); + while (reloadIsNeeded && + (std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() < waitTime)) { + std::this_thread::sleep_for(std::chrono::milliseconds(timestepMs)); + manager.configFileReloadNeeded(reloadIsNeeded); + } +} diff --git a/src/test/test_model_manager_utils.hpp b/src/test/test_model_manager_utils.hpp new file mode 100644 index 0000000000..38ee5cae69 --- /dev/null +++ b/src/test/test_model_manager_utils.hpp @@ -0,0 +1,20 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#pragma once + +#include "src/modelmanager.hpp" + +void waitForOVMSConfigReload(ovms::ModelManager& manager); diff --git a/src/test/test_predict_validation_utils.hpp b/src/test/test_predict_validation_utils.hpp new file mode 100644 index 0000000000..ba4f5352af --- /dev/null +++ b/src/test/test_predict_validation_utils.hpp @@ -0,0 +1,55 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** +#pragma once + +#include +#include + +#include + +#include "src/kfs_frontend/validation.hpp" +#include "src/modelinstance.hpp" + +class MockedMetadataModelIns : public ovms::ModelInstance { +public: + MockedMetadataModelIns(ov::Core& ieCore) : + ModelInstance("UNUSED_NAME", 42, ieCore) {} + MOCK_METHOD(const ovms::tensor_map_t&, getInputsInfo, (), (const, override)); + MOCK_METHOD(const ovms::tensor_map_t&, getOutputsInfo, (), (const, override)); + MOCK_METHOD(std::optional, getBatchSize, (), (const, override)); + MOCK_METHOD(const ovms::ModelConfig&, getModelConfig, (), (const, override)); + const ovms::Status mockValidate(const tensorflow::serving::PredictRequest* request) { + return validate(request); + } + const ovms::Status mockValidate(const ::KFSRequest* request) { + return validate(request); + } + const ovms::Status mockValidate(const ovms::InferenceRequest* request) { + return validate(request); + } + template + ovms::Status validate(const RequestType* request) { + return ovms::request_validation_utils::validate( + *request, + this->getInputsInfo(), + this->getOutputsInfo(), + this->getName(), + this->getVersion(), + this->getOptionalInputNames(), + this->getModelConfig().getBatchingMode(), + this->getModelConfig().getShapes()); + } +}; diff --git a/src/test/test_request_utils_capi.cpp b/src/test/test_request_utils_capi.cpp new file mode 100644 index 0000000000..0406a0a1cc --- /dev/null +++ b/src/test/test_request_utils_capi.cpp @@ -0,0 +1,78 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#include "test_request_utils_capi.hpp" + +#include + +#include "src/capi_frontend/capi_utils.hpp" +#include "src/capi_frontend/inferenceparameter.hpp" + +void prepareBinaryPredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize) { throw 42; } // CAPI binary not supported +void prepareBinaryPredictRequestNoShape(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize) { throw 42; } // CAPI binary not supported +void prepareBinary4x4PredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize) { throw 42; } // CAPI binary not supported + +void prepareInferStringRequest(ovms::InferenceRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent) { throw 42; } // CAPI binary not supported +void prepareInferStringTensor(ovms::InferenceTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content) { throw 42; } // CAPI binary not supported + +void preparePredictRequest(ovms::InferenceRequest& request, inputs_info_t requestInputs, const std::vector& data, uint32_t decrementBufferSize, OVMS_BufferType bufferType, std::optional deviceId) { + request.removeAllInputs(); + for (auto const& it : requestInputs) { + prepareCAPIInferInputTensor(request, it.first, it.second, data, decrementBufferSize, bufferType, deviceId); + } +} + +void 
prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data, uint32_t decrementBufferSize, OVMS_BufferType bufferType, std::optional deviceId) { + auto [shape, type] = inputInfo; + prepareCAPIInferInputTensor(request, name, + {shape, getPrecisionAsOVMSDataType(type)}, + data, decrementBufferSize, bufferType, deviceId); +} + +void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data, uint32_t decrementBufferSize, OVMS_BufferType bufferType, std::optional deviceId) { + auto [shape, datatype] = inputInfo; + size_t elementsCount = 1; + + bool isShapeNegative = false; + for (auto const& dim : shape) { + if (dim < 0) { + isShapeNegative = true; + } + elementsCount *= dim; + } + + request.addInput(name.c_str(), datatype, shape.data(), shape.size()); + + size_t dataSize = 0; + if (isShapeNegative) { + dataSize = data.size() * ovms::DataTypeToByteSize(datatype); + } else { + dataSize = elementsCount * ovms::DataTypeToByteSize(datatype); + } + if (decrementBufferSize) + dataSize -= decrementBufferSize; + + request.setInputBuffer(name.c_str(), data.data(), dataSize, bufferType, deviceId); +} + +void assertStringOutputProto(const ovms::InferenceTensor& proto, const std::vector& expectedStrings) { + FAIL() << "not implemented"; +} + +void assertStringResponse(const ovms::InferenceResponse& proto, const std::vector& expectedStrings, const std::string& outputName) { + FAIL() << "not implemented"; +} diff --git a/src/test/test_request_utils_capi.hpp b/src/test/test_request_utils_capi.hpp new file mode 100644 index 0000000000..4b2c9ca115 --- /dev/null +++ b/src/test/test_request_utils_capi.hpp @@ -0,0 +1,90 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#pragma once + +#include +#include +#include +#include + +#include "src/capi_frontend/inferencerequest.hpp" +#include "src/capi_frontend/inferenceresponse.hpp" +#include "src/test/test_utils.hpp" + +using CAPIInterface = std::pair; + +void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data, uint32_t decrementBufferSize = 0, OVMS_BufferType bufferType = OVMS_BUFFERTYPE_CPU, std::optional deviceId = std::nullopt); +void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data, uint32_t decrementBufferSize = 0, OVMS_BufferType bufferType = OVMS_BUFFERTYPE_CPU, std::optional deviceId = std::nullopt); + +void preparePredictRequest(ovms::InferenceRequest& request, inputs_info_t requestInputs, const std::vector& data, + uint32_t decrementBufferSize = 0, OVMS_BufferType bufferType = OVMS_BUFFERTYPE_CPU, std::optional deviceId = std::nullopt); + +void prepareInferStringTensor(ovms::InferenceTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content); +void prepareInferStringRequest(ovms::InferenceRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent = true); // CAPI binary not supported + +void prepareBinaryPredictRequest(ovms::InferenceRequest& 
request, const std::string& inputName, const int batchSize); // CAPI binary not supported +void prepareBinaryPredictRequestNoShape(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize); // CAPI binary not supported +void prepareBinary4x4PredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize = 1); // CAPI binary not supported + +void assertStringOutputProto(const ovms::InferenceTensor& proto, const std::vector& expectedStrings); +void assertStringResponse(const ovms::InferenceResponse& proto, const std::vector& expectedStrings, const std::string& outputName); + +static const std::vector SUPPORTED_CAPI_INPUT_PRECISIONS{ + ovms::Precision::FP64, + ovms::Precision::FP32, + ovms::Precision::FP16, + ovms::Precision::I16, + ovms::Precision::U8, + ovms::Precision::U1, + ovms::Precision::I8, + ovms::Precision::U16, + ovms::Precision::I32, + ovms::Precision::I64, + ovms::Precision::U32, + ovms::Precision::U64, + ovms::Precision::BOOL +}; + +static const std::vector UNSUPPORTED_CAPI_INPUT_PRECISIONS{ + ovms::Precision::UNDEFINED, + ovms::Precision::MIXED, + ovms::Precision::Q78, + ovms::Precision::BIN, + ovms::Precision::CUSTOM}; + +static const std::vector SUPPORTED_CAPI_INPUT_PRECISIONS_TENSORINPUTCONTENT{ + ovms::Precision::FP64, + ovms::Precision::FP32, + ovms::Precision::I16, + ovms::Precision::U8, + ovms::Precision::I8, + ovms::Precision::U16, + ovms::Precision::I32, + ovms::Precision::I64, + ovms::Precision::U32, + ovms::Precision::U64, + ovms::Precision::BOOL +}; + +static const std::vector UNSUPPORTED_CAPI_INPUT_PRECISIONS_TENSORINPUTCONTENT{ + ovms::Precision::UNDEFINED, + ovms::Precision::MIXED, + ovms::Precision::FP16, + ovms::Precision::Q78, + ovms::Precision::BIN, +}; diff --git a/src/test/test_request_utils_kfs.cpp b/src/test/test_request_utils_kfs.cpp new file mode 100644 index 0000000000..b9f37c766b --- /dev/null +++ b/src/test/test_request_utils_kfs.cpp @@ -0,0 +1,271 @@ 
+//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#include "test_request_utils_kfs.hpp" + +#include +#include + +#include + +#include "platform_utils.hpp" + +void checkScalarResponse(const std::string outputName, + float inputScalar, ::KFSResponse& response, const std::string& servableName) { + ASSERT_EQ(response.model_name(), servableName); + ASSERT_EQ(response.outputs_size(), 1); + ASSERT_EQ(response.raw_output_contents_size(), 1); + ASSERT_EQ(response.outputs().begin()->name(), outputName) << "Did not find:" << outputName; + const auto& output_proto = *response.outputs().begin(); + std::string* content = response.mutable_raw_output_contents(0); + + ASSERT_EQ(output_proto.shape_size(), 0); + ASSERT_EQ(content->size(), sizeof(float)); + + ASSERT_EQ(*((float*)content->data()), inputScalar); +} + +void checkStringResponse(const std::string outputName, + const std::vector& inputStrings, ::KFSResponse& response, const std::string& servableName) { + ASSERT_EQ(response.model_name(), servableName); + ASSERT_EQ(response.outputs_size(), 1); + ASSERT_EQ(response.raw_output_contents_size(), 1); + ASSERT_EQ(response.outputs().begin()->name(), outputName) << "Did not find:" << outputName; + const auto& output_proto = *response.outputs().begin(); + std::string* content = 
response.mutable_raw_output_contents(0); + + ASSERT_EQ(output_proto.shape_size(), 1); + ASSERT_EQ(output_proto.shape(0), inputStrings.size()); + + size_t offset = 0; + for (size_t i = 0; i < inputStrings.size(); i++) { + ASSERT_GE(content->size(), offset + 4); + uint32_t batchLength = *((uint32_t*)(content->data() + offset)); + ASSERT_EQ(batchLength, inputStrings[i].size()); + offset += 4; + ASSERT_GE(content->size(), offset + batchLength); + ASSERT_EQ(std::string(content->data() + offset, batchLength), inputStrings[i]); + offset += batchLength; + } + ASSERT_EQ(offset, content->size()); +} + +void assertStringOutputProto(const KFSTensorOutputProto& proto, const std::vector& expectedStrings) { + ASSERT_EQ(proto.contents().bytes_contents_size(), expectedStrings.size()); + for (size_t i = 0; i < expectedStrings.size(); i++) { + ASSERT_EQ(proto.contents().bytes_contents(i), expectedStrings[i]); + } +} + +void assertStringResponse(const ::KFSResponse& proto, const std::vector& expectedStrings, const std::string& outputName) { + ASSERT_EQ(proto.outputs_size(), 1); + ASSERT_EQ(proto.outputs(0).name(), outputName); + ASSERT_EQ(proto.outputs(0).datatype(), "BYTES"); + ASSERT_EQ(proto.outputs(0).shape_size(), 1); + ASSERT_EQ(proto.outputs(0).shape(0), expectedStrings.size()); + std::string expectedString; + for (auto str : expectedStrings) { + int size = str.size(); + for (int k = 0; k < 4; k++, size >>= 8) { + expectedString += static_cast(size & 0xff); + } + expectedString.append(str); + } + ASSERT_EQ(memcmp(proto.raw_output_contents(0).data(), expectedString.data(), expectedString.size()), 0); +} + +void checkAddResponse(const std::string outputName, + const std::vector& requestData1, + const std::vector& requestData2, + ::KFSRequest& request, const ::KFSResponse& response, int seriesLength, int batchSize, const std::string& servableName) { + ASSERT_EQ(response.model_name(), servableName); + ASSERT_EQ(response.outputs_size(), 1); + 
ASSERT_EQ(response.raw_output_contents_size(), 1); + ASSERT_EQ(response.outputs().begin()->name(), outputName) << "Did not find:" << outputName; + const auto& output_proto = *response.outputs().begin(); + const std::string& content = response.raw_output_contents(0); + + ASSERT_EQ(content.size(), batchSize * DUMMY_MODEL_OUTPUT_SIZE * sizeof(float)); + ASSERT_EQ(output_proto.shape_size(), 2); + ASSERT_EQ(output_proto.shape(0), batchSize); + ASSERT_EQ(output_proto.shape(1), DUMMY_MODEL_OUTPUT_SIZE); + + std::vector responseData = requestData1; + for (size_t i = 0; i < requestData1.size(); ++i) { + responseData[i] += requestData2[i]; + } + + const float* actual_output = (const float*)content.data(); + float* expected_output = responseData.data(); + const int dataLengthToCheck = DUMMY_MODEL_OUTPUT_SIZE * batchSize * sizeof(float); + checkBuffers(actual_output, expected_output, dataLengthToCheck); +} + +bool isShapeTheSame(const KFSShapeType& actual, const std::vector&& expected) { + bool same = true; + int a_size = actual.size(); + if (a_size != int(expected.size())) { + SPDLOG_ERROR("Unexpected dim_size. Got: {}, Expect: {}", a_size, expected.size()); + return false; + } + for (int i = 0; i < a_size; i++) { + if (actual.at(i) != expected[i]) { + SPDLOG_ERROR("Unexpected dim[{}]. 
Got: {}, Expect: {}", i, actual.at(i), expected[i]); + same = false; + break; + } + } + if (same == false) { + std::stringstream ss; + for (int i = 0; i < a_size; i++) { + ss << "dim[" + << i + << "] got:" + << actual.at(i) + << " expect:" << expected[i]; + } + SPDLOG_ERROR("Shape mismatch: {}", ss.str()); + } + return same; +} + +void prepareInferStringTensor(::KFSRequest::InferInputTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content) { + if (!putBufferInInputTensorContent && content == nullptr) { + throw std::runtime_error("Preparation of infer string tensor failed"); + return; + } + tensor.set_name(name); + tensor.set_datatype("BYTES"); + tensor.mutable_shape()->Clear(); + tensor.add_shape(data.size()); + if (!putBufferInInputTensorContent) { + size_t dataSize = 0; + for (auto input : data) { + dataSize += input.size() + 4; + } + content->resize(dataSize); + size_t offset = 0; + for (auto input : data) { + uint32_t inputSize = input.size(); + std::memcpy(content->data() + offset, reinterpret_cast(&inputSize), sizeof(uint32_t)); + offset += sizeof(uint32_t); + std::memcpy(content->data() + offset, input.data(), input.length()); + offset += input.length(); + } + } else { + for (auto inputData : data) { + auto bytes_val = tensor.mutable_contents()->mutable_bytes_contents()->Add(); + bytes_val->append(inputData.data(), inputData.size()); + } + } +} + +void prepareInferStringRequest(::KFSRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent) { + auto it = request.mutable_inputs()->begin(); + size_t bufferId = 0; + while (it != request.mutable_inputs()->end()) { + if (it->name() == name) + break; + ++it; + ++bufferId; + } + KFSTensorInputProto* tensor; + std::string* content = nullptr; + if (it != request.mutable_inputs()->end()) { + tensor = &*it; + if (!putBufferInInputTensorContent) { + content = 
request.mutable_raw_input_contents()->Mutable(bufferId); + } + } else { + tensor = request.add_inputs(); + if (!putBufferInInputTensorContent) { + content = request.add_raw_input_contents(); + } + } + prepareInferStringTensor(*tensor, name, data, putBufferInInputTensorContent, content); +} + + + + +void prepareBinaryPredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize) { + request.add_inputs(); + auto tensor = request.mutable_inputs()->Mutable(0); + tensor->set_name(inputName); + size_t filesize = 0; + std::unique_ptr image_bytes = nullptr; + readRgbJpg(filesize, image_bytes); + + for (int i = 0; i < batchSize; i++) { + tensor->mutable_contents()->add_bytes_contents(image_bytes.get(), filesize); + } + tensor->set_datatype("BYTES"); + tensor->mutable_shape()->Add(batchSize); +} + +void prepareBinaryPredictRequestNoShape(::KFSRequest& request, const std::string& inputName, const int batchSize) { + request.add_inputs(); + auto tensor = request.mutable_inputs()->Mutable(0); + tensor->set_name(inputName); + size_t filesize = 0; + std::unique_ptr image_bytes = nullptr; + readRgbJpg(filesize, image_bytes); + + for (int i = 0; i < batchSize; i++) { + tensor->mutable_contents()->add_bytes_contents(image_bytes.get(), filesize); + } + tensor->set_datatype("BYTES"); +} + +void prepareBinary4x4PredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize) { + request.add_inputs(); + auto tensor = request.mutable_inputs()->Mutable(0); + tensor->set_name(inputName); + size_t filesize = 0; + std::unique_ptr image_bytes = nullptr; + read4x4RgbJpg(filesize, image_bytes); + + for (int i = 0; i < batchSize; i++) { + tensor->mutable_contents()->add_bytes_contents(image_bytes.get(), filesize); + } + tensor->set_datatype("BYTES"); + tensor->mutable_shape()->Add(batchSize); +} + +::KFSTensorInputProto* findKFSInferInputTensor(::KFSRequest& request, const std::string& name) { + auto it = request.mutable_inputs()->begin(); + while 
(it != request.mutable_inputs()->end()) { + if (it->name() == name) + break; + ++it; + } + return it == request.mutable_inputs()->end() ? nullptr : &(*it); +} + +std::string* findKFSInferInputTensorContentInRawInputs(::KFSRequest& request, const std::string& name) { + auto it = request.mutable_inputs()->begin(); + size_t bufferId = 0; + std::string* content = nullptr; + while (it != request.mutable_inputs()->end()) { + if (it->name() == name) + break; + ++it; + ++bufferId; + } + if (it != request.mutable_inputs()->end()) { + content = request.mutable_raw_input_contents()->Mutable(bufferId); + } + return content; +} diff --git a/src/test/test_request_utils_kfs.hpp b/src/test/test_request_utils_kfs.hpp new file mode 100644 index 0000000000..91afe1fc6f --- /dev/null +++ b/src/test/test_request_utils_kfs.hpp @@ -0,0 +1,369 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "src/kfs_frontend/kfs_grpc_inference_service.hpp" +#include "src/kfs_frontend/kfs_utils.hpp" +#include "src/test/test_utils.hpp" + +using KFSInterface = std::pair; + +KFSTensorInputProto* findKFSInferInputTensor(::KFSRequest& request, const std::string& name); +std::string* findKFSInferInputTensorContentInRawInputs(::KFSRequest& request, const std::string& name); + +template +void prepareKFSInferInputTensor(::KFSRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data = std::vector{}, bool putBufferInInputTensorContent = false) { + auto it = request.mutable_inputs()->begin(); + size_t bufferId = 0; + while (it != request.mutable_inputs()->end()) { + if (it->name() == name) + break; + ++it; + ++bufferId; + } + KFSTensorInputProto* tensor; + std::string* content = nullptr; + if (it != request.mutable_inputs()->end()) { + tensor = &*it; + if (!putBufferInInputTensorContent) { + content = request.mutable_raw_input_contents()->Mutable(bufferId); + } + } else { + tensor = request.add_inputs(); + if (!putBufferInInputTensorContent) { + content = request.add_raw_input_contents(); + } + } + auto [shape, datatype] = inputInfo; + tensor->set_name(name); + tensor->set_datatype(datatype); + size_t elementsCount = 1; + tensor->mutable_shape()->Clear(); + bool isNegativeShape = false; + for (auto const& dim : shape) { + tensor->add_shape(dim); + if (dim < 0) { + isNegativeShape = true; + } + elementsCount *= dim; + } + size_t dataSize = isNegativeShape ? 
data.size() : elementsCount; + if (!putBufferInInputTensorContent) { + if (data.size() == 0) { + content->assign(dataSize * ovms::KFSDataTypeSize(datatype), '1'); + } else { + content->resize(dataSize * ovms::KFSDataTypeSize(datatype)); + std::memcpy(content->data(), data.data(), content->size()); + } + } else { + switch (ovms::KFSPrecisionToOvmsPrecision(datatype)) { + case ovms::Precision::FP64: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_fp64_contents()->Add(); + *ptr = (data.size() ? data[i] : 1); + } + break; + } + case ovms::Precision::FP32: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_fp32_contents()->Add(); + *ptr = (data.size() ? data[i] : 1); + } + break; + } + case ovms::Precision::U64: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_uint64_contents()->Add(); + *ptr = (data.size() ? data[i] : 1); + } + break; + } + case ovms::Precision::U8: + case ovms::Precision::U16: + case ovms::Precision::U32: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_uint_contents()->Add(); + *ptr = (data.size() ? data[i] : 1); + } + break; + } + case ovms::Precision::I64: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_int64_contents()->Add(); + *ptr = (data.size() ? data[i] : 1); + } + break; + } + case ovms::Precision::BOOL: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_bool_contents()->Add(); + *ptr = (data.size() ? data[i] : 1); + } + break; + } + case ovms::Precision::I8: + case ovms::Precision::I16: + case ovms::Precision::I32: { + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_int_contents()->Add(); + *ptr = (data.size() ? 
data[i] : 1); + } + break; + } + case ovms::Precision::FP16: + case ovms::Precision::U1: + case ovms::Precision::CUSTOM: + case ovms::Precision::UNDEFINED: + case ovms::Precision::DYNAMIC: + case ovms::Precision::MIXED: + case ovms::Precision::Q78: + case ovms::Precision::BIN: + default: { + } + } + } +} + +template <> +inline void prepareKFSInferInputTensor(::KFSRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data, bool putBufferInInputTensorContent) { + if (putBufferInInputTensorContent == 0) { + throw std::string("Unsupported"); + } + auto it = request.mutable_inputs()->begin(); + size_t bufferId = 0; + while (it != request.mutable_inputs()->end()) { + if (it->name() == name) + break; + ++it; + ++bufferId; + } + KFSTensorInputProto* tensor; + if (it != request.mutable_inputs()->end()) { + tensor = &*it; + } else { + tensor = request.add_inputs(); + } + auto [shape, datatype] = inputInfo; + tensor->set_name(name); + tensor->set_datatype(datatype); + size_t elementsCount = 1; + tensor->mutable_shape()->Clear(); + bool isNegativeShape = false; + for (auto const& dim : shape) { + tensor->add_shape(dim); + if (dim < 0) { + isNegativeShape = true; + } + elementsCount *= dim; + } + size_t dataSize = isNegativeShape ? data.size() : elementsCount; + for (size_t i = 0; i < dataSize; ++i) { + auto ptr = tensor->mutable_contents()->mutable_bool_contents()->Add(); + *ptr = (data.size() ? 
data[i] : 1); + } +} + +template +void prepareKFSInferInputTensor(::KFSRequest& request, const std::string& name, const std::tuple& inputInfo, + const std::vector& data = std::vector{}, bool putBufferInInputTensorContent = false) { + auto [shape, type] = inputInfo; + prepareKFSInferInputTensor(request, name, + {shape, ovmsPrecisionToKFSPrecision(type)}, + data, putBufferInInputTensorContent); +} + +template +void preparePredictRequest(::KFSRequest& request, inputs_info_t requestInputs, const std::vector& data = std::vector{}, bool putBufferInInputTensorContent = false) { + request.mutable_inputs()->Clear(); + request.mutable_raw_input_contents()->Clear(); + for (auto const& it : requestInputs) { + prepareKFSInferInputTensor(request, it.first, it.second, data, putBufferInInputTensorContent); + } +} + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-function" +static std::string vectorTypeToKfsString(const std::type_info& vectorType) { + if (vectorType == typeid(float)) + return std::string("FP32"); + if (vectorType == typeid(int32_t)) + return std::string("INT32"); + if (vectorType == typeid(double)) + return std::string("FP64"); + if (vectorType == typeid(int64_t)) + return std::string("INT64"); + if (vectorType == typeid(int16_t)) + return std::string("INT16"); + if (vectorType == typeid(int8_t)) + return std::string("INT8"); + if (vectorType == typeid(uint64_t)) + return std::string("UINT64"); + if (vectorType == typeid(uint32_t)) + return std::string("UINT32"); + if (vectorType == typeid(uint16_t)) + return std::string("UINT16"); + if (vectorType == typeid(uint8_t)) + return std::string("UINT8"); + if (vectorType == typeid(bool)) + return std::string("BOOL"); + return std::string("UNDEFINED"); +} +#pragma GCC diagnostic pop + +template +void checkDummyResponse(const std::string outputName, + const std::vector& requestData, + ::KFSRequest& request, ::KFSResponse& response, int seriesLength, int batchSize = 1, const std::string& servableName 
= "", size_t expectedOutputsCount = 1) { + ASSERT_EQ(response.model_name(), servableName); + ASSERT_EQ(response.outputs_size(), expectedOutputsCount); + ASSERT_EQ(response.raw_output_contents_size(), expectedOutputsCount); + auto it = std::find_if(response.outputs().begin(), response.outputs().end(), [&outputName](const ::KFSResponse::InferOutputTensor& tensor) { + return tensor.name() == outputName; + }); + ASSERT_NE(it, response.outputs().end()); + auto outputIndex = it - response.outputs().begin(); + const auto& output_proto = *it; + std::string* content = response.mutable_raw_output_contents(outputIndex); + + ASSERT_EQ(content->size(), batchSize * DUMMY_MODEL_OUTPUT_SIZE * sizeof(T)); + ASSERT_EQ(output_proto.datatype(), vectorTypeToKfsString(typeid(T))); + ASSERT_EQ(output_proto.shape_size(), 2); + ASSERT_EQ(output_proto.shape(0), batchSize); + ASSERT_EQ(output_proto.shape(1), DUMMY_MODEL_OUTPUT_SIZE); + + std::vector responseData = requestData; + std::for_each(responseData.begin(), responseData.end(), [seriesLength](T& v) { + v += 1.0 * seriesLength; + }); + + T* actual_output = (T*)content->data(); + T* expected_output = responseData.data(); + const int dataLengthToCheck = DUMMY_MODEL_OUTPUT_SIZE * batchSize * sizeof(T); + EXPECT_EQ(0, std::memcmp(actual_output, expected_output, dataLengthToCheck)) + << readableError(expected_output, actual_output, dataLengthToCheck / sizeof(T)); +} + +void checkScalarResponse(const std::string outputName, + float inputScalar, ::KFSResponse& response, const std::string& servableName = ""); + +void checkStringResponse(const std::string outputName, + const std::vector& inputStrings, ::KFSResponse& response, const std::string& servableName = ""); + +void assertStringOutputProto(const KFSTensorOutputProto& proto, const std::vector& expectedStrings); + +void assertStringResponse(const ::KFSResponse& proto, const std::vector& expectedStrings, const std::string& outputName); + +void checkAddResponse(const std::string outputName, + 
const std::vector& requestData1, + const std::vector& requestData2, + ::KFSRequest& request, const ::KFSResponse& response, int seriesLength, int batchSize, const std::string& servableName); + +template +void checkIncrement4DimResponse(const std::string outputName, + const std::vector& expectedData, + ::KFSResponse& response, + const std::vector& expectedShape, + bool checkRaw = true) { + ASSERT_EQ(response.outputs_size(), 1); + ASSERT_EQ(response.mutable_outputs(0)->name(), outputName); + ASSERT_EQ(response.outputs(0).shape_size(), expectedShape.size()); + for (size_t i = 0; i < expectedShape.size(); i++) { + ASSERT_EQ(response.outputs(0).shape(i), expectedShape[i]); + } + + if (checkRaw) { + ASSERT_EQ(response.raw_output_contents_size(), 1); + auto elementsCount = std::accumulate(expectedShape.begin(), expectedShape.end(), 1, std::multiplies()); + ASSERT_EQ(response.raw_output_contents(0).size(), elementsCount * sizeof(T)); + T* actual_output = (T*)response.raw_output_contents(0).data(); + T* expected_output = (T*)expectedData.data(); + const int dataLengthToCheck = elementsCount * sizeof(T); + EXPECT_EQ(0, std::memcmp(actual_output, expected_output, dataLengthToCheck)) + << readableError(expected_output, actual_output, dataLengthToCheck / sizeof(T)); + } else { + ASSERT_EQ(response.outputs(0).datatype(), "UINT8") << "other precision testing currently not supported"; + ASSERT_EQ(sizeof(T), 1) << "other precision testing currently not supported"; + ASSERT_EQ(response.outputs(0).contents().uint_contents_size(), expectedData.size()); + for (size_t i = 0; i < expectedData.size(); i++) { + ASSERT_EQ(response.outputs(0).contents().uint_contents(i), expectedData[i]); + } + } +} + +bool isShapeTheSame(const KFSShapeType&, const std::vector&&); + +void prepareInferStringTensor(::KFSRequest::InferInputTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content); +void prepareInferStringRequest(::KFSRequest& 
request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent = true); + +void prepareBinaryPredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize); +void prepareBinaryPredictRequestNoShape(::KFSRequest& request, const std::string& inputName, const int batchSize); +void prepareBinary4x4PredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize = 1); + +static const std::vector SUPPORTED_KFS_INPUT_PRECISIONS{ + ovms::Precision::FP64, + ovms::Precision::FP32, + ovms::Precision::FP16, + ovms::Precision::I16, + ovms::Precision::U8, + ovms::Precision::I8, + ovms::Precision::U16, + ovms::Precision::I32, + ovms::Precision::I64, + ovms::Precision::U32, + ovms::Precision::U64, + ovms::Precision::BOOL +}; + +static const std::vector UNSUPPORTED_KFS_INPUT_PRECISIONS{ + ovms::Precision::UNDEFINED, + ovms::Precision::MIXED, + ovms::Precision::Q78, + ovms::Precision::BIN, + ovms::Precision::CUSTOM}; + +static const std::vector SUPPORTED_KFS_INPUT_PRECISIONS_TENSORINPUTCONTENT{ + ovms::Precision::FP64, + ovms::Precision::FP32, + ovms::Precision::I16, + ovms::Precision::U8, + ovms::Precision::I8, + ovms::Precision::U16, + ovms::Precision::I32, + ovms::Precision::I64, + ovms::Precision::U32, + ovms::Precision::U64, + ovms::Precision::BOOL +}; + +static const std::vector UNSUPPORTED_KFS_INPUT_PRECISIONS_TENSORINPUTCONTENT{ + ovms::Precision::UNDEFINED, + ovms::Precision::MIXED, + ovms::Precision::FP16, + ovms::Precision::Q78, + ovms::Precision::BIN, +}; diff --git a/src/test/test_request_utils_tfs.cpp b/src/test/test_request_utils_tfs.cpp new file mode 100644 index 0000000000..4486db0e46 --- /dev/null +++ b/src/test/test_request_utils_tfs.cpp @@ -0,0 +1,234 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#include "test_request_utils_tfs.hpp" + +#include +#include + +#include + +#include "platform_utils.hpp" +#include "src/tfs_frontend/tfs_utils.hpp" + +using tensorflow::serving::PredictRequest; +using tensorflow::serving::PredictResponse; + +void preparePredictRequest(tensorflow::serving::PredictRequest& request, inputs_info_t requestInputs, const std::vector& data) { + request.mutable_inputs()->clear(); + for (auto const& it : requestInputs) { + auto& name = it.first; + auto [shape, precision] = it.second; + + auto& input = (*request.mutable_inputs())[name]; + auto datatype = getPrecisionAsDataType(precision); + input.set_dtype(datatype); + size_t numberOfElements = 1; + for (auto const& dim : shape) { + input.mutable_tensor_shape()->add_dim()->set_size(dim); + numberOfElements *= dim; + } + switch (datatype) { + case tensorflow::DataType::DT_HALF: { + if (data.size() == 0) { + for (size_t i = 0; i < numberOfElements; i++) { + input.add_half_val('1'); + } + } else { + for (size_t i = 0; i < data.size(); i++) { + input.add_half_val(data[i]); + } + } + break; + } + case tensorflow::DataType::DT_UINT16: { + if (data.size() == 0) { + for (size_t i = 0; i < numberOfElements; i++) { + input.add_int_val('1'); + } + } else { + for (size_t i = 0; i < data.size(); i++) { + input.add_int_val(data[i]); + } + } + break; + } + default: { + if (data.size() == 0) { + *input.mutable_tensor_content() = std::string(numberOfElements * 
tensorflow::DataTypeSize(datatype), '1'); + } else { + std::string content; + content.resize(data.size() * tensorflow::DataTypeSize(datatype)); + std::memcpy(content.data(), data.data(), content.size()); + *input.mutable_tensor_content() = content; + } + } + } + } +} + +void checkDummyResponse(const std::string outputName, + const std::vector& requestData, + PredictRequest& request, PredictResponse& response, int seriesLength, int batchSize, const std::string& servableName, size_t expectedOutputsCount) { + ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; + const auto& output_proto = response.outputs().at(outputName); + + ASSERT_EQ(output_proto.tensor_content().size(), batchSize * DUMMY_MODEL_OUTPUT_SIZE * sizeof(float)); + ASSERT_EQ(output_proto.tensor_shape().dim_size(), 2); + ASSERT_EQ(output_proto.tensor_shape().dim(0).size(), batchSize); + ASSERT_EQ(output_proto.tensor_shape().dim(1).size(), DUMMY_MODEL_OUTPUT_SIZE); + + std::vector responseData = requestData; + std::for_each(responseData.begin(), responseData.end(), [seriesLength](float& v) { v += 1.0 * seriesLength; }); + + float* actual_output = (float*)output_proto.tensor_content().data(); + float* expected_output = responseData.data(); + const int dataLengthToCheck = DUMMY_MODEL_OUTPUT_SIZE * batchSize * sizeof(float); + checkBuffers(actual_output, expected_output, dataLengthToCheck); +} + +void checkScalarResponse(const std::string outputName, + float inputScalar, PredictResponse& response, const std::string& servableName) { + ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; + const auto& output_proto = response.outputs().at(outputName); + + ASSERT_EQ(output_proto.tensor_shape().dim_size(), 0); + + ASSERT_EQ(output_proto.tensor_content().size(), sizeof(float)); + ASSERT_EQ(*((float*)output_proto.tensor_content().data()), inputScalar); +} + +void checkStringResponse(const std::string outputName, + const std::vector& inputStrings, 
PredictResponse& response, const std::string& servableName) { + ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; + const auto& output_proto = response.outputs().at(outputName); + + ASSERT_EQ(output_proto.tensor_shape().dim_size(), 1); + ASSERT_EQ(output_proto.tensor_shape().dim(0).size(), inputStrings.size()); + ASSERT_EQ(output_proto.dtype(), tensorflow::DT_STRING); + + ASSERT_EQ(output_proto.string_val_size(), inputStrings.size()); + for (size_t i = 0; i < inputStrings.size(); i++) { + ASSERT_EQ(output_proto.string_val(i), inputStrings[i]); + } +} + +void assertStringOutputProto(const tensorflow::TensorProto& proto, const std::vector& expectedStrings) { + ASSERT_EQ(proto.string_val_size(), expectedStrings.size()); + for (size_t i = 0; i < expectedStrings.size(); i++) { + ASSERT_EQ(proto.string_val(i), expectedStrings[i]); + } +} + +void assertStringResponse(const tensorflow::serving::PredictResponse& proto, const std::vector& expectedStrings, const std::string& outputName) { + ASSERT_EQ(proto.outputs().count(outputName), 1); + ASSERT_EQ(proto.outputs().at(outputName).dtype(), tensorflow::DataType::DT_STRING); + ASSERT_EQ(proto.outputs().at(outputName).tensor_shape().dim_size(), 1); + ASSERT_EQ(proto.outputs().at(outputName).tensor_shape().dim(0).size(), expectedStrings.size()); + assertStringOutputProto(proto.outputs().at(outputName), expectedStrings); +} + +void checkIncrement4DimShape(const std::string outputName, + PredictResponse& response, + const std::vector& expectedShape) { + ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; + const auto& output_proto = response.outputs().at(outputName); + + ASSERT_EQ(output_proto.tensor_shape().dim_size(), expectedShape.size()); + for (size_t i = 0; i < expectedShape.size(); i++) { + ASSERT_EQ(output_proto.tensor_shape().dim(i).size(), expectedShape[i]); + } +} + +bool isShapeTheSame(const tensorflow::TensorShapeProto& actual, const std::vector&& 
expected) { + bool same = true; + if (static_cast(actual.dim_size()) != expected.size()) { + SPDLOG_ERROR("Unexpected dim_size. Got: {}, Expect: {}", actual.dim_size(), expected.size()); + return false; + } + for (int i = 0; i < actual.dim_size(); i++) { + if (actual.dim(i).size() != expected[i]) { + SPDLOG_ERROR("Unexpected dim[{}]. Got: {}, Expect: {}", i, actual.dim(i).size(), expected[i]); + same = false; + } + } + if (same == false) { + std::stringstream ss; + for (int i = 0; i < actual.dim_size(); i++) { + ss << "dim[" + << i + << "] got:" + << actual.dim(i).size() + << " expect:" << expected[i]; + } + SPDLOG_ERROR("Shape mismatch: {}", ss.str()); + } + return same; +} + +void prepareInferStringTensor(tensorflow::TensorProto& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content) { + tensor.set_dtype(tensorflow::DataType::DT_STRING); + tensor.mutable_tensor_shape()->add_dim()->set_size(data.size()); + for (auto inputData : data) { + tensor.add_string_val(inputData); + } +} + +void prepareInferStringRequest(tensorflow::serving::PredictRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent) { + request.mutable_inputs()->clear(); + auto& input = (*request.mutable_inputs())[name]; + prepareInferStringTensor(input, name, data, putBufferInInputTensorContent, nullptr); +} + + + + +void prepareBinaryPredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize) { + auto& tensor = (*request.mutable_inputs())[inputName]; + size_t filesize = 0; + std::unique_ptr image_bytes = nullptr; + readRgbJpg(filesize, image_bytes); + + for (int i = 0; i < batchSize; i++) { + tensor.add_string_val(image_bytes.get(), filesize); + } + tensor.set_dtype(tensorflow::DataType::DT_STRING); + tensor.mutable_tensor_shape()->add_dim()->set_size(batchSize); +} + +void 
prepareBinaryPredictRequestNoShape(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize) { + auto& tensor = (*request.mutable_inputs())[inputName]; + size_t filesize = 0; + std::unique_ptr image_bytes = nullptr; + readRgbJpg(filesize, image_bytes); + + for (int i = 0; i < batchSize; i++) { + tensor.add_string_val(image_bytes.get(), filesize); + } + tensor.set_dtype(tensorflow::DataType::DT_STRING); +} + +void prepareBinary4x4PredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize) { + auto& tensor = (*request.mutable_inputs())[inputName]; + size_t filesize = 0; + std::unique_ptr image_bytes = nullptr; + read4x4RgbJpg(filesize, image_bytes); + + for (int i = 0; i < batchSize; i++) { + tensor.add_string_val(image_bytes.get(), filesize); + } + tensor.set_dtype(tensorflow::DataType::DT_STRING); + tensor.mutable_tensor_shape()->add_dim()->set_size(batchSize); +} diff --git a/src/test/test_request_utils_tfs.hpp b/src/test/test_request_utils_tfs.hpp new file mode 100644 index 0000000000..195c7ffa76 --- /dev/null +++ b/src/test/test_request_utils_tfs.hpp @@ -0,0 +1,131 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** +#pragma once + +#include +#include +#include +#include +#include + +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wall" +#include "tensorflow_serving/apis/prediction_service.grpc.pb.h" +#pragma GCC diagnostic pop + +#include "src/test/test_utils.hpp" + +using TFSRequestType = tensorflow::serving::PredictRequest; +using TFSResponseType = tensorflow::serving::PredictResponse; +using TFSInputTensorType = tensorflow::TensorProto; +using TFSOutputTensorType = tensorflow::TensorProto; +using TFSShapeType = tensorflow::TensorShapeProto; +using TFSInputTensorIteratorType = google::protobuf::Map::const_iterator; +using TFSOutputTensorIteratorType = google::protobuf::Map::const_iterator; +using TFSInterface = std::pair; + +void preparePredictRequest(tensorflow::serving::PredictRequest& request, inputs_info_t requestInputs, const std::vector& data = std::vector{}); + +void checkDummyResponse(const std::string outputName, + const std::vector& requestData, + tensorflow::serving::PredictRequest& request, tensorflow::serving::PredictResponse& response, int seriesLength, int batchSize = 1, const std::string& servableName = "", size_t expectedOutputsCount = 1); + +void checkScalarResponse(const std::string outputName, + float inputScalar, tensorflow::serving::PredictResponse& response, const std::string& servableName = ""); + +void checkStringResponse(const std::string outputName, + const std::vector& inputStrings, tensorflow::serving::PredictResponse& response, const std::string& servableName = ""); + +void assertStringOutputProto(const tensorflow::TensorProto& proto, const std::vector& expectedStrings); + +void assertStringResponse(const tensorflow::serving::PredictResponse& proto, const std::vector& expectedStrings, const std::string& outputName); + +template +void checkIncrement4DimResponse(const std::string outputName, + const std::vector& expectedData, + 
tensorflow::serving::PredictResponse& response, + const std::vector& expectedShape, + bool checkRaw = true) { + ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; + const auto& output_proto = response.outputs().at(outputName); + + auto elementsCount = std::accumulate(expectedShape.begin(), expectedShape.end(), 1, std::multiplies()); + + ASSERT_EQ(output_proto.tensor_content().size(), elementsCount * sizeof(T)); + ASSERT_EQ(output_proto.tensor_shape().dim_size(), expectedShape.size()); + for (size_t i = 0; i < expectedShape.size(); i++) { + ASSERT_EQ(output_proto.tensor_shape().dim(i).size(), expectedShape[i]); + } + + T* actual_output = (T*)output_proto.tensor_content().data(); + T* expected_output = (T*)expectedData.data(); + const int dataLengthToCheck = elementsCount * sizeof(T); + EXPECT_EQ(0, std::memcmp(actual_output, expected_output, dataLengthToCheck)) + << readableError(expected_output, actual_output, dataLengthToCheck / sizeof(T)); +} + +void checkIncrement4DimShape(const std::string outputName, + tensorflow::serving::PredictResponse& response, + const std::vector& expectedShape); + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-function" +static std::vector asVector(const tensorflow::TensorShapeProto& proto) { + std::vector shape; + for (int i = 0; i < proto.dim_size(); i++) { + shape.push_back(proto.dim(i).size()); + } + return shape; +} + +static std::vector asVector(google::protobuf::RepeatedField* container) { + std::vector result(container->size(), 0); + std::memcpy(result.data(), container->mutable_data(), result.size() * sizeof(google::protobuf::int32)); + return result; +} +#pragma GCC diagnostic pop + +bool isShapeTheSame(const tensorflow::TensorShapeProto&, const std::vector&&); + +void prepareInferStringTensor(tensorflow::TensorProto& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content); +void 
prepareInferStringRequest(tensorflow::serving::PredictRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent = true); + +void prepareBinaryPredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize); +void prepareBinaryPredictRequestNoShape(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize); +void prepareBinary4x4PredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize = 1); + +static const std::vector SUPPORTED_TFS_INPUT_PRECISIONS{ + ovms::Precision::FP64, + ovms::Precision::FP32, + ovms::Precision::FP16, + ovms::Precision::I16, + ovms::Precision::U8, + ovms::Precision::I8, + ovms::Precision::U16, + ovms::Precision::U32, + ovms::Precision::I32, + ovms::Precision::I64, +}; + +static const std::vector UNSUPPORTED_TFS_INPUT_PRECISIONS{ + ovms::Precision::UNDEFINED, + ovms::Precision::MIXED, + ovms::Precision::Q78, + ovms::Precision::BIN, + ovms::Precision::BOOL +}; diff --git a/src/test/test_server_utils.cpp b/src/test/test_server_utils.cpp new file mode 100644 index 0000000000..4948191678 --- /dev/null +++ b/src/test/test_server_utils.cpp @@ -0,0 +1,212 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** +#include "test_server_utils.hpp" + +#include +#include +#include +#include + +#include + +#include "platform_utils.hpp" + +#include "src/network_utils.hpp" +#include "src/servablemanagermodule.hpp" +#include "src/server.hpp" + +void randomizeAndEnsureFree(std::string& port) { + std::mt19937_64 eng{std::random_device{}()}; + std::uniform_int_distribution<> dist{0, 9}; + int tryCount = 3; + while (tryCount--) { + for (auto j : {1, 2, 3}) { + char* digitToRandomize = (char*)port.c_str() + j; + *digitToRandomize = '0' + dist(eng); + } + if (ovms::isPortAvailable(std::stoi(port))) { + return; + } else { + continue; + } + } + EXPECT_TRUE(false) << "Could not find random available port"; +} + +void randomizeAndEnsureFrees(std::string& port1, std::string& port2) { + randomizeAndEnsureFree(port1); + randomizeAndEnsureFree(port2); + while (port2 == port1) { + randomizeAndEnsureFree(port2); + } +} + +const int64_t SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS = 60; + +void EnsureServerStartedWithTimeout(ovms::Server& server, int timeoutSeconds) { + auto start = std::chrono::high_resolution_clock::now(); + int timestepMs = 20; + while ((server.getModuleState(ovms::SERVABLE_MANAGER_MODULE_NAME) != ovms::ModuleState::INITIALIZED) && + (std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() < timeoutSeconds)) { + std::this_thread::sleep_for(std::chrono::milliseconds(timestepMs)); + } + ASSERT_EQ(server.getModuleState(ovms::SERVABLE_MANAGER_MODULE_NAME), ovms::ModuleState::INITIALIZED) << "OVMS did not fully load until allowed time:" << timeoutSeconds << "s. 
Check machine load"; +} + +void EnsureServerModelDownloadFinishedWithTimeout(ovms::Server& server, int timeoutSeconds) { + auto start = std::chrono::high_resolution_clock::now(); + while ((server.getModuleState(ovms::HF_MODEL_PULL_MODULE_NAME) != ovms::ModuleState::SHUTDOWN) && + (std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() < timeoutSeconds)) { + } + + ASSERT_EQ(server.getModuleState(ovms::HF_MODEL_PULL_MODULE_NAME), ovms::ModuleState::SHUTDOWN) << "OVMS did not download model in allowed time:" << timeoutSeconds << "s. Check machine load and network load"; +} + +void SetUpServerForDownload(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int expected_code, int timeoutSeconds) { + server.setShutdownRequest(0); + char* argv[] = {(char*)"ovms", + (char*)"--pull", + (char*)"--source_model", + (char*)source_model.c_str(), + (char*)"--model_repository_path", + (char*)download_path.c_str(), + (char*)"--task", + (char*)task.c_str()}; + + int argc = 8; + t.reset(new std::thread([&argc, &argv, &server, expected_code]() { + EXPECT_EQ(expected_code, server.start(argc, argv)); + })); + + EnsureServerModelDownloadFinishedWithTimeout(server, timeoutSeconds); +} + +void SetUpServerForDownloadWithDraft(std::unique_ptr& t, ovms::Server& server, + std::string& draftModel, std::string& source_model, std::string& download_path, std::string& task, int expected_code, int timeoutSeconds) { + server.setShutdownRequest(0); + char* argv[] = {(char*)"ovms", + (char*)"--pull", + (char*)"--source_model", + (char*)source_model.c_str(), + (char*)"--model_repository_path", + (char*)download_path.c_str(), + (char*)"--task", + (char*)task.c_str(), + (char*)"--draft_source_model", + (char*)draftModel.c_str()}; + + int argc = 10; + t.reset(new std::thread([&argc, &argv, &server, expected_code]() { + EXPECT_EQ(expected_code, server.start(argc, argv)); + })); + + 
EnsureServerModelDownloadFinishedWithTimeout(server, timeoutSeconds);
+}
+
+// Pulls the model AND starts serving it (adds --port); waits for full server init.
+void SetUpServerForDownloadAndStart(std::unique_ptr<std::thread>& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int timeoutSeconds) {
+    server.setShutdownRequest(0);
+    std::string port = "9133";  // seed value; digits 1..3 are re-randomized to a free port
+    randomizeAndEnsureFree(port);
+    char* argv[] = {(char*)"ovms",
+        (char*)"--port",
+        (char*)port.c_str(),
+        (char*)"--source_model",
+        (char*)source_model.c_str(),
+        (char*)"--model_repository_path",
+        (char*)download_path.c_str(),
+        (char*)"--task",
+        (char*)task.c_str()};
+
+    int argc = 9;
+    t.reset(new std::thread([&argc, &argv, &server]() {
+        EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv));
+    }));
+
+    EnsureServerStartedWithTimeout(server, timeoutSeconds);
+}
+
+// GGUF variant of SetUpServerForDownloadAndStart: additionally passes --gguf_filename.
+void SetUpServerForDownloadAndStartGGUF(std::unique_ptr<std::thread>& t, ovms::Server& server, std::string& ggufFilename, std::string& sourceModel, std::string& downloadPath, std::string& task, int timeoutSeconds) {
+    server.setShutdownRequest(0);
+    std::string port = "9133";
+    randomizeAndEnsureFree(port);
+    char* argv[] = {
+        (char*)"ovms",
+        (char*)"--port",
+        (char*)port.c_str(),
+        (char*)"--source_model",
+        (char*)sourceModel.c_str(),
+        (char*)"--model_repository_path",
+        (char*)downloadPath.c_str(),
+        (char*)"--task",
+        (char*)task.c_str(),
+        (char*)"--gguf_filename",
+        (char*)ggufFilename.c_str(),
+    };
+
+    int argc = 11;
+    t.reset(new std::thread([&argc, &argv, &server]() {
+        EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv));
+    }));
+
+    EnsureServerStartedWithTimeout(server, timeoutSeconds);
+}
+
+// Starts the server from a config file; when api_key is non-empty it is passed
+// via --api_key_file. NOTE(review): header declares this parameter as
+// apiKeyFile — harmless in C++, but align the names.
+void SetUpServer(std::unique_ptr<std::thread>& t, ovms::Server& server, std::string& port, const char* configPath, int timeoutSeconds, std::string api_key) {
+    server.setShutdownRequest(0);
+    randomizeAndEnsureFree(port);
+    if (!api_key.empty()) {
+        char* argv[] = {(char*)"ovms",
+            (char*)"--config_path",
+            (char*)configPath,
+            (char*)"--port",
+            (char*)port.c_str(),
+            (char*)"--api_key_file",
+            (char*)api_key.c_str()};
+ int argc = 7; + t.reset(new std::thread([&argc, &argv, &server]() { + EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); + })); + EnsureServerStartedWithTimeout(server, timeoutSeconds); + } else { + char* argv[] = {(char*)"ovms", + (char*)"--config_path", + (char*)configPath, + (char*)"--port", + (char*)port.c_str()}; + int argc = 5; + t.reset(new std::thread([&argc, &argv, &server]() { + EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); + })); + EnsureServerStartedWithTimeout(server, timeoutSeconds); + } +} + +void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* modelPath, const char* modelName, int timeoutSeconds) { + server.setShutdownRequest(0); + randomizeAndEnsureFree(port); + char* argv[] = {(char*)"ovms", + (char*)"--model_name", + (char*)modelName, + (char*)"--model_path", + (char*)getGenericFullPathForSrcTest(modelPath).c_str(), + (char*)"--port", + (char*)port.c_str()}; + int argc = 7; + t.reset(new std::thread([&argc, &argv, &server]() { + EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); + })); + EnsureServerStartedWithTimeout(server, timeoutSeconds); +} diff --git a/src/test/test_server_utils.hpp b/src/test/test_server_utils.hpp new file mode 100644 index 0000000000..0182f4d173 --- /dev/null +++ b/src/test/test_server_utils.hpp @@ -0,0 +1,40 @@ +//***************************************************************************** +// Copyright 2026 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#pragma once + +#include +#include +#include + +namespace ovms { +class Server; +} // namespace ovms + +void randomizeAndEnsureFree(std::string& port); +void randomizeAndEnsureFrees(std::string& port1, std::string& port2); + +extern const int64_t SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS; + +void EnsureServerStartedWithTimeout(ovms::Server& server, int timeoutSeconds); +void EnsureServerModelDownloadFinishedWithTimeout(ovms::Server& server, int timeoutSeconds); + +void SetUpServerForDownload(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int expected_code = EXIT_SUCCESS, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); +void SetUpServerForDownloadWithDraft(std::unique_ptr& t, ovms::Server& server, + std::string& draftModel, std::string& source_model, std::string& download_path, std::string& task, int expected_code, int timeoutSeconds = 2 * SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); +void SetUpServerForDownloadAndStartGGUF(std::unique_ptr& t, ovms::Server& server, std::string& ggufFilename, std::string& sourceModel, std::string& downloadPath, std::string& task, int timeoutSeconds = 4 * SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); +void SetUpServerForDownloadAndStart(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); +void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* configPath, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS, std::string apiKeyFile = ""); +void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* modelPath, const char* modelName, int timeoutSeconds = 
SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); diff --git a/src/test/test_utils.cpp b/src/test/test_utils.cpp index 5c21e15158..5c12a8826a 100644 --- a/src/test/test_utils.cpp +++ b/src/test/test_utils.cpp @@ -14,56 +14,26 @@ // limitations under the License. //***************************************************************************** #include "test_utils.hpp" -#include "light_test_utils.hpp" -#include "platform_utils.hpp" #include -#include -#include #include -#include -#include #include -#include "../capi_frontend/capi_utils.hpp" -#include "../capi_frontend/inferenceparameter.hpp" -#include "../kfs_frontend/kfs_utils.hpp" -#include "../network_utils.hpp" -#include "../prediction_service_utils.hpp" -#include "../servablemanagermodule.hpp" -#include "../server.hpp" -#include "../tensorinfo.hpp" -#include "../tfs_frontend/tfs_utils.hpp" +#include "platform_utils.hpp" -using tensorflow::serving::PredictRequest; -using tensorflow::serving::PredictResponse; +#include "src/tensorinfo.hpp" using ovms::TensorInfo; -void prepareBinaryPredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize) { throw 42; } // CAPI binary not supported -void prepareBinaryPredictRequestNoShape(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize) { throw 42; } // CAPI binary not supported -void prepareBinary4x4PredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize) { throw 42; } // CAPI binary not supported - -void prepareInferStringRequest(ovms::InferenceRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent) { throw 42; } // CAPI binary not supported -void prepareInferStringTensor(ovms::InferenceTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content) { throw 42; } // CAPI binary not supported - -void preparePredictRequest(ovms::InferenceRequest& request, 
inputs_info_t requestInputs, const std::vector& data, uint32_t decrementBufferSize, OVMS_BufferType bufferType, std::optional deviceId) { - request.removeAllInputs(); - for (auto const& it : requestInputs) { - prepareCAPIInferInputTensor(request, it.first, it.second, data, decrementBufferSize, bufferType, deviceId); - } -} void printTensor(const ov::Tensor& tensor) { const auto& elementType = tensor.get_element_type(); - // Get pointer to data const void* dataPtr = tensor.data(); size_t limit = 20; if (tensor.get_size() < limit) { limit = tensor.get_size(); } - // Handle different data types (example for float) if (elementType == ov::element::f32) { const float* data = static_cast(dataPtr); std::cout << "Tensor data (f32): "; @@ -101,84 +71,6 @@ void printTensor(const ov::Tensor& tensor) { std::cout << "[ERROR] Unsupported data type: " << elementType << std::endl; } -void preparePredictRequest(tensorflow::serving::PredictRequest& request, inputs_info_t requestInputs, const std::vector& data) { - request.mutable_inputs()->clear(); - for (auto const& it : requestInputs) { - auto& name = it.first; - auto [shape, precision] = it.second; - - auto& input = (*request.mutable_inputs())[name]; - auto datatype = getPrecisionAsDataType(precision); - input.set_dtype(datatype); - size_t numberOfElements = 1; - for (auto const& dim : shape) { - input.mutable_tensor_shape()->add_dim()->set_size(dim); - numberOfElements *= dim; - } - switch (datatype) { - case tensorflow::DataType::DT_HALF: { - if (data.size() == 0) { - for (size_t i = 0; i < numberOfElements; i++) { - input.add_half_val('1'); - } - } else { - for (size_t i = 0; i < data.size(); i++) { - input.add_half_val(data[i]); - } - } - break; - } - case tensorflow::DataType::DT_UINT16: { - if (data.size() == 0) { - for (size_t i = 0; i < numberOfElements; i++) { - input.add_int_val('1'); - } - } else { - for (size_t i = 0; i < data.size(); i++) { - input.add_int_val(data[i]); - } - } - break; - } - default: { - if 
(data.size() == 0) { - *input.mutable_tensor_content() = std::string(numberOfElements * tensorflow::DataTypeSize(datatype), '1'); - } else { - std::string content; - content.resize(data.size() * tensorflow::DataTypeSize(datatype)); - std::memcpy(content.data(), data.data(), content.size()); - *input.mutable_tensor_content() = content; - } - } - } - } -} - -void waitForOVMSConfigReload(ovms::ModelManager& manager) { - // This is effectively multiplying by 5 to have at least 1 config reload in between - // two test steps, but we check if config files changed to exit earlier if changes are already applied - const float WAIT_MULTIPLIER_FACTOR = 5; - const uint32_t waitTime = WAIT_MULTIPLIER_FACTOR * manager.getWatcherIntervalMillisec() * 1000; - bool reloadIsNeeded = true; - int timestepMs = 10; - - auto start = std::chrono::high_resolution_clock::now(); - while (reloadIsNeeded && - (std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() < waitTime)) { - std::this_thread::sleep_for(std::chrono::milliseconds(timestepMs)); - manager.configFileReloadNeeded(reloadIsNeeded); - } -} - -void waitForOVMSResourcesCleanup(ovms::ModelManager& manager) { - // This is effectively multiplying by 1.8 to have 1 config reload in between - // two test steps - const float WAIT_MULTIPLIER_FACTOR = 1.8; - const uint32_t waitTime = WAIT_MULTIPLIER_FACTOR * manager.getResourcesCleanupIntervalMillisec(); - SPDLOG_DEBUG("waitForOVMSResourcesCleanup {} ms", waitTime); - std::this_thread::sleep_for(std::chrono::milliseconds(waitTime)); -} - ovms::tensor_map_t prepareTensors( const std::unordered_map&& tensors, ovms::Precision precision) { @@ -213,286 +105,6 @@ std::string readableSetError(std::unordered_set actual, std::unorde return ss.str(); } -void checkDummyResponse(const std::string outputName, - const std::vector& requestData, - PredictRequest& request, PredictResponse& response, int seriesLength, int batchSize, const std::string& servableName, size_t 
expectedOutputsCount) { - ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; - const auto& output_proto = response.outputs().at(outputName); - - ASSERT_EQ(output_proto.tensor_content().size(), batchSize * DUMMY_MODEL_OUTPUT_SIZE * sizeof(float)); - ASSERT_EQ(output_proto.tensor_shape().dim_size(), 2); - ASSERT_EQ(output_proto.tensor_shape().dim(0).size(), batchSize); - ASSERT_EQ(output_proto.tensor_shape().dim(1).size(), DUMMY_MODEL_OUTPUT_SIZE); - - std::vector responseData = requestData; - std::for_each(responseData.begin(), responseData.end(), [seriesLength](float& v) { v += 1.0 * seriesLength; }); - - float* actual_output = (float*)output_proto.tensor_content().data(); - float* expected_output = responseData.data(); - const int dataLengthToCheck = DUMMY_MODEL_OUTPUT_SIZE * batchSize * sizeof(float); - checkBuffers(actual_output, expected_output, dataLengthToCheck); -} - -void checkScalarResponse(const std::string outputName, - float inputScalar, PredictResponse& response, const std::string& servableName) { - ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; - const auto& output_proto = response.outputs().at(outputName); - - ASSERT_EQ(output_proto.tensor_shape().dim_size(), 0); - - ASSERT_EQ(output_proto.tensor_content().size(), sizeof(float)); - ASSERT_EQ(*((float*)output_proto.tensor_content().data()), inputScalar); -} - -void checkScalarResponse(const std::string outputName, - float inputScalar, ::KFSResponse& response, const std::string& servableName) { - ASSERT_EQ(response.model_name(), servableName); - ASSERT_EQ(response.outputs_size(), 1); - ASSERT_EQ(response.raw_output_contents_size(), 1); - ASSERT_EQ(response.outputs().begin()->name(), outputName) << "Did not find:" << outputName; - const auto& output_proto = *response.outputs().begin(); - std::string* content = response.mutable_raw_output_contents(0); - - ASSERT_EQ(output_proto.shape_size(), 0); - ASSERT_EQ(content->size(), 
sizeof(float)); - - ASSERT_EQ(*((float*)content->data()), inputScalar); -} - -void checkStringResponse(const std::string outputName, - const std::vector& inputStrings, PredictResponse& response, const std::string& servableName) { - ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; - const auto& output_proto = response.outputs().at(outputName); - - ASSERT_EQ(output_proto.tensor_shape().dim_size(), 1); - ASSERT_EQ(output_proto.tensor_shape().dim(0).size(), inputStrings.size()); - ASSERT_EQ(output_proto.dtype(), tensorflow::DT_STRING); - - ASSERT_EQ(output_proto.string_val_size(), inputStrings.size()); - for (size_t i = 0; i < inputStrings.size(); i++) { - ASSERT_EQ(output_proto.string_val(i), inputStrings[i]); - } -} - -void checkStringResponse(const std::string outputName, - const std::vector& inputStrings, ::KFSResponse& response, const std::string& servableName) { - ASSERT_EQ(response.model_name(), servableName); - ASSERT_EQ(response.outputs_size(), 1); - ASSERT_EQ(response.raw_output_contents_size(), 1); - ASSERT_EQ(response.outputs().begin()->name(), outputName) << "Did not find:" << outputName; - const auto& output_proto = *response.outputs().begin(); - std::string* content = response.mutable_raw_output_contents(0); - - ASSERT_EQ(output_proto.shape_size(), 1); - ASSERT_EQ(output_proto.shape(0), inputStrings.size()); - - size_t offset = 0; - for (size_t i = 0; i < inputStrings.size(); i++) { - ASSERT_GE(content->size(), offset + 4); - uint32_t batchLength = *((uint32_t*)(content->data() + offset)); - ASSERT_EQ(batchLength, inputStrings[i].size()); - offset += 4; - ASSERT_GE(content->size(), offset + batchLength); - ASSERT_EQ(std::string(content->data() + offset, batchLength), inputStrings[i]); - offset += batchLength; - } - ASSERT_EQ(offset, content->size()); -} - -void checkAddResponse(const std::string outputName, - const std::vector& requestData1, - const std::vector& requestData2, - ::KFSRequest& request, const 
::KFSResponse& response, int seriesLength, int batchSize, const std::string& servableName) { - ASSERT_EQ(response.model_name(), servableName); - ASSERT_EQ(response.outputs_size(), 1); - ASSERT_EQ(response.raw_output_contents_size(), 1); - ASSERT_EQ(response.outputs().begin()->name(), outputName) << "Did not find:" << outputName; - const auto& output_proto = *response.outputs().begin(); - const std::string& content = response.raw_output_contents(0); - - ASSERT_EQ(content.size(), batchSize * DUMMY_MODEL_OUTPUT_SIZE * sizeof(float)); - ASSERT_EQ(output_proto.shape_size(), 2); - ASSERT_EQ(output_proto.shape(0), batchSize); - ASSERT_EQ(output_proto.shape(1), DUMMY_MODEL_OUTPUT_SIZE); - - std::vector responseData = requestData1; - for (size_t i = 0; i < requestData1.size(); ++i) { - responseData[i] += requestData2[i]; - } - - const float* actual_output = (const float*)content.data(); - float* expected_output = responseData.data(); - const int dataLengthToCheck = DUMMY_MODEL_OUTPUT_SIZE * batchSize * sizeof(float); - checkBuffers(actual_output, expected_output, dataLengthToCheck); -} - -void checkIncrement4DimShape(const std::string outputName, - PredictResponse& response, - const std::vector& expectedShape) { - ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; - const auto& output_proto = response.outputs().at(outputName); - - ASSERT_EQ(output_proto.tensor_shape().dim_size(), expectedShape.size()); - for (size_t i = 0; i < expectedShape.size(); i++) { - ASSERT_EQ(output_proto.tensor_shape().dim(i).size(), expectedShape[i]); - } -} - -void RemoveReadonlyFileAttributeFromDir(std::string& directoryPath) { - for (const std::filesystem::directory_entry& dir_entry : std::filesystem::recursive_directory_iterator(directoryPath)) { - std::filesystem::permissions(dir_entry, std::filesystem::perms::owner_read | std::filesystem::perms::owner_write | std::filesystem::perms::owner_exec | std::filesystem::perms::group_read | 
std::filesystem::perms::group_write | std::filesystem::perms::others_read, std::filesystem::perm_options::add); - } -} - -void SetReadonlyFileAttributeFromDir(std::string& directoryPath) { - for (const std::filesystem::directory_entry& dir_entry : std::filesystem::recursive_directory_iterator(directoryPath)) { - std::filesystem::permissions(dir_entry, std::filesystem::perms::owner_write | std::filesystem::perms::owner_exec | std::filesystem::perms::group_write, std::filesystem::perm_options::remove); - std::filesystem::permissions(dir_entry, std::filesystem::perms::owner_read | std::filesystem::perms::group_read | std::filesystem::perms::others_read, std::filesystem::perm_options::add); - } -} - -bool isShapeTheSame(const tensorflow::TensorShapeProto& actual, const std::vector&& expected) { - bool same = true; - if (static_cast(actual.dim_size()) != expected.size()) { - SPDLOG_ERROR("Unexpected dim_size. Got: {}, Expect: {}", actual.dim_size(), expected.size()); - return false; - } - for (int i = 0; i < actual.dim_size(); i++) { - if (actual.dim(i).size() != expected[i]) { - SPDLOG_ERROR("Unexpected dim[{}]. Got: {}, Expect: {}", i, actual.dim(i).size(), expected[i]); - same = false; - } - } - if (same == false) { - std::stringstream ss; - for (int i = 0; i < actual.dim_size(); i++) { - ss << "dim[" - << i - << "] got:" - << actual.dim(i).size() - << " expect:" << expected[i]; - } - SPDLOG_ERROR("Shape mismatch: {}", ss.str()); - } - return same; -} - -bool isShapeTheSame(const KFSShapeType& actual, const std::vector&& expected) { - bool same = true; - int a_size = actual.size(); - if (a_size != int(expected.size())) { - SPDLOG_ERROR("Unexpected dim_size. Got: {}, Expect: {}", a_size, expected.size()); - return false; - } - for (int i = 0; i < a_size; i++) { - if (actual.at(i) != expected[i]) { - SPDLOG_ERROR("Unexpected dim[{}]. 
Got: {}, Expect: {}", i, actual.at(i), expected[i]); - same = false; - break; - } - } - if (same == false) { - std::stringstream ss; - for (int i = 0; i < a_size; i++) { - ss << "dim[" - << i - << "] got:" - << actual.at(i) - << " expect:" << expected[i]; - } - SPDLOG_ERROR("Shape mismatch: {}", ss.str()); - } - return same; -} - -void readFile(const std::string& path, size_t& filesize, std::unique_ptr& bytes) { - std::ifstream DataFile; - DataFile.open(path, std::ios::binary); - DataFile.seekg(0, std::ios::end); - filesize = DataFile.tellg(); - DataFile.seekg(0); - bytes = std::make_unique(filesize); - DataFile.read(bytes.get(), filesize); -} - -void readRgbJpg(size_t& filesize, std::unique_ptr& image_bytes) { - return readFile(getGenericFullPathForSrcTest("/ovms/src/test/binaryutils/rgb.jpg"), filesize, image_bytes); -} - -void read4x4RgbJpg(size_t& filesize, std::unique_ptr& image_bytes) { - return readFile(getGenericFullPathForSrcTest("/ovms/src/test/binaryutils/rgb4x4.jpg"), filesize, image_bytes); -} - -void prepareInferStringTensor(::KFSRequest::InferInputTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content) { - if (!putBufferInInputTensorContent && content == nullptr) { - throw std::runtime_error("Preparation of infer string tensor failed"); - return; - } - tensor.set_name(name); - tensor.set_datatype("BYTES"); - tensor.mutable_shape()->Clear(); - tensor.add_shape(data.size()); - if (!putBufferInInputTensorContent) { - size_t dataSize = 0; - for (auto input : data) { - dataSize += input.size() + 4; - } - content->resize(dataSize); - size_t offset = 0; - for (auto input : data) { - uint32_t inputSize = input.size(); - std::memcpy(content->data() + offset, reinterpret_cast(&inputSize), sizeof(uint32_t)); - offset += sizeof(uint32_t); - std::memcpy(content->data() + offset, input.data(), input.length()); - offset += input.length(); - } - } else { - for (auto inputData : data) { - auto 
bytes_val = tensor.mutable_contents()->mutable_bytes_contents()->Add(); - bytes_val->append(inputData.data(), inputData.size()); - } - } -} - -void prepareInferStringRequest(::KFSRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent) { - auto it = request.mutable_inputs()->begin(); - size_t bufferId = 0; - while (it != request.mutable_inputs()->end()) { - if (it->name() == name) - break; - ++it; - ++bufferId; - } - KFSTensorInputProto* tensor; - std::string* content = nullptr; - if (it != request.mutable_inputs()->end()) { - tensor = &*it; - if (!putBufferInInputTensorContent) { - content = request.mutable_raw_input_contents()->Mutable(bufferId); - } - } else { - tensor = request.add_inputs(); - if (!putBufferInInputTensorContent) { - content = request.add_raw_input_contents(); - } - } - prepareInferStringTensor(*tensor, name, data, putBufferInInputTensorContent, content); -} - -void prepareInferStringTensor(tensorflow::TensorProto& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content) { - tensor.set_dtype(tensorflow::DataType::DT_STRING); - tensor.mutable_tensor_shape()->add_dim()->set_size(data.size()); - for (auto inputData : data) { - tensor.add_string_val(inputData); - } -} - -void prepareInferStringRequest(tensorflow::serving::PredictRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent) { - request.mutable_inputs()->clear(); - auto& input = (*request.mutable_inputs())[name]; - prepareInferStringTensor(input, name, data, putBufferInInputTensorContent, nullptr); -} - void assertOutputTensorMatchExpectations(const ov::Tensor& tensor, std::vector expectedStrings) { size_t maxStringLength = 0; for (const auto& input : expectedStrings) { @@ -518,375 +130,35 @@ void assertOutputTensorMatchExpectations(const ov::Tensor& tensor, std::vector& expectedStrings) { - ASSERT_EQ(proto.string_val_size(), 
expectedStrings.size()); - for (size_t i = 0; i < expectedStrings.size(); i++) { - ASSERT_EQ(proto.string_val(i), expectedStrings[i]); - } -} -void assertStringOutputProto(const KFSTensorOutputProto& proto, const std::vector& expectedStrings) { - ASSERT_EQ(proto.contents().bytes_contents_size(), expectedStrings.size()); - for (size_t i = 0; i < expectedStrings.size(); i++) { - ASSERT_EQ(proto.contents().bytes_contents(i), expectedStrings[i]); - } -} -void assertStringOutputProto(const ovms::InferenceTensor& proto, const std::vector& expectedStrings) { - FAIL() << "not implemented"; -} - -void assertStringResponse(const tensorflow::serving::PredictResponse& proto, const std::vector& expectedStrings, const std::string& outputName) { - ASSERT_EQ(proto.outputs().count(outputName), 1); - ASSERT_EQ(proto.outputs().at(outputName).dtype(), tensorflow::DataType::DT_STRING); - ASSERT_EQ(proto.outputs().at(outputName).tensor_shape().dim_size(), 1); - ASSERT_EQ(proto.outputs().at(outputName).tensor_shape().dim(0).size(), expectedStrings.size()); - assertStringOutputProto(proto.outputs().at(outputName), expectedStrings); -} -void assertStringResponse(const ::KFSResponse& proto, const std::vector& expectedStrings, const std::string& outputName) { - ASSERT_EQ(proto.outputs_size(), 1); - ASSERT_EQ(proto.outputs(0).name(), outputName); - ASSERT_EQ(proto.outputs(0).datatype(), "BYTES"); - ASSERT_EQ(proto.outputs(0).shape_size(), 1); - ASSERT_EQ(proto.outputs(0).shape(0), expectedStrings.size()); - std::string expectedString; - for (auto str : expectedStrings) { - int size = str.size(); - for (int k = 0; k < 4; k++, size >>= 8) { - expectedString += static_cast(size & 0xff); - } - expectedString.append(str); - } - ASSERT_EQ(memcmp(proto.raw_output_contents(0).data(), expectedString.data(), expectedString.size()), 0); -} -void assertStringResponse(const ovms::InferenceResponse& proto, const std::vector& expectedStrings, const std::string& outputName) { - FAIL() << "not implemented"; 
-} - -void prepareBinaryPredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize) { - auto& tensor = (*request.mutable_inputs())[inputName]; - size_t filesize = 0; - std::unique_ptr image_bytes = nullptr; - readRgbJpg(filesize, image_bytes); - - for (int i = 0; i < batchSize; i++) { - tensor.add_string_val(image_bytes.get(), filesize); - } - tensor.set_dtype(tensorflow::DataType::DT_STRING); - tensor.mutable_tensor_shape()->add_dim()->set_size(batchSize); -} - -void prepareBinaryPredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize) { - request.add_inputs(); - auto tensor = request.mutable_inputs()->Mutable(0); - tensor->set_name(inputName); - size_t filesize = 0; - std::unique_ptr image_bytes = nullptr; - readRgbJpg(filesize, image_bytes); - - for (int i = 0; i < batchSize; i++) { - tensor->mutable_contents()->add_bytes_contents(image_bytes.get(), filesize); - } - tensor->set_datatype("BYTES"); - tensor->mutable_shape()->Add(batchSize); -} - -void prepareBinaryPredictRequestNoShape(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize) { - auto& tensor = (*request.mutable_inputs())[inputName]; - size_t filesize = 0; - std::unique_ptr image_bytes = nullptr; - readRgbJpg(filesize, image_bytes); - - for (int i = 0; i < batchSize; i++) { - tensor.add_string_val(image_bytes.get(), filesize); - } - tensor.set_dtype(tensorflow::DataType::DT_STRING); -} - -void prepareBinaryPredictRequestNoShape(::KFSRequest& request, const std::string& inputName, const int batchSize) { - request.add_inputs(); - auto tensor = request.mutable_inputs()->Mutable(0); - tensor->set_name(inputName); - size_t filesize = 0; - std::unique_ptr image_bytes = nullptr; - readRgbJpg(filesize, image_bytes); - - for (int i = 0; i < batchSize; i++) { - tensor->mutable_contents()->add_bytes_contents(image_bytes.get(), filesize); - } - tensor->set_datatype("BYTES"); -} - -void 
prepareBinary4x4PredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize) { - auto& tensor = (*request.mutable_inputs())[inputName]; - size_t filesize = 0; - std::unique_ptr image_bytes = nullptr; - read4x4RgbJpg(filesize, image_bytes); - - for (int i = 0; i < batchSize; i++) { - tensor.add_string_val(image_bytes.get(), filesize); - } - tensor.set_dtype(tensorflow::DataType::DT_STRING); - tensor.mutable_tensor_shape()->add_dim()->set_size(batchSize); -} - -void prepareBinary4x4PredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize) { - request.add_inputs(); - auto tensor = request.mutable_inputs()->Mutable(0); - tensor->set_name(inputName); - size_t filesize = 0; - std::unique_ptr image_bytes = nullptr; - read4x4RgbJpg(filesize, image_bytes); - - for (int i = 0; i < batchSize; i++) { - tensor->mutable_contents()->add_bytes_contents(image_bytes.get(), filesize); - } - tensor->set_datatype("BYTES"); - tensor->mutable_shape()->Add(batchSize); -} - -::KFSTensorInputProto* findKFSInferInputTensor(::KFSRequest& request, const std::string& name) { - auto it = request.mutable_inputs()->begin(); - while (it != request.mutable_inputs()->end()) { - if (it->name() == name) - break; - ++it; - } - return it == request.mutable_inputs()->end() ? 
nullptr : &(*it); -} - -std::string* findKFSInferInputTensorContentInRawInputs(::KFSRequest& request, const std::string& name) { - auto it = request.mutable_inputs()->begin(); - size_t bufferId = 0; - std::string* content = nullptr; - while (it != request.mutable_inputs()->end()) { - if (it->name() == name) - break; - ++it; - ++bufferId; - } - if (it != request.mutable_inputs()->end()) { - content = request.mutable_raw_input_contents()->Mutable(bufferId); - } - return content; -} - -void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data, uint32_t decrementBufferSize, OVMS_BufferType bufferType, std::optional deviceId) { - auto [shape, type] = inputInfo; - prepareCAPIInferInputTensor(request, name, - {shape, getPrecisionAsOVMSDataType(type)}, - data, decrementBufferSize, bufferType, deviceId); -} - -void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data, uint32_t decrementBufferSize, OVMS_BufferType bufferType, std::optional deviceId) { - auto [shape, datatype] = inputInfo; - size_t elementsCount = 1; - - // In case shape is negative, deduce size from provided data size - // Otherwise calculate from shape - bool isShapeNegative = false; - for (auto const& dim : shape) { - if (dim < 0) { - isShapeNegative = true; - } - elementsCount *= dim; - } - - request.addInput(name.c_str(), datatype, shape.data(), shape.size()); - - size_t dataSize = 0; - if (isShapeNegative) { - dataSize = data.size() * ovms::DataTypeToByteSize(datatype); - } else { - dataSize = elementsCount * ovms::DataTypeToByteSize(datatype); - } - if (decrementBufferSize) - dataSize -= decrementBufferSize; - - request.setInputBuffer(name.c_str(), data.data(), dataSize, bufferType, deviceId); -} - -void randomizeAndEnsureFree(std::string& port) { - std::mt19937_64 eng{std::random_device{}()}; - std::uniform_int_distribution<> 
dist{0, 9}; - int tryCount = 3; - while (tryCount--) { - for (auto j : {1, 2, 3}) { - char* digitToRandomize = (char*)port.c_str() + j; - *digitToRandomize = '0' + dist(eng); - } - if (ovms::isPortAvailable(std::stoi(port))) { - return; - } else { - continue; - } - } - EXPECT_TRUE(false) << "Could not find random available port"; -} -void randomizeAndEnsureFrees(std::string& port1, std::string& port2) { - randomizeAndEnsureFree(port1); - randomizeAndEnsureFree(port2); - while (port2 == port1) { - randomizeAndEnsureFree(port2); - } -} - -const int64_t SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS = 60; - -void EnsureServerStartedWithTimeout(ovms::Server& server, int timeoutSeconds) { - auto start = std::chrono::high_resolution_clock::now(); - int timestepMs = 20; - while ((server.getModuleState(ovms::SERVABLE_MANAGER_MODULE_NAME) != ovms::ModuleState::INITIALIZED) && - (std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() < timeoutSeconds)) { - std::this_thread::sleep_for(std::chrono::milliseconds(timestepMs)); +void RemoveReadonlyFileAttributeFromDir(std::string& directoryPath) { + for (const std::filesystem::directory_entry& dir_entry : std::filesystem::recursive_directory_iterator(directoryPath)) { + std::filesystem::permissions(dir_entry, std::filesystem::perms::owner_read | std::filesystem::perms::owner_write | std::filesystem::perms::owner_exec | std::filesystem::perms::group_read | std::filesystem::perms::group_write | std::filesystem::perms::others_read, std::filesystem::perm_options::add); } - ASSERT_EQ(server.getModuleState(ovms::SERVABLE_MANAGER_MODULE_NAME), ovms::ModuleState::INITIALIZED) << "OVMS did not fully load until allowed time:" << timeoutSeconds << "s. 
Check machine load"; } -void EnsureServerModelDownloadFinishedWithTimeout(ovms::Server& server, int timeoutSeconds) { - auto start = std::chrono::high_resolution_clock::now(); - while ((server.getModuleState(ovms::HF_MODEL_PULL_MODULE_NAME) != ovms::ModuleState::SHUTDOWN) && - (std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() < timeoutSeconds)) { +void SetReadonlyFileAttributeFromDir(std::string& directoryPath) { + for (const std::filesystem::directory_entry& dir_entry : std::filesystem::recursive_directory_iterator(directoryPath)) { + std::filesystem::permissions(dir_entry, std::filesystem::perms::owner_write | std::filesystem::perms::owner_exec | std::filesystem::perms::group_write, std::filesystem::perm_options::remove); + std::filesystem::permissions(dir_entry, std::filesystem::perms::owner_read | std::filesystem::perms::group_read | std::filesystem::perms::others_read, std::filesystem::perm_options::add); } - - ASSERT_EQ(server.getModuleState(ovms::HF_MODEL_PULL_MODULE_NAME), ovms::ModuleState::SHUTDOWN) << "OVMS did not download model in allowed time:" << timeoutSeconds << "s. 
Check machine load and network load"; } -// --pull --source_model OpenVINO/Phi-3-mini-FastDraft-50M-int8-ov --model_repository_path c:\download -void SetUpServerForDownload(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int expected_code, int timeoutSeconds) { - server.setShutdownRequest(0); - char* argv[] = {(char*)"ovms", - (char*)"--pull", - (char*)"--source_model", - (char*)source_model.c_str(), - (char*)"--model_repository_path", - (char*)download_path.c_str(), - (char*)"--task", - (char*)task.c_str()}; - - int argc = 8; - t.reset(new std::thread([&argc, &argv, &server, expected_code]() { - EXPECT_EQ(expected_code, server.start(argc, argv)); - })); - - EnsureServerModelDownloadFinishedWithTimeout(server, timeoutSeconds); -} - -void SetUpServerForDownloadWithDraft(std::unique_ptr& t, ovms::Server& server, - std::string& draftModel, std::string& source_model, std::string& download_path, std::string& task, int expected_code, int timeoutSeconds) { - server.setShutdownRequest(0); - char* argv[] = {(char*)"ovms", - (char*)"--pull", - (char*)"--source_model", - (char*)source_model.c_str(), - (char*)"--model_repository_path", - (char*)download_path.c_str(), - (char*)"--task", - (char*)task.c_str(), - (char*)"--draft_source_model", - (char*)draftModel.c_str()}; - - int argc = 10; - t.reset(new std::thread([&argc, &argv, &server, expected_code]() { - EXPECT_EQ(expected_code, server.start(argc, argv)); - })); - - EnsureServerModelDownloadFinishedWithTimeout(server, timeoutSeconds); -} - -void SetUpServerForDownloadAndStart(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int timeoutSeconds) { - server.setShutdownRequest(0); - std::string port = "9133"; - randomizeAndEnsureFree(port); - char* argv[] = {(char*)"ovms", - (char*)"--port", - (char*)port.c_str(), - (char*)"--source_model", - (char*)source_model.c_str(), - 
(char*)"--model_repository_path", - (char*)download_path.c_str(), - (char*)"--task", - (char*)task.c_str()}; - - int argc = 9; - t.reset(new std::thread([&argc, &argv, &server]() { - EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); - })); - - EnsureServerStartedWithTimeout(server, timeoutSeconds); -} - -void SetUpServerForDownloadAndStartGGUF(std::unique_ptr& t, ovms::Server& server, std::string& ggufFilename, std::string& sourceModel, std::string& downloadPath, std::string& task, int timeoutSeconds) { - server.setShutdownRequest(0); - std::string port = "9133"; - randomizeAndEnsureFree(port); - char* argv[] = { - (char*)"ovms", - (char*)"--port", - (char*)port.c_str(), - (char*)"--source_model", - (char*)sourceModel.c_str(), - (char*)"--model_repository_path", - (char*)downloadPath.c_str(), - (char*)"--task", - (char*)task.c_str(), - (char*)"--gguf_filename", - (char*)ggufFilename.c_str(), - }; - - int argc = 11; - t.reset(new std::thread([&argc, &argv, &server]() { - EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); - })); - - EnsureServerStartedWithTimeout(server, timeoutSeconds); +void readFile(const std::string& path, size_t& filesize, std::unique_ptr& bytes) { + std::ifstream DataFile; + DataFile.open(path, std::ios::binary); + DataFile.seekg(0, std::ios::end); + filesize = DataFile.tellg(); + DataFile.seekg(0); + bytes = std::make_unique(filesize); + DataFile.read(bytes.get(), filesize); } -void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* configPath, int timeoutSeconds, std::string api_key) { - server.setShutdownRequest(0); - randomizeAndEnsureFree(port); - if (!api_key.empty()) { - char* argv[] = {(char*)"ovms", - (char*)"--config_path", - (char*)configPath, - (char*)"--port", - (char*)port.c_str(), - (char*)"--api_key_file", - (char*)api_key.c_str()}; - int argc = 7; - t.reset(new std::thread([&argc, &argv, &server]() { - EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); - })); - 
EnsureServerStartedWithTimeout(server, timeoutSeconds); - } else { - char* argv[] = {(char*)"ovms", - (char*)"--config_path", - (char*)configPath, - (char*)"--port", - (char*)port.c_str()}; - int argc = 5; - t.reset(new std::thread([&argc, &argv, &server]() { - EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); - })); - EnsureServerStartedWithTimeout(server, timeoutSeconds); - } +void readRgbJpg(size_t& filesize, std::unique_ptr& image_bytes) { + return readFile(getGenericFullPathForSrcTest("/ovms/src/test/binaryutils/rgb.jpg"), filesize, image_bytes); } -void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* modelPath, const char* modelName, int timeoutSeconds) { - server.setShutdownRequest(0); - randomizeAndEnsureFree(port); - char* argv[] = {(char*)"ovms", - (char*)"--model_name", - (char*)modelName, - (char*)"--model_path", - (char*)getGenericFullPathForSrcTest(modelPath).c_str(), - (char*)"--port", - (char*)port.c_str()}; - int argc = 7; - t.reset(new std::thread([&argc, &argv, &server]() { - EXPECT_EQ(EXIT_SUCCESS, server.start(argc, argv)); - })); - EnsureServerStartedWithTimeout(server, timeoutSeconds); +void read4x4RgbJpg(size_t& filesize, std::unique_ptr& image_bytes) { + return readFile(getGenericFullPathForSrcTest("/ovms/src/test/binaryutils/rgb4x4.jpg"), filesize, image_bytes); } std::shared_ptr createTensorInfoCopyWithPrecision(std::shared_ptr src, ovms::Precision newPrecision) { diff --git a/src/test/test_utils.hpp b/src/test/test_utils.hpp index 7f5c80202d..d981de3249 100644 --- a/src/test/test_utils.hpp +++ b/src/test/test_utils.hpp @@ -15,53 +15,33 @@ //***************************************************************************** #pragma once +#include #include #include #include #include -#include #include #include -#include #include #include #include -#include #include -#include #include -#include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wall" -#include 
"tensorflow_serving/apis/prediction_service.grpc.pb.h" -#pragma GCC diagnostic pop -#include "../capi_frontend/inferencerequest.hpp" -#include "../capi_frontend/inferenceresponse.hpp" -#include "../config.hpp" -#include "../dags/node_library.hpp" -#include "../execution_context.hpp" -#include "../kfs_frontend/kfs_grpc_inference_service.hpp" -#include "../kfs_frontend/kfs_utils.hpp" -#if (MEDIAPIPE_DISABLE == 0) -#include "../mediapipe_internal/mediapipegraphdefinition.hpp" -#include "../mediapipe_internal/mediapipegraphexecutor.hpp" -#endif -#include "src/metrics/metric_registry.hpp" -#include "../modelinstance.hpp" -#include "../modelmanager.hpp" -#include "../shape.hpp" -#include "../status.hpp" -#include "../tensorinfo.hpp" - -#include "../kfs_frontend/validation.hpp" +#include "src/dags/node_library.hpp" +#include "src/execution_context.hpp" +#include "src/modelconfig.hpp" +#include "src/precision.hpp" +#include "src/shape.hpp" +#include "src/status.hpp" +#include "src/tensorinfo.hpp" -#if (PYTHON_DISABLE == 0) -#include "../python/pythonnoderesources.hpp" -#endif +#include "src/test/test_models.hpp" -#include "test_models.hpp" +// ============================================================================ +// Core test utilities (frontend-agnostic) +// ============================================================================ using inputs_info_t = std::map>; @@ -69,17 +49,6 @@ void adjustConfigToAllowModelFileRemovalWhenLoaded(ovms::ModelConfig& modelConfi static const ovms::ExecutionContext DEFAULT_TEST_CONTEXT{ovms::ExecutionContext::Interface::GRPC, ovms::ExecutionContext::Method::Predict}; -using TFSRequestType = tensorflow::serving::PredictRequest; -using TFSResponseType = tensorflow::serving::PredictResponse; -using TFSInputTensorType = tensorflow::TensorProto; -using TFSOutputTensorType = tensorflow::TensorProto; -using TFSShapeType = tensorflow::TensorShapeProto; -using TFSInputTensorIteratorType = google::protobuf::Map::const_iterator; -using 
TFSOutputTensorIteratorType = google::protobuf::Map::const_iterator; -using TFSInterface = std::pair; -using KFSInterface = std::pair; -using CAPIInterface = std::pair; - #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" void printTensor(const ov::Tensor& tensor); @@ -88,221 +57,6 @@ ovms::tensor_map_t prepareTensors( const std::unordered_map&& tensors, ovms::Precision precision = ovms::Precision::FP32); -void preparePredictRequest(tensorflow::serving::PredictRequest& request, inputs_info_t requestInputs, const std::vector& data = std::vector{}); - -KFSTensorInputProto* findKFSInferInputTensor(::KFSRequest& request, const std::string& name); -std::string* findKFSInferInputTensorContentInRawInputs(::KFSRequest& request, const std::string& name); - -template -void prepareKFSInferInputTensor(::KFSRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data = std::vector{}, bool putBufferInInputTensorContent = false) { - auto it = request.mutable_inputs()->begin(); - size_t bufferId = 0; - while (it != request.mutable_inputs()->end()) { - if (it->name() == name) - break; - ++it; - ++bufferId; - } - KFSTensorInputProto* tensor; - std::string* content = nullptr; - if (it != request.mutable_inputs()->end()) { - tensor = &*it; - if (!putBufferInInputTensorContent) { - content = request.mutable_raw_input_contents()->Mutable(bufferId); - } - } else { - tensor = request.add_inputs(); - if (!putBufferInInputTensorContent) { - content = request.add_raw_input_contents(); - } - } - auto [shape, datatype] = inputInfo; - tensor->set_name(name); - tensor->set_datatype(datatype); - size_t elementsCount = 1; - tensor->mutable_shape()->Clear(); - bool isNegativeShape = false; - for (auto const& dim : shape) { - tensor->add_shape(dim); - if (dim < 0) { - isNegativeShape = true; - } - elementsCount *= dim; - } - size_t dataSize = isNegativeShape ? 
data.size() : elementsCount; - if (!putBufferInInputTensorContent) { - if (data.size() == 0) { - content->assign(dataSize * ovms::KFSDataTypeSize(datatype), '1'); - } else { - content->resize(dataSize * ovms::KFSDataTypeSize(datatype)); - std::memcpy(content->data(), data.data(), content->size()); - } - } else { - switch (ovms::KFSPrecisionToOvmsPrecision(datatype)) { - case ovms::Precision::FP64: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_fp64_contents()->Add(); - *ptr = (data.size() ? data[i] : 1); - } - break; - } - case ovms::Precision::FP32: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_fp32_contents()->Add(); - *ptr = (data.size() ? data[i] : 1); - } - break; - } - // uint64_contents - case ovms::Precision::U64: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_uint64_contents()->Add(); - *ptr = (data.size() ? data[i] : 1); - } - break; - } - // uint_contents - case ovms::Precision::U8: - case ovms::Precision::U16: - case ovms::Precision::U32: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_uint_contents()->Add(); - *ptr = (data.size() ? data[i] : 1); - } - break; - } - // int64_contents - case ovms::Precision::I64: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_int64_contents()->Add(); - *ptr = (data.size() ? data[i] : 1); - } - break; - } - // bool_contents - case ovms::Precision::BOOL: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_bool_contents()->Add(); - *ptr = (data.size() ? data[i] : 1); - } - break; - } - // int_contents - case ovms::Precision::I8: - case ovms::Precision::I16: - case ovms::Precision::I32: { - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_int_contents()->Add(); - *ptr = (data.size() ? 
data[i] : 1); - } - break; - } - case ovms::Precision::FP16: - case ovms::Precision::U1: - case ovms::Precision::CUSTOM: - case ovms::Precision::UNDEFINED: - case ovms::Precision::DYNAMIC: - case ovms::Precision::MIXED: - case ovms::Precision::Q78: - case ovms::Precision::BIN: - default: { - } - } - } -} - -template <> -inline void prepareKFSInferInputTensor(::KFSRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data, bool putBufferInInputTensorContent) { - // TODO: Implement for putBufferInInputTensorContent == 0 - if (putBufferInInputTensorContent == 0) { - throw std::string("Unsupported"); - } - auto it = request.mutable_inputs()->begin(); - size_t bufferId = 0; - while (it != request.mutable_inputs()->end()) { - if (it->name() == name) - break; - ++it; - ++bufferId; - } - KFSTensorInputProto* tensor; - if (it != request.mutable_inputs()->end()) { - tensor = &*it; - } else { - tensor = request.add_inputs(); - } - auto [shape, datatype] = inputInfo; - tensor->set_name(name); - tensor->set_datatype(datatype); - size_t elementsCount = 1; - tensor->mutable_shape()->Clear(); - bool isNegativeShape = false; - for (auto const& dim : shape) { - tensor->add_shape(dim); - if (dim < 0) { - isNegativeShape = true; - } - elementsCount *= dim; - } - size_t dataSize = isNegativeShape ? data.size() : elementsCount; - for (size_t i = 0; i < dataSize; ++i) { - auto ptr = tensor->mutable_contents()->mutable_bool_contents()->Add(); - *ptr = (data.size() ? 
data[i] : 1); - } -} - -template -void prepareKFSInferInputTensor(::KFSRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data = std::vector{}, bool putBufferInInputTensorContent = false) { - auto [shape, type] = inputInfo; - prepareKFSInferInputTensor(request, name, - {shape, ovmsPrecisionToKFSPrecision(type)}, - data, putBufferInInputTensorContent); -} - -void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data, uint32_t decrementBufferSize = 0, OVMS_BufferType bufferType = OVMS_BUFFERTYPE_CPU, std::optional deviceId = std::nullopt); -void prepareCAPIInferInputTensor(ovms::InferenceRequest& request, const std::string& name, const std::tuple& inputInfo, - const std::vector& data, uint32_t decrementBufferSize = 0, OVMS_BufferType bufferType = OVMS_BUFFERTYPE_CPU, std::optional deviceId = std::nullopt); - -template -void preparePredictRequest(::KFSRequest& request, inputs_info_t requestInputs, const std::vector& data = std::vector{}, bool putBufferInInputTensorContent = false) { - request.mutable_inputs()->Clear(); - request.mutable_raw_input_contents()->Clear(); - for (auto const& it : requestInputs) { - prepareKFSInferInputTensor(request, it.first, it.second, data, putBufferInInputTensorContent); - } -} - -void preparePredictRequest(ovms::InferenceRequest& request, inputs_info_t requestInputs, const std::vector& data, - uint32_t decrementBufferSize = 0, OVMS_BufferType bufferType = OVMS_BUFFERTYPE_CPU, std::optional deviceId = std::nullopt); - -void prepareInferStringTensor(::KFSRequest::InferInputTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content); -void prepareInferStringTensor(tensorflow::TensorProto& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content); -void 
prepareInferStringTensor(ovms::InferenceTensor& tensor, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent, std::string* content); - -void prepareInferStringRequest(::KFSRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent = true); -void prepareInferStringRequest(tensorflow::serving::PredictRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent = true); -void prepareInferStringRequest(ovms::InferenceRequest& request, const std::string& name, const std::vector& data, bool putBufferInInputTensorContent = true); // CAPI binary not supported - -void assertOutputTensorMatchExpectations(const ov::Tensor& tensor, std::vector expectedStrings); - -void prepareBinaryPredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize); -void prepareBinaryPredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize); -void prepareBinaryPredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize); // CAPI binary not supported - -void prepareBinaryPredictRequestNoShape(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize); -void prepareBinaryPredictRequestNoShape(::KFSRequest& request, const std::string& inputName, const int batchSize); -void prepareBinaryPredictRequestNoShape(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize); // CAPI binary not supported -void prepareBinary4x4PredictRequest(tensorflow::serving::PredictRequest& request, const std::string& inputName, const int batchSize = 1); -void prepareBinary4x4PredictRequest(::KFSRequest& request, const std::string& inputName, const int batchSize = 1); -void prepareBinary4x4PredictRequest(ovms::InferenceRequest& request, const std::string& inputName, const int batchSize = 1); // CAPI binary not supported - 
-template -void prepareInvalidImageBinaryTensor(TensorType& tensor); - template std::string readableError(const T* expected_output, const T* actual_output, const size_t size) { std::stringstream ss; @@ -317,184 +71,16 @@ std::string readableError(const T* expected_output, const T* actual_output, cons std::string readableSetError(std::unordered_set expected, std::unordered_set actual); -void checkDummyResponse(const std::string outputName, - const std::vector& requestData, - tensorflow::serving::PredictRequest& request, tensorflow::serving::PredictResponse& response, int seriesLength, int batchSize = 1, const std::string& servableName = "", size_t expectedOutputsCount = 1); - -static std::string vectorTypeToKfsString(const std::type_info& vectorType) { - // {Precision::BF16, "BF16"}, - // {Precision::FP16, "FP16"}, - // {Precision::FP62, "FP62"}, - if (vectorType == typeid(float)) - return std::string("FP32"); - // {Precision::I32, "INT32"}, - if (vectorType == typeid(int32_t)) - return std::string("INT32"); - // {Precision::FP64, "FP64"}, - if (vectorType == typeid(double)) - return std::string("FP64"); - // {Precision::I64, "INT64"}, - if (vectorType == typeid(int64_t)) - return std::string("INT64"); - // {Precision::I16, "INT16"}, - if (vectorType == typeid(int16_t)) - return std::string("INT16"); - // {Precision::I8, "INT8"}, - if (vectorType == typeid(int8_t)) - return std::string("INT8"); - // {Precision::U64, "UINT64"}, - if (vectorType == typeid(uint64_t)) - return std::string("UINT64"); - // {Precision::U32, "UINT32"}, - if (vectorType == typeid(uint32_t)) - return std::string("UINT32"); - // {Precision::U16, "UINT16"}, - if (vectorType == typeid(uint16_t)) - return std::string("UINT16"); - // {Precision::U8, "UINT8"}, - if (vectorType == typeid(uint8_t)) - return std::string("UINT8"); - // {Precision::BOOL, "BOOL"}, - if (vectorType == typeid(bool)) - return std::string("BOOL"); - - // {Precision::UNDEFINED, "UNDEFINED"}}; - return 
std::string("UNDEFINED"); -} - -template -void checkDummyResponse(const std::string outputName, - const std::vector& requestData, - ::KFSRequest& request, ::KFSResponse& response, int seriesLength, int batchSize = 1, const std::string& servableName = "", size_t expectedOutputsCount = 1) { - ASSERT_EQ(response.model_name(), servableName); - ASSERT_EQ(response.outputs_size(), expectedOutputsCount); - ASSERT_EQ(response.raw_output_contents_size(), expectedOutputsCount); - // Finding the output with given name - auto it = std::find_if(response.outputs().begin(), response.outputs().end(), [&outputName](const ::KFSResponse::InferOutputTensor& tensor) { - return tensor.name() == outputName; - }); - ASSERT_NE(it, response.outputs().end()); - auto outputIndex = it - response.outputs().begin(); - const auto& output_proto = *it; - std::string* content = response.mutable_raw_output_contents(outputIndex); - - ASSERT_EQ(content->size(), batchSize * DUMMY_MODEL_OUTPUT_SIZE * sizeof(T)); - ASSERT_EQ(output_proto.datatype(), vectorTypeToKfsString(typeid(T))); - ASSERT_EQ(output_proto.shape_size(), 2); - ASSERT_EQ(output_proto.shape(0), batchSize); - ASSERT_EQ(output_proto.shape(1), DUMMY_MODEL_OUTPUT_SIZE); - - std::vector responseData = requestData; - std::for_each(responseData.begin(), responseData.end(), [seriesLength](T& v) { - v += 1.0 * seriesLength; - }); - - T* actual_output = (T*)content->data(); - T* expected_output = responseData.data(); - const int dataLengthToCheck = DUMMY_MODEL_OUTPUT_SIZE * batchSize * sizeof(T); - EXPECT_EQ(0, std::memcmp(actual_output, expected_output, dataLengthToCheck)) - << readableError(expected_output, actual_output, dataLengthToCheck / sizeof(T)); -} - -void checkScalarResponse(const std::string outputName, - float inputScalar, tensorflow::serving::PredictResponse& response, const std::string& servableName = ""); - -void checkScalarResponse(const std::string outputName, - float inputScalar, ::KFSResponse& response, const std::string& 
servableName = ""); - -void checkStringResponse(const std::string outputName, - const std::vector& inputStrings, tensorflow::serving::PredictResponse& response, const std::string& servableName = ""); - -void checkStringResponse(const std::string outputName, - const std::vector& inputStrings, ::KFSResponse& response, const std::string& servableName = ""); - -void assertStringOutputProto(const tensorflow::TensorProto& proto, const std::vector& expectedStrings); -void assertStringOutputProto(const KFSTensorOutputProto& proto, const std::vector& expectedStrings); -void assertStringOutputProto(const ovms::InferenceTensor& proto, const std::vector& expectedStrings); - -void assertStringResponse(const tensorflow::serving::PredictResponse& proto, const std::vector& expectedStrings, const std::string& outputName); -void assertStringResponse(const ::KFSResponse& proto, const std::vector& expectedStrings, const std::string& outputName); -void assertStringResponse(const ovms::InferenceResponse& proto, const std::vector& expectedStrings, const std::string& outputName); - -void checkAddResponse(const std::string outputName, - const std::vector& requestData1, - const std::vector& requestData2, - ::KFSRequest& request, const ::KFSResponse& response, int seriesLength, int batchSize, const std::string& servableName); - -template -void checkIncrement4DimResponse(const std::string outputName, - const std::vector& expectedData, - tensorflow::serving::PredictResponse& response, - const std::vector& expectedShape, - bool checkRaw = true) { - ASSERT_EQ(response.outputs().count(outputName), 1) << "Did not find:" << outputName; - const auto& output_proto = response.outputs().at(outputName); - - auto elementsCount = std::accumulate(expectedShape.begin(), expectedShape.end(), 1, std::multiplies()); - - ASSERT_EQ(output_proto.tensor_content().size(), elementsCount * sizeof(T)); - ASSERT_EQ(output_proto.tensor_shape().dim_size(), expectedShape.size()); - for (size_t i = 0; i < 
expectedShape.size(); i++) { - ASSERT_EQ(output_proto.tensor_shape().dim(i).size(), expectedShape[i]); - } - - T* actual_output = (T*)output_proto.tensor_content().data(); - T* expected_output = (T*)expectedData.data(); - const int dataLengthToCheck = elementsCount * sizeof(T); - EXPECT_EQ(0, std::memcmp(actual_output, expected_output, dataLengthToCheck)) - << readableError(expected_output, actual_output, dataLengthToCheck / sizeof(T)); -} - template -void checkIncrement4DimResponse(const std::string outputName, - const std::vector& expectedData, - ::KFSResponse& response, - const std::vector& expectedShape, - bool checkRaw = true) { - ASSERT_EQ(response.outputs_size(), 1); - ASSERT_EQ(response.mutable_outputs(0)->name(), outputName); - ASSERT_EQ(response.outputs(0).shape_size(), expectedShape.size()); - for (size_t i = 0; i < expectedShape.size(); i++) { - ASSERT_EQ(response.outputs(0).shape(i), expectedShape[i]); - } - - if (checkRaw) { - ASSERT_EQ(response.raw_output_contents_size(), 1); - auto elementsCount = std::accumulate(expectedShape.begin(), expectedShape.end(), 1, std::multiplies()); - ASSERT_EQ(response.raw_output_contents(0).size(), elementsCount * sizeof(T)); - T* actual_output = (T*)response.raw_output_contents(0).data(); - T* expected_output = (T*)expectedData.data(); - const int dataLengthToCheck = elementsCount * sizeof(T); - EXPECT_EQ(0, std::memcmp(actual_output, expected_output, dataLengthToCheck)) - << readableError(expected_output, actual_output, dataLengthToCheck / sizeof(T)); - } else { - ASSERT_EQ(response.outputs(0).datatype(), "UINT8") << "other precision testing currently not supported"; - ASSERT_EQ(sizeof(T), 1) << "other precision testing currently not supported"; - ASSERT_EQ(response.outputs(0).contents().uint_contents_size(), expectedData.size()); - for (size_t i = 0; i < expectedData.size(); i++) { - ASSERT_EQ(response.outputs(0).contents().uint_contents(i), expectedData[i]); - } - } +void checkBuffers(const T* expected, const T* 
actual, size_t bufferSize) { + EXPECT_EQ(0, std::memcmp(actual, expected, bufferSize)) + << readableError(expected, actual, bufferSize / sizeof(T)); } -void checkIncrement4DimShape(const std::string outputName, - tensorflow::serving::PredictResponse& response, - const std::vector& expectedShape); - -static std::vector asVector(const tensorflow::TensorShapeProto& proto) { - std::vector shape; - for (int i = 0; i < proto.dim_size(); i++) { - shape.push_back(proto.dim(i).size()); - } - return shape; -} +template +void prepareInvalidImageBinaryTensor(TensorType& tensor); -static std::vector asVector(google::protobuf::RepeatedField* container) { - std::vector result(container->size(), 0); - std::memcpy(result.data(), container->mutable_data(), result.size() * sizeof(google::protobuf::int32)); - return result; -} -#pragma GCC diagnostic pop +void assertOutputTensorMatchExpectations(const ov::Tensor& tensor, std::vector expectedStrings); template static std::vector asVector(const std::string& tensor_content) { @@ -506,46 +92,14 @@ static std::vector asVector(const std::string& tensor_content) { v.resize(tensor_content.size() / sizeof(T)); return v; } - -class MockedMetadataModelIns : public ovms::ModelInstance { -public: - MockedMetadataModelIns(ov::Core& ieCore) : - ModelInstance("UNUSED_NAME", 42, ieCore) {} - MOCK_METHOD(const ovms::tensor_map_t&, getInputsInfo, (), (const, override)); - MOCK_METHOD(const ovms::tensor_map_t&, getOutputsInfo, (), (const, override)); - MOCK_METHOD(std::optional, getBatchSize, (), (const, override)); - MOCK_METHOD(const ovms::ModelConfig&, getModelConfig, (), (const, override)); - const ovms::Status mockValidate(const tensorflow::serving::PredictRequest* request) { - return validate(request); - } - const ovms::Status mockValidate(const ::KFSRequest* request) { - return validate(request); - } - const ovms::Status mockValidate(const ovms::InferenceRequest* request) { - return validate(request); - } - template - ovms::Status validate(const 
RequestType* request) { - return ovms::request_validation_utils::validate( - *request, - this->getInputsInfo(), - this->getOutputsInfo(), - this->getName(), - this->getVersion(), - this->getOptionalInputNames(), - this->getModelConfig().getBatchingMode(), - this->getModelConfig().getShapes()); - } -}; +#pragma GCC diagnostic pop void RemoveReadonlyFileAttributeFromDir(std::string& directoryPath); void SetReadonlyFileAttributeFromDir(std::string& directoryPath); -/** - * Wait until ModelManager::configFileReloadNeeded returns false or timeout is reached - */ -void waitForOVMSConfigReload(ovms::ModelManager& manager); -void waitForOVMSResourcesCleanup(ovms::ModelManager& manager); +void readRgbJpg(size_t& filesize, std::unique_ptr& image_bytes); +void read4x4RgbJpg(size_t& filesize, std::unique_ptr& image_bytes); +void readFile(const std::string& path, size_t& filesize, std::unique_ptr& bytes); template static ovms::NodeLibrary createLibraryMock() { @@ -558,298 +112,17 @@ static ovms::NodeLibrary createLibraryMock() { T::release}; } -bool isShapeTheSame(const tensorflow::TensorShapeProto&, const std::vector&&); -bool isShapeTheSame(const KFSShapeType&, const std::vector&&); - -void readRgbJpg(size_t& filesize, std::unique_ptr& image_bytes); -void read4x4RgbJpg(size_t& filesize, std::unique_ptr& image_bytes); -void readFile(const std::string& path, size_t& filesize, std::unique_ptr& bytes); - -static const std::vector SUPPORTED_INPUT_PRECISIONS{ - // ovms::Precision::UNDEFINED, - // ovms::Precision::MIXED, - ovms::Precision::FP64, - ovms::Precision::FP32, - ovms::Precision::FP16, - // ovms::Precision::Q78, - ovms::Precision::I16, - ovms::Precision::U8, - ovms::Precision::I8, - ovms::Precision::U16, - ovms::Precision::U32, - ovms::Precision::I32, - ovms::Precision::I64, - // ovms::Precision::BIN, - // ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -static const std::vector UNSUPPORTED_INPUT_PRECISIONS{ - ovms::Precision::UNDEFINED, - ovms::Precision::MIXED, 
- // ovms::Precision::FP64, - // ovms::Precision::FP32, - // ovms::Precision::FP16, - ovms::Precision::Q78, - // ovms::Precision::I16, - // ovms::Precision::U8, - // ovms::Precision::I8, - // ovms::Precision::U16, - // ovms::Precision::I32, - // ovms::Precision::I64, - ovms::Precision::BIN, - ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -static const std::vector SUPPORTED_CAPI_INPUT_PRECISIONS{ - // ovms::Precision::UNDEFINED, - // ovms::Precision::MIXED, - ovms::Precision::FP64, - ovms::Precision::FP32, - ovms::Precision::FP16, - // ovms::Precision::Q78, - ovms::Precision::I16, - ovms::Precision::U8, - ovms::Precision::U1, - ovms::Precision::I8, - ovms::Precision::U16, - ovms::Precision::I32, - ovms::Precision::I64, - ovms::Precision::U32, - ovms::Precision::U64, - // ovms::Precision::BIN, - ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; -static const std::vector UNSUPPORTED_CAPI_INPUT_PRECISIONS{ - ovms::Precision::UNDEFINED, - ovms::Precision::MIXED, - // ovms::Precision::FP64, - // ovms::Precision::FP32, - // ovms::Precision::FP16, - ovms::Precision::Q78, - // ovms::Precision::I16, - // ovms::Precision::U8, - // ovms::Precision::U1, - // vms::Precision::I8, - // ovms::Precision::U16, - // ovms::Precision::I32, - // ovms::Precision::I64, - // ovms::Precision::U32, - // ovms::Precision::U64, - ovms::Precision::BIN, - // ovms::Precision::BOOL - ovms::Precision::CUSTOM}; -static const std::vector SUPPORTED_KFS_INPUT_PRECISIONS{ - // ovms::Precision::UNDEFINED, - // ovms::Precision::MIXED, - ovms::Precision::FP64, - ovms::Precision::FP32, - ovms::Precision::FP16, - // ovms::Precision::Q78, - ovms::Precision::I16, - ovms::Precision::U8, - ovms::Precision::I8, - ovms::Precision::U16, - ovms::Precision::I32, - ovms::Precision::I64, - ovms::Precision::U32, - ovms::Precision::U64, - // ovms::Precision::BIN, - ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -static const std::vector UNSUPPORTED_KFS_INPUT_PRECISIONS{ - 
ovms::Precision::UNDEFINED, - ovms::Precision::MIXED, - // ovms::Precision::FP64, - // ovms::Precision::FP32, - // ovms::Precision::FP16, - ovms::Precision::Q78, - // ovms::Precision::I16, - // ovms::Precision::U8, - // ovms::Precision::I8, - // ovms::Precision::U16, - // ovms::Precision::I32, - // ovms::Precision::I64, - // ovms::Precision::U32, - // ovms::Precision::U64, - ovms::Precision::BIN, - // ovms::Precision::BOOL - ovms::Precision::CUSTOM}; - -static const std::vector SUPPORTED_KFS_INPUT_PRECISIONS_TENSORINPUTCONTENT{ - // ovms::Precision::UNDEFINED, - // ovms::Precision::MIXED, - ovms::Precision::FP64, - ovms::Precision::FP32, - // ovms::Precision::FP16, - // ovms::Precision::Q78, - ovms::Precision::I16, - ovms::Precision::U8, - ovms::Precision::I8, - ovms::Precision::U16, - ovms::Precision::I32, - ovms::Precision::I64, - ovms::Precision::U32, - ovms::Precision::U64, - // ovms::Precision::BIN, - ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -static const std::vector UNSUPPORTED_KFS_INPUT_PRECISIONS_TENSORINPUTCONTENT{ - ovms::Precision::UNDEFINED, - ovms::Precision::MIXED, - // ovms::Precision::FP64, - // ovms::Precision::FP32, - ovms::Precision::FP16, - ovms::Precision::Q78, - // ovms::Precision::I16, - // ovms::Precision::U8, - // ovms::Precision::I8, - // ovms::Precision::U16, - // ovms::Precision::I32, - // ovms::Precision::I64, - // ovms::Precision::U32, - // ovms::Precision::U64, - ovms::Precision::BIN, - // ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -static const std::vector SUPPORTED_CAPI_INPUT_PRECISIONS_TENSORINPUTCONTENT{ - // ovms::Precision::UNDEFINED, - // ovms::Precision::MIXED, - ovms::Precision::FP64, - ovms::Precision::FP32, - // ovms::Precision::FP16, - // ovms::Precision::Q78, - ovms::Precision::I16, - ovms::Precision::U8, - ovms::Precision::I8, - ovms::Precision::U16, - ovms::Precision::I32, - ovms::Precision::I64, - ovms::Precision::U32, - ovms::Precision::U64, - // ovms::Precision::BIN, - 
ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -static const std::vector UNSUPPORTED_CAPI_INPUT_PRECISIONS_TENSORINPUTCONTENT{ - ovms::Precision::UNDEFINED, - ovms::Precision::MIXED, - // ovms::Precision::FP64, - // ovms::Precision::FP32, - ovms::Precision::FP16, - ovms::Precision::Q78, - // ovms::Precision::I16, - // ovms::Precision::U8, - // ovms::Precision::I8, - // ovms::Precision::U16, - // ovms::Precision::I32, - // ovms::Precision::I64, - // ovms::Precision::U32, - // ovms::Precision::U64, - ovms::Precision::BIN, - // ovms::Precision::BOOL - // ovms::Precision::CUSTOM) -}; - -void randomizeAndEnsureFree(std::string& port); -void randomizeAndEnsureFrees(std::string& port1, std::string& port2); - -extern const int64_t SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS; - -/* - * Waits until server is ready - */ -void EnsureServerStartedWithTimeout(ovms::Server& server, int timeoutSeconds); -/* - * Waits until server downloads model - */ -void EnsureServerModelDownloadFinishedWithTimeout(ovms::Server& server, int timeoutSeconds); -/* - * starts loading OVMS on separate thread but waits until it is shutdowned or model is downloaded - * --pull --source_model OpenVINO/Phi-3-mini-FastDraft-50M-int8-ov --model_repository_path /models - */ -void SetUpServerForDownload(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int expected_code = EXIT_SUCCESS, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); - -void SetUpServerForDownloadWithDraft(std::unique_ptr& t, ovms::Server& server, - std::string& draftModel, std::string& source_model, std::string& download_path, std::string& task, int expected_code, int timeoutSeconds = 2 * SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); -/* - * starts loading OVMS on separate thread but waits until it is shutdowned or model is downloaded and check if model is started in ovms - * --source_model Qwen/Qwen3-8B-GGUF --model_repository_path /models --gguf_filename 
Qwen3-8B-Q4_K_M.gguf - */ -void SetUpServerForDownloadAndStartGGUF(std::unique_ptr& t, ovms::Server& server, std::string& ggufFilename, std::string& sourceModel, std::string& downloadPath, std::string& task, int timeoutSeconds = 4 * SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); -/* - * starts loading OVMS on separate thread but waits until it is shutdowned or model is downloaded and check if model is started in ovms - * --source_model OpenVINO/Phi-3-mini-FastDraft-50M-int8-ov --model_repository_path /models - */ -void SetUpServerForDownloadAndStart(std::unique_ptr& t, ovms::Server& server, std::string& source_model, std::string& download_path, std::string& task, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); -/* - * starts loading OVMS on separate thread but waits until it is ready - */ -void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* configPath, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS, std::string apiKeyFile = ""); -void SetUpServer(std::unique_ptr& t, ovms::Server& server, std::string& port, const char* modelPath, const char* modelName, int timeoutSeconds = SERVER_START_FROM_CONFIG_TIMEOUT_SECONDS); - -class ConstructorEnabledConfig : public ovms::Config { -public: - ConstructorEnabledConfig() {} -}; - std::shared_ptr createTensorInfoCopyWithPrecision(std::shared_ptr src, ovms::Precision precision); -template -void checkBuffers(const T* expected, const T* actual, size_t bufferSize) { - EXPECT_EQ(0, std::memcmp(actual, expected, bufferSize)) - << readableError(expected, actual, bufferSize / sizeof(T)); -} - -#if (MEDIAPIPE_DISABLE == 0) -class DummyMediapipeGraphDefinition : public ovms::MediapipeGraphDefinition { -public: - std::string inputConfig; -#if (PYTHON_DISABLE == 0) - ovms::PythonNodeResources* getPythonNodeResources(const std::string& nodeName) { - auto it = this->sidePacketMaps.pythonNodeResourcesMap.find(nodeName); - if (it == 
std::end(this->sidePacketMaps.pythonNodeResourcesMap)) { - return nullptr; - } else { - return it->second.get(); - } - } -#endif - - ovms::GenAiServable* getGenAiServable(const std::string& nodeName) { - auto it = this->sidePacketMaps.genAiServableMap.find(nodeName); - if (it == std::end(this->sidePacketMaps.genAiServableMap)) { - return nullptr; - } else { - return it->second.get(); - } - } - - ovms::Status validateForConfigLoadablenessPublic() { - return this->validateForConfigLoadableness(); - } - - ovms::GenAiServableMap& getGenAiServableMap() { return this->sidePacketMaps.genAiServableMap; } - - DummyMediapipeGraphDefinition(const std::string name, - const ovms::MediapipeGraphConfig& config, - std::string inputConfig, - ovms::PythonBackend* pythonBackend = nullptr) : - ovms::MediapipeGraphDefinition(name, config, nullptr, nullptr, pythonBackend) { this->inputConfig = inputConfig; } - - // Do not read from path - use predefined config contents - ovms::Status validateForConfigFileExistence() override { - this->chosenConfig = this->inputConfig; - return ovms::StatusCode::OK; - } -}; -#endif +// ============================================================================ +// Backward compatibility: re-export split headers. +// Phase 3 will remove these and update consumers to include directly. 
+// ============================================================================ +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_request_utils_capi.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_model_manager_utils.hpp" +#include "src/test/test_predict_validation_utils.hpp" +#include "src/test/test_config_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" From e3f1bdd4fc4805148a43253c67a175e9217a36f2 Mon Sep 17 00:00:00 2001 From: Adrian Tobiszewski Date: Mon, 27 Apr 2026 15:30:48 +0200 Subject: [PATCH 2/5] Phase 3: Remove backward-compat re-exports, update consumers Update ~50 consumer files to use direct includes of split headers. Remove all backward-compat re-exports from test_utils.hpp. Fix transitive dependency breakages: - Add missing gmock includes (c_api_tests, kfs_metadata_test) - Add missing modelinstance.hpp (mediapipeflow_test) - Add missing modelmanager.hpp (stress_test_utils, embeddingsnode_test) - Add missing anonymous_input_name.hpp (modelconfig_test) - Add missing (c_api_test_utils.hpp) - Add missing kfs includes (model_service_test, kfs_metadata_test) - Remove dead using namespace tensorflow (pipelinedefinitionstatus_test) Build + 293 tests pass (--config=mp_on_py_on). 
--- src/test/audio/text2speech_test.cpp | 1 + src/test/c_api_test_utils.hpp | 2 + src/test/c_api_tests.cpp | 4 + src/test/capi_predict_validation_test.cpp | 2 + src/test/custom_loader_test.cpp | 2931 +++++++++-------- src/test/deserialization_tests.cpp | 3 + src/test/disabled_mediapipe_test.cpp | 1 + src/test/embeddingsnode_test.cpp | 3 + src/test/ensemble_config_change_stress.cpp | 1 + src/test/ensemble_flow_custom_node_tests.cpp | 4 + src/test/ensemble_mapping_config_tests.cpp | 2 + src/test/ensemble_tests.cpp | 2 + ...mediapipe_graph_metadata_response_test.cpp | 3 + src/test/get_model_metadata_response_test.cpp | 2 + .../get_model_metadata_signature_test.cpp | 2 + .../get_pipeline_metadata_response_test.cpp | 2 + src/test/http_openai_handler_test.cpp | 1 + src/test/http_rest_api_handler_test.cpp | 1 + src/test/kfs_metadata_test.cpp | 2 + src/test/kfs_rest_test.cpp | 1 + src/test/listmodelsendpoint_test.cpp | 1 + src/test/llm/assisted_decoding_test.cpp | 1 + src/test/llm/llmnode_test.cpp | 1 + src/test/llm/llmtemplate_test.cpp | 1 + src/test/llm/tokenize_endpoint_test.cpp | 1 + .../complete_flow_test.cpp | 1 + .../initialization_test.cpp | 1 + src/test/mediapipe_framework_test.cpp | 2 + src/test/mediapipe_validation_test.cpp | 2 + src/test/mediapipeflow_test.cpp | 4 + src/test/metric_config_test.cpp | 1 + src/test/metrics_flow_test.cpp | 3 + src/test/model_service_test.cpp | 3 + src/test/modelconfig_test.cpp | 1 + src/test/modelmanager_test.cpp | 2 + src/test/multipart_calculator_test.cpp | 1 + src/test/network_utils_test.cpp | 1 + src/test/openvino_remote_tensors_tests.cpp | 1 + src/test/ovmsconfig_test.cpp | 1 + src/test/pipelinedefinitionstatus_test.cpp | 2 - src/test/predict_validation_test.cpp | 3 + src/test/prediction_service_test.cpp | 3 + src/test/pull_gguf_hf_model_test.cpp | 1 + src/test/pull_hf_model_test.cpp | 2 + src/test/pythonnode_test.cpp | 3 + src/test/rest_utils_test.cpp | 1 + src/test/serialization_tests.cpp | 2 + src/test/server_test.cpp | 1 + 
src/test/stateful_modelinstance_test.cpp | 1 + src/test/streaming_test.cpp | 4 + src/test/stress_test_utils.hpp | 4 + src/test/tensor_conversion_test.cpp | 2 + src/test/test_http_utils.hpp | 1 + src/test/test_utils.hpp | 13 - .../tfs_rest_parser_binary_inputs_test.cpp | 1 + src/test/tfs_rest_parser_column_test.cpp | 1 + src/test/tfs_rest_parser_nonamed_test.cpp | 1 + src/test/tfs_rest_parser_row_test.cpp | 1 + 58 files changed, 1566 insertions(+), 1480 deletions(-) diff --git a/src/test/audio/text2speech_test.cpp b/src/test/audio/text2speech_test.cpp index 208e706cc8..6356bd1118 100644 --- a/src/test/audio/text2speech_test.cpp +++ b/src/test/audio/text2speech_test.cpp @@ -23,6 +23,7 @@ #include "../../server.hpp" #include "rapidjson/document.h" #include "../test_http_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" #include "../test_utils.hpp" #include "../platform_utils.hpp" #include "../constructor_enabled_model_manager.hpp" diff --git a/src/test/c_api_test_utils.hpp b/src/test/c_api_test_utils.hpp index febb7494af..503476117a 100644 --- a/src/test/c_api_test_utils.hpp +++ b/src/test/c_api_test_utils.hpp @@ -14,12 +14,14 @@ // limitations under the License. 
//***************************************************************************** #pragma once +#include #include #include #include "../ovms.h" // NOLINT #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" #define THROW_ON_ERROR_CAPI(C_API_CALL) \ { \ diff --git a/src/test/c_api_tests.cpp b/src/test/c_api_tests.cpp index 637a1edb5d..980aa11669 100644 --- a/src/test/c_api_tests.cpp +++ b/src/test/c_api_tests.cpp @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -44,6 +45,9 @@ #include "../server.hpp" #include "../version.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_model_manager_utils.hpp" +#include "src/test/test_config_utils.hpp" #include "mockmodelinstancechangingstates.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" diff --git a/src/test/capi_predict_validation_test.cpp b/src/test/capi_predict_validation_test.cpp index 01787ba009..b3d2563c4d 100644 --- a/src/test/capi_predict_validation_test.cpp +++ b/src/test/capi_predict_validation_test.cpp @@ -27,6 +27,8 @@ #include "../modelconfig.hpp" #include "../predict_request_validation_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_capi.hpp" +#include "src/test/test_predict_validation_utils.hpp" using ::testing::NiceMock; using ::testing::Return; diff --git a/src/test/custom_loader_test.cpp b/src/test/custom_loader_test.cpp index 9fe0148699..2243ac43f9 100644 --- a/src/test/custom_loader_test.cpp +++ b/src/test/custom_loader_test.cpp @@ -1,1465 +1,1466 @@ -//***************************************************************************** -// Copyright 2020-2021 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "../executingstreamidguard.hpp" -#include "../get_model_metadata_impl.hpp" -#include "src/filesystem/localfilesystem.hpp" -#include "../model.hpp" -#include "../model_service.hpp" -#include "../modelinstance.hpp" -#include "../modelinstanceunloadguard.hpp" -#include "../modelmanager.hpp" -#include "../modelversionstatus.hpp" -#include "../prediction_service_utils.hpp" -#include "../schema.hpp" -#include "../sequence_processing_spec.hpp" -#include "mockmodelinstancechangingstates.hpp" -#include "test_utils.hpp" -#include "light_test_utils.hpp" -#include "platform_utils.hpp" - -using testing::_; -using testing::ContainerEq; -using testing::Each; -using testing::Eq; -using ::testing::NiceMock; -using testing::Return; -using testing::ReturnRef; -using testing::UnorderedElementsAre; - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wnarrowing" - -using namespace ovms; - -/* ------------------------------------------------- -AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW CONFIGURATIONS ARE NOT USED -REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED -------------------------------------------------- - -namespace { - -// Custom Loader Config Keys -#define ENABLE_FORCE_BLACKLIST_CHECK "ENABLE_FORCE_BLACKLIST_CHECK" - -// config_model_with_customloader -const char* custom_loader_config_model = R"({ - "custom_loader_config_list":[ - { - "config":{ 
- "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_model_with_customloader -const char* custom_loader_config_model_relative_paths = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_no_model_with_customloader -const char* custom_loader_config_model_deleted = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[] - })"; - -// config_2_models_with_customloader -const char* custom_loader_config_model_new = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - }, - { - "config":{ - "name":"dummy-new", - "base_path": "/tmp/test_cl_models/model2", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_model_without_customloader_options -const char* custom_loader_config_model_customloader_options_removed = R"({ - "custom_loader_config_list":[ - { - "config":{ - 
"loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1 - } - } - ] - })"; - -const char* config_model_with_customloader_options_unknown_loadername = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "unknown", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_model_with_customloader -const char* custom_loader_config_model_multiple = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader-a", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - }, - { - "config":{ - "loader_name":"sample-loader-b", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - }, - { - "config":{ - "loader_name":"sample-loader-c", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy-a", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader-a", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - }, - { - "config":{ - "name":"dummy-b", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader-b", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - }, - { - "config":{ - "name":"dummy-c", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader-c", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -const char* custom_loader_config_model_blacklist = R"({ - "custom_loader_config_list":[ - { - 
"config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so", - "loader_config_file": "sample-loader-config" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin", "enable_file": "dummy.status"} - } - } - ] - })"; - -const char* empty_config = R"({ - "custom_loader_config_list":[], - "model_config_list":[] - })"; - -const char* expected_json_available = R"({ - "model_version_status": [ - { - "version": "1", - "state": "AVAILABLE", - "status": { - "error_code": "OK", - "error_message": "OK" - } - } - ] -} -)"; - -const char* expected_json_end = R"({ - "model_version_status": [ - { - "version": "1", - "state": "END", - "status": { - "error_code": "OK", - "error_message": "OK" - } - } - ] -} -)"; - -const char* expected_json_loading_error = R"({ - "model_version_status": [ - { - "version": "1", - "state": "LOADING", - "status": { - "error_code": "UNKNOWN", - "error_message": "UNKNOWN" - } - } - ] -} -)"; - -} // namespace - -*/ - -class TestCustomLoader : public ::testing::Test { -public: - void SetUp() { - const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); - - cl_models_path = getGenericFullPathForTmp("/tmp/" + std::string(test_info->name())); - cl_model_1_path = cl_models_path + "/model1/"; - cl_model_2_path = cl_models_path + "/model2/"; - - const std::string FIRST_MODEL_NAME = "dummy"; - const std::string SECOND_MODEL_NAME = "dummy_new"; - - std::filesystem::remove_all(cl_models_path); - std::filesystem::create_directories(cl_model_1_path); - } - void TearDown() { - // Create config file with an empty config & reload - const char* empty_config = R"({ - "custom_loader_config_list":[], - "model_config_list":[] - })"; - std::string configStr = empty_config; - std::string fileToReload = 
cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - // Clean up temporary destination - std::filesystem::remove_all(cl_models_path); - } - - /** - * @brief This function should mimic most closely predict request to check for thread safety - */ - void performPredict(const std::string modelName, - const ovms::model_version_t modelVersion, - const tensorflow::serving::PredictRequest& request, - std::unique_ptr> waitBeforeGettingModelInstance = nullptr, - std::unique_ptr> waitBeforePerformInference = nullptr); - - void deserialize(const std::vector& input, ov::InferRequest& inferRequest, std::shared_ptr modelInstance) { - try { - ov::Tensor tensor( - modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getOvPrecision(), - modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getShape().createPartialShape().get_shape(), - const_cast(reinterpret_cast(input.data()))); - inferRequest.set_tensor(DUMMY_MODEL_INPUT_NAME, tensor); - } catch (...) 
{ - ASSERT_TRUE(false) << "exception during deserialize"; - } - } - - void serializeAndCheck(int outputSize, ov::InferRequest& inferRequest) { - std::vector output(outputSize); - ASSERT_THAT(output, Each(Eq(0.))); - auto tensorOutput = inferRequest.get_tensor(DUMMY_MODEL_OUTPUT_NAME); - ASSERT_EQ(tensorOutput.get_byte_size(), outputSize * sizeof(float)); - std::memcpy(output.data(), tensorOutput.data(), outputSize * sizeof(float)); - EXPECT_THAT(output, Each(Eq(2.))); - } - - ovms::Status performInferenceWithRequest(const tensorflow::serving::PredictRequest& request, tensorflow::serving::PredictResponse& response) { - std::shared_ptr model; - std::unique_ptr unload_guard; - auto status = manager.getModelInstance("dummy", 0, model, unload_guard); - if (!status.ok()) { - return status; - } - - response.Clear(); - return model->infer(&request, &response, unload_guard); - } - -public: - ConstructorEnabledModelManager manager; - - ~TestCustomLoader() { - std::cout << "Destructor of TestCustomLoader()" << std::endl; - } - - std::string cl_models_path; - std::string cl_model_1_path; - std::string cl_model_2_path; -}; - -class MockModelInstance : public ovms::ModelInstance { -public: - MockModelInstance(ov::Core& ieCore) : - ModelInstance("UNUSED_NAME", 42, ieCore) {} - const ovms::Status mockValidate(const tensorflow::serving::PredictRequest* request) { - return validate(request); - } -}; - -void TestCustomLoader::performPredict(const std::string modelName, - const ovms::model_version_t modelVersion, - const tensorflow::serving::PredictRequest& request, - std::unique_ptr> waitBeforeGettingModelInstance, - std::unique_ptr> waitBeforePerformInference) { - // only validation is skipped - std::shared_ptr modelInstance; - std::unique_ptr modelInstanceUnloadGuard; - - auto& tensorProto = request.inputs().find("b")->second; - size_t batchSize = tensorProto.tensor_shape().dim(0).size(); - size_t inputSize = 1; - for (int i = 0; i < tensorProto.tensor_shape().dim_size(); i++) { - 
inputSize *= tensorProto.tensor_shape().dim(i).size(); - } - - if (waitBeforeGettingModelInstance) { - std::cout << "Waiting before getModelInstance. Batch size: " << batchSize << std::endl; - waitBeforeGettingModelInstance->get(); - } - ASSERT_EQ(manager.getModelInstance(modelName, modelVersion, modelInstance, modelInstanceUnloadGuard), ovms::StatusCode::OK); - - if (waitBeforePerformInference) { - std::cout << "Waiting before performInfernce." << std::endl; - waitBeforePerformInference->get(); - } - ovms::Status validationStatus = (std::static_pointer_cast(modelInstance))->mockValidate(&request); - std::cout << validationStatus.string() << std::endl; - ASSERT_TRUE(validationStatus == ovms::StatusCode::OK || - validationStatus == ovms::StatusCode::RESHAPE_REQUIRED || - validationStatus == ovms::StatusCode::BATCHSIZE_CHANGE_REQUIRED); - auto bsPositionIndex = 0; - auto requestBatchSize = ovms::getRequestBatchSize(&request, bsPositionIndex); - auto requestShapes = ovms::getRequestShapes(&request); - ASSERT_EQ(modelInstance->reloadModelIfRequired(validationStatus, requestBatchSize, requestShapes, modelInstanceUnloadGuard), ovms::StatusCode::OK); - - ovms::ExecutingStreamIdGuard executingStreamIdGuard(modelInstance->getInferRequestsQueue(), modelInstance->getMetricReporter()); - ov::InferRequest& inferRequest = executingStreamIdGuard.getInferRequest(); - std::vector input(inputSize); - std::generate(input.begin(), input.end(), []() { return 1.; }); - ASSERT_THAT(input, Each(Eq(1.))); - deserialize(input, inferRequest, modelInstance); - auto status = modelInstance->performInference(inferRequest); - ASSERT_EQ(status, ovms::StatusCode::OK); - size_t outputSize = batchSize * DUMMY_MODEL_OUTPUT_SIZE; - serializeAndCheck(outputSize, inferRequest); -} - -// Schema Validation - -TEST_F(TestCustomLoader, CustomLoaderConfigMatchingSchema) { - const char* customloaderConfigMatchingSchema = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"dummy-loader", - 
"library_path": "/tmp/loader/dummyloader", - "loader_config_file": "dummyloader-config" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options": {"loader_name": "dummy-loader"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigMatchingSchemaParsed; - customloaderConfigMatchingSchemaParsed.Parse(customloaderConfigMatchingSchema); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMatchingSchemaParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::OK); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderName) { - const char* customloaderConfigMissingLoaderName = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "library_path": "dummyloader", - "loader_config_file": "dummyloader-config" - } - } - ], - "model_config_list": [] - } - )"; - - rapidjson::Document customloaderConfigMissingLoaderNameParsed; - customloaderConfigMissingLoaderNameParsed.Parse(customloaderConfigMissingLoaderName); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLibraryPath) { - const char* customloaderConfigMissingLibraryPath = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"dummy-loader", - "loader_config_file": "dummyloader-config" - } - } - ], - "model_config_list": [] - } - )"; - - rapidjson::Document customloaderConfigMissingLibraryPathParsed; - customloaderConfigMissingLibraryPathParsed.Parse(customloaderConfigMissingLibraryPath); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLibraryPathParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderConfig) { - const char* 
customloaderConfigMissingLoaderConfig = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"dummy-loader", - "library_path": "dummyloader" - } - } - ], - "model_config_list": [] - } - )"; - - rapidjson::Document customloaderConfigMissingLoaderConfigParsed; - customloaderConfigMissingLoaderConfigParsed.Parse(customloaderConfigMissingLoaderConfig); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderConfigParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::OK); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigInvalidCustomLoaderConfig) { - const char* customloaderConfigInvalidCustomLoaderConfig = R"( - { - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options_invalid": {"loader_name": "dummy-loader"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigInvalidCustomLoaderConfigParsed; - customloaderConfigInvalidCustomLoaderConfigParsed.Parse(customloaderConfigInvalidCustomLoaderConfig); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigInvalidCustomLoaderConfigParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderNameInCustomLoaderOptions) { - const char* customloaderConfigMissingLoaderNameInCustomLoaderOptions = R"( - { - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options": {"a": "SS"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed; - customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed.Parse(customloaderConfigMissingLoaderNameInCustomLoaderOptions); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, 
ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMultiplePropertiesInCustomLoaderOptions) { - const char* customloaderConfigMultiplePropertiesInCustomLoaderOptions = R"( - { - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options": {"loader_name": "dummy-loader", "1": "a", "2": "b", "3": "c", "4":"d", "5":"e", "6":"f"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed; - customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed.Parse(customloaderConfigMultiplePropertiesInCustomLoaderOptions); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::OK); -} - -// Functional Validation -/* ------------------------------------------------- -AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW TESTS ARE NOT VALID -REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED -------------------------------------------------- -TEST_F(TestCustomLoader, CustomLoaderPrediction) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, 
ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictionRelativePath) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/bazel-bin/src/libsampleloader.so"), cl_models_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_relative_paths; - configStr.replace(configStr.find("test_cl_models"), std::string("test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderGetStatus) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - 
tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictDeletePredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - tensorflow::serving::PredictResponse response; - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::OK); - - // Re-create config file - createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); -} - -TEST_F(TestCustomLoader, 
CustomLoaderPredictNewVersionPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Copy version 1 to version 2 - std::filesystem::create_directories(cl_model_1_path + "2"); - std::filesystem::copy(cl_model_1_path + "1", cl_model_1_path + "2", std::filesystem::copy_options::recursive); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 2, request); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictNewModelPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - 
createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Copy model1 to model2 - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_2_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - configStr = custom_loader_config_model_new; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Re-create config file - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - performPredict("dummy-new", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictRemoveCustomLoaderOptionsPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - 
preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Replace model path in the config string - configStr = custom_loader_config_model_customloader_options_removed; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Re-create config file - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, PredictNormalModelAddCustomLoaderOptionsPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_customloader_options_removed; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Replace model path in the config string - configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - 
performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderOptionWithUnknownLibrary) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = config_model_with_customloader_options_unknown_loadername; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - tensorflow::serving::PredictResponse response; - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); -} - -TEST_F(TestCustomLoader, CustomLoaderWithMissingModelFiles) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - tensorflow::serving::PredictResponse response; - ASSERT_EQ(performInferenceWithRequest(request, response), 
ovms::StatusCode::MODEL_VERSION_MISSING); -} - -TEST_F(TestCustomLoader, CustomLoaderGetStatusDeleteModelGetStatus) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // Re-create config file - createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest reqx; - tensorflow::serving::GetModelStatusResponse resx; - - auto model_specx = reqx.mutable_model_spec(); - model_specx->Clear(); - model_specx->set_name("dummy"); - model_specx->mutable_version()->set_value(1); - - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, 
DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_constx = resx; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictionUsingManyCustomLoaders) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_multiple; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - - performPredict("dummy-a", 1, request); - performPredict("dummy-b", 1, request); - performPredict("dummy-c", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderGetMetaData) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - const char* expected_json = R"({ - "modelSpec": { - "name": "dummy", - "signatureName": "", - "version": "1" - }, - "metadata": { - "signature_def": { - "@type": "type.googleapis.com/tensorflow.serving.SignatureDefMap", - "signatureDef": { - 
"serving_default": { - "inputs": { - "b": { - "dtype": "DT_FLOAT", - "tensorShape": { - "dim": [ - { - "size": "1", - "name": "" - }, - { - "size": "10", - "name": "" - } - ], - "unknownRank": false - }, - "name": "b" - } - }, - "outputs": { - "a": { - "dtype": "DT_FLOAT", - "tensorShape": { - "dim": [ - { - "size": "1", - "name": "" - }, - { - "size": "10", - "name": "" - } - ], - "unknownRank": false - }, - "name": "a" - } - }, - "methodName": "", - "defaults": {} - } - } - } - } -} -)"; - - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - std::shared_ptr model; - std::unique_ptr unload_guard; - ASSERT_EQ(manager.getModelInstance("dummy", 1, model, unload_guard), ovms::StatusCode::OK); - - tensorflow::serving::GetModelMetadataResponse response; - ovms::GetModelMetadataImpl::buildResponse(model, &response); - - std::string json_output = ""; - ovms::GetModelMetadataImpl::serializeResponse2Json(&response, &json_output); - - EXPECT_TRUE(response.has_model_spec()); - EXPECT_EQ(response.model_spec().name(), "dummy"); - - tensorflow::serving::SignatureDefMap def; - response.metadata().at("signature_def").UnpackTo(&def); - - const auto& inputs = ((*def.mutable_signature_def())["serving_default"]).inputs(); - const auto& outputs = ((*def.mutable_signature_def())["serving_default"]).outputs(); - - EXPECT_EQ(inputs.size(), 1); - EXPECT_EQ(outputs.size(), 1); - EXPECT_EQ(json_output, expected_json); -} - 
-TEST_F(TestCustomLoader, CustomLoaderMultipleLoaderWithSameLoaderName) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - const char* custom_loader_config_model_xx = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - }, - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_xx; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderBlackListingModel) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = 
ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // copy status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest reqx; - tensorflow::serving::GetModelStatusResponse resx; - - auto model_specx = reqx.mutable_model_spec(); - model_specx->Clear(); - model_specx->set_name("dummy"); - model_specx->mutable_version()->set_value(1); - - 
ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_constx = resx; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); -} - -TEST_F(TestCustomLoader, CustomLoaderBlackListingRevoke) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = 
res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // copy status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req1; - tensorflow::serving::GetModelStatusResponse res1; - - auto model_spec1 = req1.mutable_model_spec(); - model_spec1->Clear(); - model_spec1->set_name("dummy"); - model_spec1->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const1 = res1; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); - - // Remove status file - std::filesystem::remove(status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req2; - tensorflow::serving::GetModelStatusResponse res2; - - auto model_spec2 = req2.mutable_model_spec(); - model_spec2->Clear(); - model_spec2->set_name("dummy"); - model_spec2->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const2 = res2; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); -} - 
-TEST_F(TestCustomLoader, CustomLoaderBlackListModelReloadError) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // copy status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = 
status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req1; - tensorflow::serving::GetModelStatusResponse res1; - - auto model_spec1 = req1.mutable_model_spec(); - model_spec1->Clear(); - model_spec1->set_name("dummy"); - model_spec1->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const1 = res1; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); - - // Remove status file & the Dummy.bin file - std::filesystem::remove(status_file); - std::string bin_file = status_file_path + "/dummy.bin"; - std::filesystem::remove(bin_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::FILE_INVALID); - - tensorflow::serving::GetModelStatusRequest req2; - tensorflow::serving::GetModelStatusResponse res2; - - auto model_spec2 = req2.mutable_model_spec(); - model_spec2->Clear(); - model_spec2->set_name("dummy"); - model_spec2->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const2 = res2; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_loading_error); - - // Copy back the model files & try reload - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive | std::filesystem::copy_options::overwrite_existing); - 
ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req3; - tensorflow::serving::GetModelStatusResponse res3; - - auto model_spec3 = req3.mutable_model_spec(); - model_spec3->Clear(); - model_spec3->set_name("dummy"); - model_spec3->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req3, &res3, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const3 = res3; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const3, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); -} - -TEST_F(TestCustomLoader, CustomLoaderLoadBlackListedModel) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - - // Create status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = 
status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ovms::Status status1 = manager.loadConfig(fileToReload); - ASSERT_TRUE(status1 == ovms::StatusCode::INTERNAL_ERROR); - - tensorflow::serving::GetModelStatusRequest req1; - tensorflow::serving::GetModelStatusResponse res1; - - auto model_spec1 = req1.mutable_model_spec(); - model_spec1->Clear(); - model_spec1->set_name("dummy"); - model_spec1->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const1 = res1; - std::string json_output1; - Status error_status1 = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output1); - ASSERT_EQ(error_status1, StatusCode::OK); - EXPECT_EQ(json_output1, expected_json_loading_error); - - // remove enable_file from config file - std::string status_config = ", \"enable_file\": \"dummy.status\""; - configStr.replace(configStr.find(status_config), std::string(status_config).size(), ""); - createConfigFileWithContent(configStr, fileToReload); - - ovms::Status status2 = manager.loadConfig(fileToReload); - ASSERT_TRUE(status2 == ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req2; - tensorflow::serving::GetModelStatusResponse res2; - - auto model_spec2 = req2.mutable_model_spec(); - model_spec2->Clear(); - model_spec2->set_name("dummy"); - model_spec2->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const2 = res2; - std::string json_output2; - Status error_status2 = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output2); - ASSERT_EQ(error_status2, StatusCode::OK); - EXPECT_EQ(json_output2, expected_json_available); -} -*/ - -#pragma GCC diagnostic pop 
+//***************************************************************************** +// Copyright 2020-2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "../executingstreamidguard.hpp" +#include "../get_model_metadata_impl.hpp" +#include "src/filesystem/localfilesystem.hpp" +#include "../model.hpp" +#include "../model_service.hpp" +#include "../modelinstance.hpp" +#include "../modelinstanceunloadguard.hpp" +#include "../modelmanager.hpp" +#include "../modelversionstatus.hpp" +#include "../prediction_service_utils.hpp" +#include "../schema.hpp" +#include "../sequence_processing_spec.hpp" +#include "mockmodelinstancechangingstates.hpp" +#include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "light_test_utils.hpp" +#include "platform_utils.hpp" + +using testing::_; +using testing::ContainerEq; +using testing::Each; +using testing::Eq; +using ::testing::NiceMock; +using testing::Return; +using testing::ReturnRef; +using testing::UnorderedElementsAre; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnarrowing" + +using namespace ovms; + +/* +------------------------------------------------ +AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW CONFIGURATIONS ARE NOT USED +REMOVE THIS ENTIRE FILE ONCE THE 
FEATURE IS REMOVED +------------------------------------------------- + +namespace { + +// Custom Loader Config Keys +#define ENABLE_FORCE_BLACKLIST_CHECK "ENABLE_FORCE_BLACKLIST_CHECK" + +// config_model_with_customloader +const char* custom_loader_config_model = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_model_with_customloader +const char* custom_loader_config_model_relative_paths = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_no_model_with_customloader +const char* custom_loader_config_model_deleted = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[] + })"; + +// config_2_models_with_customloader +const char* custom_loader_config_model_new = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + }, + { + "config":{ + "name":"dummy-new", + "base_path": "/tmp/test_cl_models/model2", 
+ "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_model_without_customloader_options +const char* custom_loader_config_model_customloader_options_removed = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1 + } + } + ] + })"; + +const char* config_model_with_customloader_options_unknown_loadername = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "unknown", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_model_with_customloader +const char* custom_loader_config_model_multiple = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader-a", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + }, + { + "config":{ + "loader_name":"sample-loader-b", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + }, + { + "config":{ + "loader_name":"sample-loader-c", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy-a", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader-a", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + }, + { + "config":{ + "name":"dummy-b", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader-b", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + }, + { + 
"config":{ + "name":"dummy-c", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader-c", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +const char* custom_loader_config_model_blacklist = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so", + "loader_config_file": "sample-loader-config" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin", "enable_file": "dummy.status"} + } + } + ] + })"; + +const char* empty_config = R"({ + "custom_loader_config_list":[], + "model_config_list":[] + })"; + +const char* expected_json_available = R"({ + "model_version_status": [ + { + "version": "1", + "state": "AVAILABLE", + "status": { + "error_code": "OK", + "error_message": "OK" + } + } + ] +} +)"; + +const char* expected_json_end = R"({ + "model_version_status": [ + { + "version": "1", + "state": "END", + "status": { + "error_code": "OK", + "error_message": "OK" + } + } + ] +} +)"; + +const char* expected_json_loading_error = R"({ + "model_version_status": [ + { + "version": "1", + "state": "LOADING", + "status": { + "error_code": "UNKNOWN", + "error_message": "UNKNOWN" + } + } + ] +} +)"; + +} // namespace + +*/ + +class TestCustomLoader : public ::testing::Test { +public: + void SetUp() { + const ::testing::TestInfo* const test_info = + ::testing::UnitTest::GetInstance()->current_test_info(); + + cl_models_path = getGenericFullPathForTmp("/tmp/" + std::string(test_info->name())); + cl_model_1_path = cl_models_path + "/model1/"; + cl_model_2_path = cl_models_path + "/model2/"; + + const std::string FIRST_MODEL_NAME = "dummy"; + const std::string SECOND_MODEL_NAME = "dummy_new"; + + 
std::filesystem::remove_all(cl_models_path); + std::filesystem::create_directories(cl_model_1_path); + } + void TearDown() { + // Create config file with an empty config & reload + const char* empty_config = R"({ + "custom_loader_config_list":[], + "model_config_list":[] + })"; + std::string configStr = empty_config; + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + // Clean up temporary destination + std::filesystem::remove_all(cl_models_path); + } + + /** + * @brief This function should mimic most closely predict request to check for thread safety + */ + void performPredict(const std::string modelName, + const ovms::model_version_t modelVersion, + const tensorflow::serving::PredictRequest& request, + std::unique_ptr> waitBeforeGettingModelInstance = nullptr, + std::unique_ptr> waitBeforePerformInference = nullptr); + + void deserialize(const std::vector& input, ov::InferRequest& inferRequest, std::shared_ptr modelInstance) { + try { + ov::Tensor tensor( + modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getOvPrecision(), + modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getShape().createPartialShape().get_shape(), + const_cast(reinterpret_cast(input.data()))); + inferRequest.set_tensor(DUMMY_MODEL_INPUT_NAME, tensor); + } catch (...) 
{ + ASSERT_TRUE(false) << "exception during deserialize"; + } + } + + void serializeAndCheck(int outputSize, ov::InferRequest& inferRequest) { + std::vector output(outputSize); + ASSERT_THAT(output, Each(Eq(0.))); + auto tensorOutput = inferRequest.get_tensor(DUMMY_MODEL_OUTPUT_NAME); + ASSERT_EQ(tensorOutput.get_byte_size(), outputSize * sizeof(float)); + std::memcpy(output.data(), tensorOutput.data(), outputSize * sizeof(float)); + EXPECT_THAT(output, Each(Eq(2.))); + } + + ovms::Status performInferenceWithRequest(const tensorflow::serving::PredictRequest& request, tensorflow::serving::PredictResponse& response) { + std::shared_ptr model; + std::unique_ptr unload_guard; + auto status = manager.getModelInstance("dummy", 0, model, unload_guard); + if (!status.ok()) { + return status; + } + + response.Clear(); + return model->infer(&request, &response, unload_guard); + } + +public: + ConstructorEnabledModelManager manager; + + ~TestCustomLoader() { + std::cout << "Destructor of TestCustomLoader()" << std::endl; + } + + std::string cl_models_path; + std::string cl_model_1_path; + std::string cl_model_2_path; +}; + +class MockModelInstance : public ovms::ModelInstance { +public: + MockModelInstance(ov::Core& ieCore) : + ModelInstance("UNUSED_NAME", 42, ieCore) {} + const ovms::Status mockValidate(const tensorflow::serving::PredictRequest* request) { + return validate(request); + } +}; + +void TestCustomLoader::performPredict(const std::string modelName, + const ovms::model_version_t modelVersion, + const tensorflow::serving::PredictRequest& request, + std::unique_ptr> waitBeforeGettingModelInstance, + std::unique_ptr> waitBeforePerformInference) { + // only validation is skipped + std::shared_ptr modelInstance; + std::unique_ptr modelInstanceUnloadGuard; + + auto& tensorProto = request.inputs().find("b")->second; + size_t batchSize = tensorProto.tensor_shape().dim(0).size(); + size_t inputSize = 1; + for (int i = 0; i < tensorProto.tensor_shape().dim_size(); i++) { + 
inputSize *= tensorProto.tensor_shape().dim(i).size(); + } + + if (waitBeforeGettingModelInstance) { + std::cout << "Waiting before getModelInstance. Batch size: " << batchSize << std::endl; + waitBeforeGettingModelInstance->get(); + } + ASSERT_EQ(manager.getModelInstance(modelName, modelVersion, modelInstance, modelInstanceUnloadGuard), ovms::StatusCode::OK); + + if (waitBeforePerformInference) { + std::cout << "Waiting before performInfernce." << std::endl; + waitBeforePerformInference->get(); + } + ovms::Status validationStatus = (std::static_pointer_cast(modelInstance))->mockValidate(&request); + std::cout << validationStatus.string() << std::endl; + ASSERT_TRUE(validationStatus == ovms::StatusCode::OK || + validationStatus == ovms::StatusCode::RESHAPE_REQUIRED || + validationStatus == ovms::StatusCode::BATCHSIZE_CHANGE_REQUIRED); + auto bsPositionIndex = 0; + auto requestBatchSize = ovms::getRequestBatchSize(&request, bsPositionIndex); + auto requestShapes = ovms::getRequestShapes(&request); + ASSERT_EQ(modelInstance->reloadModelIfRequired(validationStatus, requestBatchSize, requestShapes, modelInstanceUnloadGuard), ovms::StatusCode::OK); + + ovms::ExecutingStreamIdGuard executingStreamIdGuard(modelInstance->getInferRequestsQueue(), modelInstance->getMetricReporter()); + ov::InferRequest& inferRequest = executingStreamIdGuard.getInferRequest(); + std::vector input(inputSize); + std::generate(input.begin(), input.end(), []() { return 1.; }); + ASSERT_THAT(input, Each(Eq(1.))); + deserialize(input, inferRequest, modelInstance); + auto status = modelInstance->performInference(inferRequest); + ASSERT_EQ(status, ovms::StatusCode::OK); + size_t outputSize = batchSize * DUMMY_MODEL_OUTPUT_SIZE; + serializeAndCheck(outputSize, inferRequest); +} + +// Schema Validation + +TEST_F(TestCustomLoader, CustomLoaderConfigMatchingSchema) { + const char* customloaderConfigMatchingSchema = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"dummy-loader", + 
"library_path": "/tmp/loader/dummyloader", + "loader_config_file": "dummyloader-config" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options": {"loader_name": "dummy-loader"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigMatchingSchemaParsed; + customloaderConfigMatchingSchemaParsed.Parse(customloaderConfigMatchingSchema); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMatchingSchemaParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::OK); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderName) { + const char* customloaderConfigMissingLoaderName = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "library_path": "dummyloader", + "loader_config_file": "dummyloader-config" + } + } + ], + "model_config_list": [] + } + )"; + + rapidjson::Document customloaderConfigMissingLoaderNameParsed; + customloaderConfigMissingLoaderNameParsed.Parse(customloaderConfigMissingLoaderName); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLibraryPath) { + const char* customloaderConfigMissingLibraryPath = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"dummy-loader", + "loader_config_file": "dummyloader-config" + } + } + ], + "model_config_list": [] + } + )"; + + rapidjson::Document customloaderConfigMissingLibraryPathParsed; + customloaderConfigMissingLibraryPathParsed.Parse(customloaderConfigMissingLibraryPath); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLibraryPathParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderConfig) { + const char* 
customloaderConfigMissingLoaderConfig = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"dummy-loader", + "library_path": "dummyloader" + } + } + ], + "model_config_list": [] + } + )"; + + rapidjson::Document customloaderConfigMissingLoaderConfigParsed; + customloaderConfigMissingLoaderConfigParsed.Parse(customloaderConfigMissingLoaderConfig); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderConfigParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::OK); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigInvalidCustomLoaderConfig) { + const char* customloaderConfigInvalidCustomLoaderConfig = R"( + { + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options_invalid": {"loader_name": "dummy-loader"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigInvalidCustomLoaderConfigParsed; + customloaderConfigInvalidCustomLoaderConfigParsed.Parse(customloaderConfigInvalidCustomLoaderConfig); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigInvalidCustomLoaderConfigParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderNameInCustomLoaderOptions) { + const char* customloaderConfigMissingLoaderNameInCustomLoaderOptions = R"( + { + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options": {"a": "SS"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed; + customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed.Parse(customloaderConfigMissingLoaderNameInCustomLoaderOptions); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, 
ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMultiplePropertiesInCustomLoaderOptions) { + const char* customloaderConfigMultiplePropertiesInCustomLoaderOptions = R"( + { + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options": {"loader_name": "dummy-loader", "1": "a", "2": "b", "3": "c", "4":"d", "5":"e", "6":"f"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed; + customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed.Parse(customloaderConfigMultiplePropertiesInCustomLoaderOptions); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::OK); +} + +// Functional Validation +/* +------------------------------------------------ +AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW TESTS ARE NOT VALID +REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED +------------------------------------------------- +TEST_F(TestCustomLoader, CustomLoaderPrediction) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, 
ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictionRelativePath) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/bazel-bin/src/libsampleloader.so"), cl_models_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_relative_paths; + configStr.replace(configStr.find("test_cl_models"), std::string("test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderGetStatus) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + 
tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictDeletePredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + tensorflow::serving::PredictResponse response; + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::OK); + + // Re-create config file + createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); +} + +TEST_F(TestCustomLoader, 
CustomLoaderPredictNewVersionPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Copy version 1 to version 2 + std::filesystem::create_directories(cl_model_1_path + "2"); + std::filesystem::copy(cl_model_1_path + "1", cl_model_1_path + "2", std::filesystem::copy_options::recursive); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 2, request); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictNewModelPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + 
createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Copy model1 to model2 + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_2_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + configStr = custom_loader_config_model_new; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Re-create config file + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + performPredict("dummy-new", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictRemoveCustomLoaderOptionsPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + 
preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Replace model path in the config string + configStr = custom_loader_config_model_customloader_options_removed; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Re-create config file + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, PredictNormalModelAddCustomLoaderOptionsPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_customloader_options_removed; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Replace model path in the config string + configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + 
performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderOptionWithUnknownLibrary) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = config_model_with_customloader_options_unknown_loadername; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + tensorflow::serving::PredictResponse response; + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); +} + +TEST_F(TestCustomLoader, CustomLoaderWithMissingModelFiles) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + tensorflow::serving::PredictResponse response; + ASSERT_EQ(performInferenceWithRequest(request, response), 
ovms::StatusCode::MODEL_VERSION_MISSING); +} + +TEST_F(TestCustomLoader, CustomLoaderGetStatusDeleteModelGetStatus) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // Re-create config file + createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest reqx; + tensorflow::serving::GetModelStatusResponse resx; + + auto model_specx = reqx.mutable_model_spec(); + model_specx->Clear(); + model_specx->set_name("dummy"); + model_specx->mutable_version()->set_value(1); + + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, 
DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_constx = resx; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictionUsingManyCustomLoaders) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_multiple; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + + performPredict("dummy-a", 1, request); + performPredict("dummy-b", 1, request); + performPredict("dummy-c", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderGetMetaData) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + const char* expected_json = R"({ + "modelSpec": { + "name": "dummy", + "signatureName": "", + "version": "1" + }, + "metadata": { + "signature_def": { + "@type": "type.googleapis.com/tensorflow.serving.SignatureDefMap", + "signatureDef": { + 
"serving_default": { + "inputs": { + "b": { + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "1", + "name": "" + }, + { + "size": "10", + "name": "" + } + ], + "unknownRank": false + }, + "name": "b" + } + }, + "outputs": { + "a": { + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "1", + "name": "" + }, + { + "size": "10", + "name": "" + } + ], + "unknownRank": false + }, + "name": "a" + } + }, + "methodName": "", + "defaults": {} + } + } + } + } +} +)"; + + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + std::shared_ptr model; + std::unique_ptr unload_guard; + ASSERT_EQ(manager.getModelInstance("dummy", 1, model, unload_guard), ovms::StatusCode::OK); + + tensorflow::serving::GetModelMetadataResponse response; + ovms::GetModelMetadataImpl::buildResponse(model, &response); + + std::string json_output = ""; + ovms::GetModelMetadataImpl::serializeResponse2Json(&response, &json_output); + + EXPECT_TRUE(response.has_model_spec()); + EXPECT_EQ(response.model_spec().name(), "dummy"); + + tensorflow::serving::SignatureDefMap def; + response.metadata().at("signature_def").UnpackTo(&def); + + const auto& inputs = ((*def.mutable_signature_def())["serving_default"]).inputs(); + const auto& outputs = ((*def.mutable_signature_def())["serving_default"]).outputs(); + + EXPECT_EQ(inputs.size(), 1); + EXPECT_EQ(outputs.size(), 1); + EXPECT_EQ(json_output, expected_json); +} + 
+TEST_F(TestCustomLoader, CustomLoaderMultipleLoaderWithSameLoaderName) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + const char* custom_loader_config_model_xx = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + }, + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_xx; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderBlackListingModel) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = 
ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // copy status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest reqx; + tensorflow::serving::GetModelStatusResponse resx; + + auto model_specx = reqx.mutable_model_spec(); + model_specx->Clear(); + model_specx->set_name("dummy"); + model_specx->mutable_version()->set_value(1); + + 
ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_constx = resx; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); +} + +TEST_F(TestCustomLoader, CustomLoaderBlackListingRevoke) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = 
res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // copy status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req1; + tensorflow::serving::GetModelStatusResponse res1; + + auto model_spec1 = req1.mutable_model_spec(); + model_spec1->Clear(); + model_spec1->set_name("dummy"); + model_spec1->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const1 = res1; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); + + // Remove status file + std::filesystem::remove(status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req2; + tensorflow::serving::GetModelStatusResponse res2; + + auto model_spec2 = req2.mutable_model_spec(); + model_spec2->Clear(); + model_spec2->set_name("dummy"); + model_spec2->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const2 = res2; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); +} + 
+TEST_F(TestCustomLoader, CustomLoaderBlackListModelReloadError) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // copy status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = 
status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req1; + tensorflow::serving::GetModelStatusResponse res1; + + auto model_spec1 = req1.mutable_model_spec(); + model_spec1->Clear(); + model_spec1->set_name("dummy"); + model_spec1->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const1 = res1; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); + + // Remove status file & the Dummy.bin file + std::filesystem::remove(status_file); + std::string bin_file = status_file_path + "/dummy.bin"; + std::filesystem::remove(bin_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::FILE_INVALID); + + tensorflow::serving::GetModelStatusRequest req2; + tensorflow::serving::GetModelStatusResponse res2; + + auto model_spec2 = req2.mutable_model_spec(); + model_spec2->Clear(); + model_spec2->set_name("dummy"); + model_spec2->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const2 = res2; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_loading_error); + + // Copy back the model files & try reload + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive | std::filesystem::copy_options::overwrite_existing); + 
ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req3; + tensorflow::serving::GetModelStatusResponse res3; + + auto model_spec3 = req3.mutable_model_spec(); + model_spec3->Clear(); + model_spec3->set_name("dummy"); + model_spec3->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req3, &res3, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const3 = res3; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const3, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); +} + +TEST_F(TestCustomLoader, CustomLoaderLoadBlackListedModel) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + + // Create status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = 
status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ovms::Status status1 = manager.loadConfig(fileToReload); + ASSERT_TRUE(status1 == ovms::StatusCode::INTERNAL_ERROR); + + tensorflow::serving::GetModelStatusRequest req1; + tensorflow::serving::GetModelStatusResponse res1; + + auto model_spec1 = req1.mutable_model_spec(); + model_spec1->Clear(); + model_spec1->set_name("dummy"); + model_spec1->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const1 = res1; + std::string json_output1; + Status error_status1 = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output1); + ASSERT_EQ(error_status1, StatusCode::OK); + EXPECT_EQ(json_output1, expected_json_loading_error); + + // remove enable_file from config file + std::string status_config = ", \"enable_file\": \"dummy.status\""; + configStr.replace(configStr.find(status_config), std::string(status_config).size(), ""); + createConfigFileWithContent(configStr, fileToReload); + + ovms::Status status2 = manager.loadConfig(fileToReload); + ASSERT_TRUE(status2 == ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req2; + tensorflow::serving::GetModelStatusResponse res2; + + auto model_spec2 = req2.mutable_model_spec(); + model_spec2->Clear(); + model_spec2->set_name("dummy"); + model_spec2->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const2 = res2; + std::string json_output2; + Status error_status2 = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output2); + ASSERT_EQ(error_status2, StatusCode::OK); + EXPECT_EQ(json_output2, expected_json_available); +} +*/ + +#pragma GCC diagnostic pop diff --git 
a/src/test/deserialization_tests.cpp b/src/test/deserialization_tests.cpp index 3136ee3a3d..dd4e031c37 100644 --- a/src/test/deserialization_tests.cpp +++ b/src/test/deserialization_tests.cpp @@ -40,6 +40,9 @@ #include "../deserialization_main.hpp" #include "../regularovtensorfactory.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_request_utils_capi.hpp" using TFTensorProto = tensorflow::TensorProto; diff --git a/src/test/disabled_mediapipe_test.cpp b/src/test/disabled_mediapipe_test.cpp index dc0d33a66a..af11ce295d 100644 --- a/src/test/disabled_mediapipe_test.cpp +++ b/src/test/disabled_mediapipe_test.cpp @@ -22,6 +22,7 @@ #include "../ov_utils.hpp" #include "../server.hpp" #include "test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/embeddingsnode_test.cpp b/src/test/embeddingsnode_test.cpp index 6443bba525..a48ffe7744 100644 --- a/src/test/embeddingsnode_test.cpp +++ b/src/test/embeddingsnode_test.cpp @@ -20,10 +20,13 @@ #include "../http_rest_api_handler.hpp" #include "../mediapipe_internal/mediapipefactory.hpp" +#include "../mediapipe_internal/mediapipegraphdefinition.hpp" +#include "../modelmanager.hpp" #include "../servablemanagermodule.hpp" #include "../server.hpp" #include "rapidjson/document.h" #include "test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/ensemble_config_change_stress.cpp b/src/test/ensemble_config_change_stress.cpp index 225a2ab703..78b0b49dbe 100644 --- a/src/test/ensemble_config_change_stress.cpp +++ b/src/test/ensemble_config_change_stress.cpp @@ -39,6 +39,7 @@ #include "../stringutils.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" #include "stress_test_utils.hpp" #include 
"test_models.hpp" #include "test_utils.hpp" diff --git a/src/test/ensemble_flow_custom_node_tests.cpp b/src/test/ensemble_flow_custom_node_tests.cpp index b3a4eb6244..94cdb70ec5 100644 --- a/src/test/ensemble_flow_custom_node_tests.cpp +++ b/src/test/ensemble_flow_custom_node_tests.cpp @@ -61,7 +61,11 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" #include "light_test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" #include "test_with_temp_dir.hpp" using namespace ovms; diff --git a/src/test/ensemble_mapping_config_tests.cpp b/src/test/ensemble_mapping_config_tests.cpp index 134aa85630..9fd92f0b07 100644 --- a/src/test/ensemble_mapping_config_tests.cpp +++ b/src/test/ensemble_mapping_config_tests.cpp @@ -34,7 +34,9 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" #include "light_test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/ensemble_tests.cpp b/src/test/ensemble_tests.cpp index 579daa28f5..e7d5b98427 100644 --- a/src/test/ensemble_tests.cpp +++ b/src/test/ensemble_tests.cpp @@ -56,6 +56,8 @@ #include "platform_utils.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/get_mediapipe_graph_metadata_response_test.cpp b/src/test/get_mediapipe_graph_metadata_response_test.cpp index bbbae9472c..21708110ea 100644 --- a/src/test/get_mediapipe_graph_metadata_response_test.cpp +++ b/src/test/get_mediapipe_graph_metadata_response_test.cpp @@ -45,6 +45,9 
@@ #include "constructor_enabled_model_manager.hpp" #include "mockmodelinstancechangingstates.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" +#include "src/test/test_config_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/get_model_metadata_response_test.cpp b/src/test/get_model_metadata_response_test.cpp index 9b9dca66c1..6b0226fa95 100644 --- a/src/test/get_model_metadata_response_test.cpp +++ b/src/test/get_model_metadata_response_test.cpp @@ -26,6 +26,8 @@ #include "../modelmanager.hpp" #include "../status.hpp" #include "mockmodelinstancechangingstates.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_request_utils_tfs.hpp" #include "test_models_configs.hpp" using ::testing::NiceMock; diff --git a/src/test/get_model_metadata_signature_test.cpp b/src/test/get_model_metadata_signature_test.cpp index 22a77ca346..071ffe3e10 100644 --- a/src/test/get_model_metadata_signature_test.cpp +++ b/src/test/get_model_metadata_signature_test.cpp @@ -20,6 +20,8 @@ #include "../get_model_metadata_impl.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" class GetModelMetadataSignature : public ::testing::Test { struct Info { diff --git a/src/test/get_pipeline_metadata_response_test.cpp b/src/test/get_pipeline_metadata_response_test.cpp index ce65a1f55a..060dc0cddf 100644 --- a/src/test/get_pipeline_metadata_response_test.cpp +++ b/src/test/get_pipeline_metadata_response_test.cpp @@ -28,6 +28,8 @@ #include "constructor_enabled_model_manager.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_request_utils_tfs.hpp" using namespace ovms; using namespace rapidjson; diff --git a/src/test/http_openai_handler_test.cpp b/src/test/http_openai_handler_test.cpp index a4e6585af0..db03c4a923 100644 --- 
a/src/test/http_openai_handler_test.cpp +++ b/src/test/http_openai_handler_test.cpp @@ -32,6 +32,7 @@ #include "../server.hpp" #include "environment.hpp" #include "test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/http_rest_api_handler_test.cpp b/src/test/http_rest_api_handler_test.cpp index c640012344..a0fa4abb09 100644 --- a/src/test/http_rest_api_handler_test.cpp +++ b/src/test/http_rest_api_handler_test.cpp @@ -24,6 +24,7 @@ #include "../server.hpp" #include "platform_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/kfs_metadata_test.cpp b/src/test/kfs_metadata_test.cpp index fb2d5e4bd2..d43109f3a1 100644 --- a/src/test/kfs_metadata_test.cpp +++ b/src/test/kfs_metadata_test.cpp @@ -13,6 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
//***************************************************************************** +#include #include #include @@ -23,6 +24,7 @@ #include "constructor_enabled_model_manager.hpp" #include "mockmodelinstancechangingstates.hpp" +#include "src/test/test_request_utils_kfs.hpp" #include "test_models_configs.hpp" using ::testing::NiceMock; diff --git a/src/test/kfs_rest_test.cpp b/src/test/kfs_rest_test.cpp index cede07567f..33a989ec27 100644 --- a/src/test/kfs_rest_test.cpp +++ b/src/test/kfs_rest_test.cpp @@ -30,6 +30,7 @@ #include "../status.hpp" #include "../version.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "platform_utils.hpp" using ovms::Config; diff --git a/src/test/listmodelsendpoint_test.cpp b/src/test/listmodelsendpoint_test.cpp index 35066b9f4d..7ee94f586c 100644 --- a/src/test/listmodelsendpoint_test.cpp +++ b/src/test/listmodelsendpoint_test.cpp @@ -22,6 +22,7 @@ #include "../server.hpp" #include "rapidjson/document.h" #include "test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/llm/assisted_decoding_test.cpp b/src/test/llm/assisted_decoding_test.cpp index 9c870a1f63..68f6158c1e 100644 --- a/src/test/llm/assisted_decoding_test.cpp +++ b/src/test/llm/assisted_decoding_test.cpp @@ -47,6 +47,7 @@ #include "rapidjson/stringbuffer.h" #include "rapidjson/writer.h" #include "../test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "../test_utils.hpp" using namespace ovms; diff --git a/src/test/llm/llmnode_test.cpp b/src/test/llm/llmnode_test.cpp index 2d3f80f2e2..b612234fb6 100644 --- a/src/test/llm/llmnode_test.cpp +++ b/src/test/llm/llmnode_test.cpp @@ -52,6 +52,7 @@ #include "../constructor_enabled_model_manager.hpp" #include "../platform_utils.hpp" #include "../test_http_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" #include "../test_utils.hpp" #include "src/test/environment.hpp" diff --git 
a/src/test/llm/llmtemplate_test.cpp b/src/test/llm/llmtemplate_test.cpp index 1fdb5cd0d0..3e1f10b760 100644 --- a/src/test/llm/llmtemplate_test.cpp +++ b/src/test/llm/llmtemplate_test.cpp @@ -46,6 +46,7 @@ #pragma GCC diagnostic pop #include "src/test/test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "src/test/test_utils.hpp" #include "src/test/light_test_utils.hpp" #include "src/test/test_with_temp_dir.hpp" diff --git a/src/test/llm/tokenize_endpoint_test.cpp b/src/test/llm/tokenize_endpoint_test.cpp index 17c390b650..1273f7be1e 100644 --- a/src/test/llm/tokenize_endpoint_test.cpp +++ b/src/test/llm/tokenize_endpoint_test.cpp @@ -38,6 +38,7 @@ #include "../constructor_enabled_model_manager.hpp" #include "../platform_utils.hpp" #include "../test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "../test_utils.hpp" using namespace ovms; diff --git a/src/test/llm/visual_language_model/complete_flow_test.cpp b/src/test/llm/visual_language_model/complete_flow_test.cpp index 5f2b380556..86d21e9e77 100644 --- a/src/test/llm/visual_language_model/complete_flow_test.cpp +++ b/src/test/llm/visual_language_model/complete_flow_test.cpp @@ -32,6 +32,7 @@ #include "../../../ov_utils.hpp" #include "../../../server.hpp" #include "../../test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "../../test_utils.hpp" #include "rapidjson/document.h" #include "rapidjson/stringbuffer.h" diff --git a/src/test/llm/visual_language_model/initialization_test.cpp b/src/test/llm/visual_language_model/initialization_test.cpp index 43f16aa3cc..7d964375ab 100644 --- a/src/test/llm/visual_language_model/initialization_test.cpp +++ b/src/test/llm/visual_language_model/initialization_test.cpp @@ -22,6 +22,7 @@ #include "../../constructor_enabled_model_manager.hpp" #include "../../platform_utils.hpp" #include "../../test_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" using namespace ovms; diff --git 
a/src/test/mediapipe_framework_test.cpp b/src/test/mediapipe_framework_test.cpp index e66d965c59..8a558e74e1 100644 --- a/src/test/mediapipe_framework_test.cpp +++ b/src/test/mediapipe_framework_test.cpp @@ -43,6 +43,8 @@ #include "../stringutils.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/mediapipe_validation_test.cpp b/src/test/mediapipe_validation_test.cpp index bdaa588887..9e7a05ac5a 100644 --- a/src/test/mediapipe_validation_test.cpp +++ b/src/test/mediapipe_validation_test.cpp @@ -27,6 +27,8 @@ #include "../precision.hpp" #include "../server.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/mediapipeflow_test.cpp b/src/test/mediapipeflow_test.cpp index 459ad864f1..8d56ba7180 100644 --- a/src/test/mediapipeflow_test.cpp +++ b/src/test/mediapipeflow_test.cpp @@ -51,6 +51,7 @@ #include "src/metrics/metric_config.hpp" #include "src/metrics/metric_module.hpp" #include "../model.hpp" +#include "../modelinstance.hpp" #include "../model_service.hpp" #include "../ovms_exit_codes.hpp" #include "../precision.hpp" @@ -61,6 +62,9 @@ #include "../tfs_frontend/tfs_utils.hpp" #include "constructor_enabled_model_manager.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" #include "mediapipe/framework/formats/image_frame.h" #include "mediapipe/framework/formats/tensor.h" #include "opencv2/opencv.hpp" diff --git a/src/test/metric_config_test.cpp b/src/test/metric_config_test.cpp index 23184bdf73..e4801db11d 100644 --- a/src/test/metric_config_test.cpp +++ 
b/src/test/metric_config_test.cpp @@ -27,6 +27,7 @@ #include "../modelinstance.hpp" #include "constructor_enabled_model_manager.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/metrics_flow_test.cpp b/src/test/metrics_flow_test.cpp index 61349c8b64..58045b2941 100644 --- a/src/test/metrics_flow_test.cpp +++ b/src/test/metrics_flow_test.cpp @@ -41,6 +41,9 @@ #include "constructor_enabled_model_manager.hpp" #include "platform_utils.hpp" #include "test_http_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/model_service_test.cpp b/src/test/model_service_test.cpp index 7fbb5d0e8c..97c4868553 100644 --- a/src/test/model_service_test.cpp +++ b/src/test/model_service_test.cpp @@ -31,6 +31,8 @@ #include "../execution_context.hpp" #include "../grpc_utils.hpp" #include "../model_service.hpp" +#include "../kfs_frontend/kfs_grpc_inference_service.hpp" +#include "../kfs_frontend/kfs_utils.hpp" #include "../modelinstanceunloadguard.hpp" #include "../model_version_policy.hpp" #include "../modelmanager.hpp" @@ -39,6 +41,7 @@ #include "gtest/gtest.h" #include "platform_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "light_test_utils.hpp" diff --git a/src/test/modelconfig_test.cpp b/src/test/modelconfig_test.cpp index c1efe8edf1..4dfed8f647 100644 --- a/src/test/modelconfig_test.cpp +++ b/src/test/modelconfig_test.cpp @@ -23,6 +23,7 @@ #include #include "../modelconfig.hpp" +#include "../anonymous_input_name.hpp" #include "../status.hpp" #include "test_utils.hpp" diff --git a/src/test/modelmanager_test.cpp 
b/src/test/modelmanager_test.cpp index 10e245e308..3cf43a4fea 100644 --- a/src/test/modelmanager_test.cpp +++ b/src/test/modelmanager_test.cpp @@ -37,6 +37,8 @@ #include "mockmodelinstancechangingstates.hpp" #include "test_models_configs.hpp" #include "light_test_utils.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_model_manager_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/multipart_calculator_test.cpp b/src/test/multipart_calculator_test.cpp index e18eae2cbc..ae7dcfc949 100644 --- a/src/test/multipart_calculator_test.cpp +++ b/src/test/multipart_calculator_test.cpp @@ -22,6 +22,7 @@ #include "../servablemanagermodule.hpp" #include "../server.hpp" #include "test_http_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/network_utils_test.cpp b/src/test/network_utils_test.cpp index 25149b242c..5c1bdbfcf5 100644 --- a/src/test/network_utils_test.cpp +++ b/src/test/network_utils_test.cpp @@ -31,6 +31,7 @@ #include "src/logging.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" TEST(NetworkUtils, IsPortAvailable_Positive) { uint64_t availablePort = 12345; diff --git a/src/test/openvino_remote_tensors_tests.cpp b/src/test/openvino_remote_tensors_tests.cpp index a73245578a..73eea3881b 100644 --- a/src/test/openvino_remote_tensors_tests.cpp +++ b/src/test/openvino_remote_tensors_tests.cpp @@ -34,6 +34,7 @@ #include "../ovms_internal.h" // NOLINT #include "../status.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "gpuenvironment.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/ovmsconfig_test.cpp b/src/test/ovmsconfig_test.cpp index 6b76d0e2fa..79b4a063d2 100644 --- a/src/test/ovmsconfig_test.cpp +++ b/src/test/ovmsconfig_test.cpp @@ -30,6 +30,7 @@ #include "../ovms_exit_codes.hpp" #include "../systeminfo.hpp" 
#include "test_utils.hpp" +#include "src/test/test_config_utils.hpp" using testing::_; using testing::ContainerEq; diff --git a/src/test/pipelinedefinitionstatus_test.cpp b/src/test/pipelinedefinitionstatus_test.cpp index 578ad38295..8c3d722f6c 100644 --- a/src/test/pipelinedefinitionstatus_test.cpp +++ b/src/test/pipelinedefinitionstatus_test.cpp @@ -24,8 +24,6 @@ #include "test_utils.hpp" using namespace ovms; -using namespace tensorflow; -using namespace tensorflow::serving; using testing::_; using testing::Return; diff --git a/src/test/predict_validation_test.cpp b/src/test/predict_validation_test.cpp index 2fb46140e8..dbc2b6ff72 100644 --- a/src/test/predict_validation_test.cpp +++ b/src/test/predict_validation_test.cpp @@ -27,6 +27,9 @@ #include "../modelinstance.hpp" #include "../predict_request_validation_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_predict_validation_utils.hpp" using ::testing::NiceMock; using ::testing::Return; diff --git a/src/test/prediction_service_test.cpp b/src/test/prediction_service_test.cpp index 99833da9fa..8248459ade 100644 --- a/src/test/prediction_service_test.cpp +++ b/src/test/prediction_service_test.cpp @@ -57,6 +57,9 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_request_utils_capi.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/pull_gguf_hf_model_test.cpp b/src/test/pull_gguf_hf_model_test.cpp index ffb5350fa2..457cfb1508 100644 --- a/src/test/pull_gguf_hf_model_test.cpp +++ b/src/test/pull_gguf_hf_model_test.cpp @@ -23,6 +23,7 @@ #include #include "../utils/env_guard.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include 
"test_with_temp_dir.hpp" #include "gguf_environment.hpp" #include "src/filesystem/filesystem.hpp" diff --git a/src/test/pull_hf_model_test.cpp b/src/test/pull_hf_model_test.cpp index d622f9882d..fd41e8961d 100644 --- a/src/test/pull_hf_model_test.cpp +++ b/src/test/pull_hf_model_test.cpp @@ -29,6 +29,8 @@ #include "src/utils/env_guard.hpp" #include "src/test/light_test_utils.hpp" #include "src/test/test_utils.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_config_utils.hpp" #include "src/test/test_file_utils.hpp" #include "src/test/test_with_temp_dir.hpp" #include "src/filesystem/filesystem.hpp" diff --git a/src/test/pythonnode_test.cpp b/src/test/pythonnode_test.cpp index 5b31146d19..0d85ca0f35 100644 --- a/src/test/pythonnode_test.cpp +++ b/src/test/pythonnode_test.cpp @@ -56,6 +56,9 @@ #include "../python/python_backend.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" #include "constructor_enabled_model_manager.hpp" #include "platform_utils.hpp" #include "test_utils.hpp" diff --git a/src/test/rest_utils_test.cpp b/src/test/rest_utils_test.cpp index 6d6df46be5..e388872c6d 100644 --- a/src/test/rest_utils_test.cpp +++ b/src/test/rest_utils_test.cpp @@ -22,6 +22,7 @@ #include "../rest_utils.hpp" #include "../status.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/serialization_tests.cpp b/src/test/serialization_tests.cpp index 13356316a7..4fe555a1af 100644 --- a/src/test/serialization_tests.cpp +++ b/src/test/serialization_tests.cpp @@ -39,6 +39,8 @@ #include "../capi_frontend/serialization.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" using TFTensorProto = tensorflow::TensorProto; diff --git a/src/test/server_test.cpp 
b/src/test/server_test.cpp index 0665772689..5017862da5 100644 --- a/src/test/server_test.cpp +++ b/src/test/server_test.cpp @@ -37,6 +37,7 @@ #include "../server.hpp" #include "../version.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_server_utils.hpp" #include "mockmodelinstancechangingstates.hpp" using ovms::ModelManager; diff --git a/src/test/stateful_modelinstance_test.cpp b/src/test/stateful_modelinstance_test.cpp index 25d2b414a0..b9a96542ff 100644 --- a/src/test/stateful_modelinstance_test.cpp +++ b/src/test/stateful_modelinstance_test.cpp @@ -45,6 +45,7 @@ #include "../timer.hpp" #include "stateful_test_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/streaming_test.cpp b/src/test/streaming_test.cpp index 5af193b6c3..b65c1464f8 100644 --- a/src/test/streaming_test.cpp +++ b/src/test/streaming_test.cpp @@ -33,6 +33,10 @@ #include "constructor_enabled_model_manager.hpp" #include "platform_utils.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" +#include "src/test/test_config_utils.hpp" +#include "src/test/test_mediapipe_utils.hpp" #if (PYTHON_DISABLE == 0) #include "../python/pythoninterpretermodule.hpp" diff --git a/src/test/stress_test_utils.hpp b/src/test/stress_test_utils.hpp index 15caaae3b3..b1f1feced5 100644 --- a/src/test/stress_test_utils.hpp +++ b/src/test/stress_test_utils.hpp @@ -47,6 +47,7 @@ #include "../model_service.hpp" #include "../modelconfig.hpp" #include "../modelinstance.hpp" +#include "../modelmanager.hpp" #include "../prediction_service_utils.hpp" #include "../servablemanagermodule.hpp" #include "../server.hpp" @@ -54,6 +55,9 @@ #include "../stringutils.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "c_api_test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include 
"src/test/test_request_utils_kfs.hpp" +#include "src/test/test_server_utils.hpp" #include "test_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/tensor_conversion_test.cpp b/src/test/tensor_conversion_test.cpp index 950fd6aed1..ccc91b64bc 100644 --- a/src/test/tensor_conversion_test.cpp +++ b/src/test/tensor_conversion_test.cpp @@ -28,6 +28,8 @@ #include "../tensor_conversion.hpp" #include "opencv2/opencv.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" +#include "src/test/test_request_utils_kfs.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/test_http_utils.hpp b/src/test/test_http_utils.hpp index 62ca393c40..b952d5431b 100644 --- a/src/test/test_http_utils.hpp +++ b/src/test/test_http_utils.hpp @@ -30,6 +30,7 @@ #include "../multi_part_parser.hpp" #include "test_utils.hpp" +#include "src/test/test_server_utils.hpp" class MockedServerRequestInterface final : public ovms::HttpAsyncWriter { public: diff --git a/src/test/test_utils.hpp b/src/test/test_utils.hpp index d981de3249..5b52646ab5 100644 --- a/src/test/test_utils.hpp +++ b/src/test/test_utils.hpp @@ -113,16 +113,3 @@ static ovms::NodeLibrary createLibraryMock() { } std::shared_ptr createTensorInfoCopyWithPrecision(std::shared_ptr src, ovms::Precision precision); - -// ============================================================================ -// Backward compatibility: re-export split headers. -// Phase 3 will remove these and update consumers to include directly. 
-// ============================================================================ -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_request_utils_capi.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_model_manager_utils.hpp" -#include "src/test/test_predict_validation_utils.hpp" -#include "src/test/test_config_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" diff --git a/src/test/tfs_rest_parser_binary_inputs_test.cpp b/src/test/tfs_rest_parser_binary_inputs_test.cpp index 874bd9d6b6..db1157c116 100644 --- a/src/test/tfs_rest_parser_binary_inputs_test.cpp +++ b/src/test/tfs_rest_parser_binary_inputs_test.cpp @@ -25,6 +25,7 @@ #include "absl/strings/escaping.h" #pragma warning(pop) #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/tfs_rest_parser_column_test.cpp b/src/test/tfs_rest_parser_column_test.cpp index 3366b72dae..6bebd812f0 100644 --- a/src/test/tfs_rest_parser_column_test.cpp +++ b/src/test/tfs_rest_parser_column_test.cpp @@ -27,6 +27,7 @@ #include "../rest_parser.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/tfs_rest_parser_nonamed_test.cpp b/src/test/tfs_rest_parser_nonamed_test.cpp index cb149a089a..b6ed1eae8b 100644 --- a/src/test/tfs_rest_parser_nonamed_test.cpp +++ b/src/test/tfs_rest_parser_nonamed_test.cpp @@ -22,6 +22,7 @@ #include "absl/strings/escaping.h" #pragma warning(pop) #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/tfs_rest_parser_row_test.cpp b/src/test/tfs_rest_parser_row_test.cpp index dc332f6196..f9639963dc 100644 --- a/src/test/tfs_rest_parser_row_test.cpp +++ b/src/test/tfs_rest_parser_row_test.cpp @@ -29,6 +29,7 @@ #include "../rest_parser.hpp" #include "test_utils.hpp" +#include "src/test/test_request_utils_tfs.hpp" 
using namespace ovms; From 1cfe4ed0e610949cf9a18026cad2b2c82cd237eb Mon Sep 17 00:00:00 2001 From: Adrian Tobiszewski Date: Tue, 28 Apr 2026 10:34:05 +0200 Subject: [PATCH 3/5] Phase 4: ovms_cc_test/ovms_test_cc_library macros, fix include paths, remove unused test_utils.hpp, split 17 test libs - Add ovms_cc_test and ovms_test_cc_library macros to common_settings.bzl (defaults: linkstatic=1, alwayslink=True, COMMON_STATIC_TEST_COPTS, gtest) - Move COPTS_CLOUD to common_settings.bzl (shared across BUILD files) - Fix ~58 test files: src/test/test_* -> test_* relative include paths - Remove unnecessary test_utils.hpp from ~38 files - Make test_models.hpp self-contained (add filesystem, shape, modelversion) - Split 17 test .cpp files into ovms_test_cc_library targets with minimal deps: stringutils, threadsafequeue, tensorinfo, layout, tensorutils, custom_node_buffersqueue, model_version_policy, kfs_rest_parser, rerank_chunking, rerank_handler, metrics, custom_node_output_allocator, systeminfo, localfilesystem, gcsfilesystem, azurefilesystem, schema --- common_settings.bzl | 61 ++++++ src/BUILD | 200 +++++++++++++++--- src/filesystem/BUILD | 7 +- src/test/audio/text2speech_test.cpp | 3 +- src/test/c_api_stress_tests.cpp | 1 - src/test/c_api_test_utils.cpp | 6 +- src/test/c_api_test_utils.hpp | 3 +- src/test/c_api_tests.cpp | 7 +- src/test/capi_predict_validation_test.cpp | 4 +- src/test/config_export_full_test.cpp | 1 - src/test/custom_loader_test.cpp | 2 +- src/test/deserialization_tests.cpp | 6 +- src/test/disabled_mediapipe_test.cpp | 3 +- src/test/embeddingsnode_test.cpp | 3 +- src/test/ensemble_config_change_stress.cpp | 2 +- src/test/ensemble_flow_custom_node_tests.cpp | 8 +- src/test/ensemble_mapping_config_tests.cpp | 4 +- src/test/ensemble_metadata_test.cpp | 1 + src/test/ensemble_tests.cpp | 4 +- ...mediapipe_graph_metadata_response_test.cpp | 6 +- src/test/get_model_metadata_response_test.cpp | 5 +- .../get_model_metadata_signature_test.cpp | 5 +- 
.../get_pipeline_metadata_response_test.cpp | 5 +- src/test/http_openai_handler_test.cpp | 3 +- src/test/http_rest_api_handler_test.cpp | 3 +- src/test/kfs_metadata_test.cpp | 3 +- src/test/kfs_rest_test.cpp | 4 +- src/test/libgit2_test.cpp | 1 - src/test/listmodelsendpoint_test.cpp | 3 +- src/test/llm/assisted_decoding_test.cpp | 4 +- src/test/llm/llmnode_test.cpp | 3 +- src/test/llm/llmtemplate_test.cpp | 7 +- src/test/llm/max_model_length_test.cpp | 3 +- src/test/llm/text_streamer_test.cpp | 7 +- src/test/llm/tokenize_endpoint_test.cpp | 3 +- .../complete_flow_test.cpp | 4 +- .../initialization_test.cpp | 3 +- src/test/mediapipe_framework_test.cpp | 4 +- src/test/mediapipe_validation_test.cpp | 4 +- src/test/mediapipeflow_test.cpp | 6 +- src/test/metric_config_test.cpp | 3 +- src/test/metrics_flow_test.cpp | 6 +- src/test/mockmodelinstancechangingstates.hpp | 2 +- src/test/model_cache_test.cpp | 1 + src/test/model_service_test.cpp | 2 +- src/test/modelconfig_test.cpp | 2 +- src/test/modelinstance_test.cpp | 1 + src/test/modelmanager_test.cpp | 5 +- src/test/multipart_calculator_test.cpp | 3 +- src/test/network_utils_test.cpp | 3 +- src/test/openvino_remote_tensors_tests.cpp | 3 +- src/test/openvino_tests.cpp | 2 +- src/test/ovmsconfig_test.cpp | 3 +- src/test/pipelinedefinitionstatus_test.cpp | 1 - src/test/predict_validation_test.cpp | 6 +- src/test/prediction_service_test.cpp | 6 +- src/test/pull_gguf_hf_model_test.cpp | 2 +- src/test/pull_hf_model_test.cpp | 10 +- src/test/pythonnode_test.cpp | 6 +- src/test/reranknode_test.cpp | 1 - src/test/rest_utils_test.cpp | 3 +- src/test/serialization_tests.cpp | 6 +- src/test/server_test.cpp | 2 +- src/test/shape_test.cpp | 1 - src/test/stateful_config_test.cpp | 1 - src/test/stateful_modelinstance_test.cpp | 2 +- src/test/status_test.cpp | 1 - src/test/streaming_test.cpp | 9 +- src/test/stress_test_utils.hpp | 6 +- src/test/tensor_conversion_test.cpp | 4 +- src/test/test_http_utils.hpp | 3 +- src/test/test_models.hpp 
| 3 + src/test/test_request_utils_capi.hpp | 2 +- src/test/test_request_utils_kfs.hpp | 2 +- src/test/test_request_utils_tfs.hpp | 2 +- src/test/test_utils.hpp | 2 +- .../tfs_rest_parser_binary_inputs_test.cpp | 2 +- src/test/tfs_rest_parser_column_test.cpp | 2 +- src/test/tfs_rest_parser_nonamed_test.cpp | 2 +- src/test/tfs_rest_parser_row_test.cpp | 2 +- 80 files changed, 359 insertions(+), 178 deletions(-) diff --git a/common_settings.bzl b/common_settings.bzl index 3faad26bb5..97a9f978f4 100644 --- a/common_settings.bzl +++ b/common_settings.bzl @@ -50,6 +50,63 @@ def ovms_cc_library(**kwargs): **kwargs ) +def ovms_test_cc_library(**kwargs): + """ + Wrapper for cc_library for test utility libraries. + Defaults: linkstatic=1, alwayslink=True, copts=COMMON_STATIC_TEST_COPTS, + linkopts=COMMON_STATIC_LIBS_LINKOPTS, local_defines=COMMON_LOCAL_DEFINES. + gtest is always added to deps. Use additional_copts for feature-flag copts. + """ + if "linkstatic" not in kwargs: + kwargs["linkstatic"] = 1 + if "alwayslink" not in kwargs: + kwargs["alwayslink"] = True + if "copts" not in kwargs: + kwargs["copts"] = COMMON_STATIC_TEST_COPTS + if "linkopts" not in kwargs: + kwargs["linkopts"] = COMMON_STATIC_LIBS_LINKOPTS + if "local_defines" not in kwargs: + kwargs["local_defines"] = COMMON_LOCAL_DEFINES + if "additional_copts" in kwargs: + kwargs["copts"] += kwargs.pop("additional_copts") + if "additional_linkopts" in kwargs: + kwargs["linkopts"] += kwargs.pop("additional_linkopts") + if "deps" in kwargs: + kwargs["deps"] = kwargs["deps"] + ["@com_google_googletest//:gtest"] + else: + kwargs["deps"] = ["@com_google_googletest//:gtest"] + + native.cc_library( + **kwargs + ) + +def ovms_cc_test(**kwargs): + """ + Wrapper for cc_test for test binaries. + Defaults: linkstatic=1, copts=COMMON_STATIC_TEST_COPTS, linkopts=LINKOPTS_ADJUSTED, + local_defines=COMMON_LOCAL_DEFINES. + Use additional_copts to add feature-flag copts (COPTS_MEDIAPIPE, COPTS_PYTHON, etc.) as needed. 
+ """ + if "linkstatic" not in kwargs: + kwargs["linkstatic"] = 1 + if "copts" not in kwargs: + kwargs["copts"] = COMMON_STATIC_TEST_COPTS + if "linkopts" not in kwargs: + kwargs["linkopts"] = COMMON_STATIC_LIBS_LINKOPTS + select({ + "//conditions:default": [], + "//:fuzzer_build": COMMON_FUZZER_LINKOPTS, + }) + if "local_defines" not in kwargs: + kwargs["local_defines"] = COMMON_LOCAL_DEFINES + if "additional_copts" in kwargs: + kwargs["copts"] += kwargs.pop("additional_copts") + if "additional_linkopts" in kwargs: + kwargs["linkopts"] += kwargs.pop("additional_linkopts") + + native.cc_test( + **kwargs + ) + def create_config_settings(): distro_flag() native.config_setting( @@ -249,6 +306,10 @@ COPTS_DROGON = select({ "//conditions:default": ["-DUSE_DROGON=0"], "//:enable_drogon" : ["-DUSE_DROGON=1"], }) +COPTS_CLOUD = select({ + "//conditions:default": ["-DCLOUD_DISABLE=1"], + "//:not_disable_cloud" : ["-DCLOUD_DISABLE=0"], +}) COMMON_FUZZER_COPTS = [ "-fsanitize=address", "-fprofile-generate", diff --git a/src/BUILD b/src/BUILD index f29dc6c181..149ea52c29 100644 --- a/src/BUILD +++ b/src/BUILD @@ -16,8 +16,8 @@ load("@bazel_skylib//lib:selects.bzl", "selects") load("@mediapipe//mediapipe/framework:more_selects.bzl", "more_selects") load("//:common_settings.bzl", - "COMMON_STATIC_TEST_COPTS", "COMMON_STATIC_LIBS_COPTS", "COMMON_STATIC_LIBS_LINKOPTS", "COMMON_FUZZER_COPTS", "COMMON_FUZZER_LINKOPTS", "COMMON_LOCAL_DEFINES", "COPTS_PYTHON", "COPTS_MEDIAPIPE", "COPTS_DROGON", - "create_config_settings", "PYBIND_DEPS", "ovms_cc_library") + "COMMON_STATIC_TEST_COPTS", "COMMON_STATIC_LIBS_COPTS", "COMMON_STATIC_LIBS_LINKOPTS", "COMMON_FUZZER_COPTS", "COMMON_FUZZER_LINKOPTS", "COMMON_LOCAL_DEFINES", "COPTS_PYTHON", "COPTS_MEDIAPIPE", "COPTS_DROGON", "COPTS_CLOUD", + "create_config_settings", "PYBIND_DEPS", "ovms_cc_library", "ovms_test_cc_library", "ovms_cc_test") COPTS_OV_TRACE = select({ "//conditions:default": ["-DOV_TRACE=0"], @@ -27,11 +27,6 @@ LINKOPTS_ADJUSTED = 
COMMON_STATIC_LIBS_LINKOPTS + select({ "//conditions:default": [], "//:fuzzer_build" : COMMON_FUZZER_LINKOPTS, }) -COPTS_CLOUD = select({ - "//conditions:default": ["-DCLOUD_DISABLE=1"], - "//:not_disable_cloud" : ["-DCLOUD_DISABLE=0"], -}) - COPTS_TESTS = COMMON_STATIC_TEST_COPTS + COPTS_PYTHON + COPTS_MEDIAPIPE + COPTS_DROGON + COPTS_CLOUD config_setting( @@ -2264,16 +2259,159 @@ cc_binary( # linkstatic = False, # Use for dynamic linking when necessary ) -cc_test( +ovms_test_cc_library( + name = "stringutils_test", + srcs = ["test/stringutils_test.cpp"], + deps = [ + "//src:libovmsstring_utils", + ], +) + +ovms_test_cc_library( + name = "threadsafequeue_test", + srcs = ["test/threadsafequeue_test.cpp"], + deps = [ + "//src:libovms_threadsafequeue", + "//src:libovmslogging", + ], +) + +ovms_test_cc_library( + name = "tensorinfo_test", + srcs = ["test/tensorinfo_test.cpp"], + deps = [ + "//src:libovms_tensorinfo", + ], +) + +ovms_test_cc_library( + name = "layout_test", + srcs = ["test/layout_test.cpp"], + deps = [ + "//src:libovmslayout", + "//src:libovmsstatus", + ], +) + +ovms_test_cc_library( + name = "tensorutils_test", + srcs = ["test/tensorutils_test.cpp"], + deps = [ + "//src:libovmstensor_utils", + ], +) + +ovms_test_cc_library( + name = "custom_node_buffersqueue_test", + srcs = ["test/custom_node_buffersqueue_test.cpp"], + deps = [ + "//src:custom_nodes_common_buffersqueue", + ], +) + +ovms_test_cc_library( + name = "model_version_policy_test", + srcs = ["test/model_version_policy_test.cpp"], + deps = [ + "//src:libovmsmodelversioning", + ], +) + +ovms_test_cc_library( + name = "kfs_rest_parser_test", + srcs = ["test/kfs_rest_parser_test.cpp"], + deps = [ + "//src:rest_parser_utils", + "//src:libovmsstatus", + ], +) + +ovms_test_cc_library( + name = "rerank_chunking_test", + srcs = ["test/rerank_chunking_test.cpp"], + deps = [ + "//src/rerank:rerank_api_handler", + ], +) + +ovms_test_cc_library( + name = "rerank_handler_test", + srcs = 
["test/rerank_handler_test.cpp"], + deps = [ + "//src/rerank:rerank_api_handler", + ], +) + +ovms_test_cc_library( + name = "metrics_test", + srcs = ["test/metrics_test.cpp"], + deps = [ + "//src/metrics:libovmsmetrics", + ], +) + +ovms_test_cc_library( + name = "custom_node_output_allocator_test", + srcs = ["test/custom_node_output_allocator_test.cpp"], + deps = [ + "//src/dags:custom_node_output_allocator", + "//src:libovmsprecision", + "//src:libovmsshape", + "//third_party:openvino", + ], +) + +ovms_test_cc_library( + name = "systeminfo_test", + srcs = ["test/systeminfo_test.cpp"], + deps = [ + "//src:libovms_systeminfo", + "//src:libovmsstatus", + ], +) + +ovms_test_cc_library( + name = "localfilesystem_test", + srcs = ["test/localfilesystem_test.cpp"], + deps = [ + "//src/filesystem:libovmslocalfilesystem", + "//src/filesystem:libovmsfilesystem", + ], +) + + +ovms_test_cc_library( + name = "gcsfilesystem_test", + srcs = ["test/gcsfilesystem_test.cpp"], + deps = [ + "//src/filesystem:libovmsgcsfilesystem", + ], +) + +ovms_test_cc_library( + name = "azurefilesystem_test", + srcs = ["test/azurefilesystem_test.cpp"], + deps = [ + "//src/filesystem:libovmsazurefilesystem", + ], +) + +ovms_test_cc_library( + name = "schema_test", + srcs = ["test/schema_test.cpp"], + deps = [ + "//src:libovmsschema", + "//src:libovmsstatus", + ], +) + +ovms_cc_test( name = "ovms_test", - linkstatic = 1, srcs = [ "test/c_api_stress_tests.cpp", "test/c_api_tests.cpp", "test/capi_predict_validation_test.cpp", #"test/custom_loader_test.cpp", # TODO remove? 
- "test/custom_node_buffersqueue_test.cpp", - "test/custom_node_output_allocator_test.cpp", "test/demultiplexer_node_test.cpp", "test/deserialization_tests.cpp", "test/ensemble_config_change_stress.cpp", @@ -2290,16 +2428,12 @@ cc_test( "test/http_rest_api_handler_test.cpp", "test/kfs_metadata_test.cpp", "test/kfs_rest_test.cpp", - "test/kfs_rest_parser_test.cpp", - "test/layout_test.cpp", "test/metric_config_test.cpp", - "test/metrics_test.cpp", "test/metrics_flow_test.cpp", "test/mockmodelinstancechangingstates.hpp", "test/model_cache_test.cpp", "test/model_service_test.cpp", "test/model_test.cpp", - "test/model_version_policy_test.cpp", "test/modelconfig_test.cpp", "test/modelinstance_test.cpp", "test/modelmanager_test.cpp", @@ -2313,7 +2447,6 @@ cc_test( "test/predict_validation_test.cpp", "test/prediction_service_test.cpp", "test/rest_utils_test.cpp", - "test/schema_test.cpp", "test/sequence_manager_test.cpp", "test/sequence_test.cpp", "test/serialization_tests.cpp", @@ -2323,23 +2456,15 @@ cc_test( #"test/stateful_modelinstance_test.cpp", # TODO remove? 
"test/stateful_test_utils.hpp", "test/status_test.cpp", - "test/stringutils_test.cpp", - "test/systeminfo_test.cpp", "test/tensor_conversion_test.cpp", - "test/tensorinfo_test.cpp", - "test/tensorutils_test.cpp", "test/test_http_utils.hpp", "test/tfs_rest_parser_binary_inputs_test.cpp", "test/tfs_rest_parser_column_test.cpp", "test/tfs_rest_parser_nonamed_test.cpp", "test/tfs_rest_parser_row_test.cpp", - "test/threadsafequeue_test.cpp", "test/unit_tests.cpp", - "test/localfilesystem_test.cpp", ] + select({ "//:not_disable_cloud": [ - "test/gcsfilesystem_test.cpp", - "test/azurefilesystem_test.cpp", ], "//:disable_cloud": [] }) @@ -2350,8 +2475,6 @@ cc_test( "test/mediapipeflow_test.cpp", "test/mediapipe/inputsidepacketusertestcalc.cc", "test/reranknode_test.cpp", - "test/rerank_handler_test.cpp", - "test/rerank_chunking_test.cpp", "test/streaming_test.cpp", "test/mediapipe_validation_test.cpp", # Mediapipe enabled "test/get_mediapipe_graph_metadata_response_test.cpp", @@ -2544,7 +2667,26 @@ cc_test( ":test_test_models", ":test_test_models_configs", ":test_cmd_exec", + ":stringutils_test", + ":threadsafequeue_test", + ":tensorinfo_test", + ":layout_test", + ":tensorutils_test", + ":custom_node_buffersqueue_test", + ":model_version_policy_test", + ":kfs_rest_parser_test", + ":metrics_test", + ":custom_node_output_allocator_test", + ":systeminfo_test", + ":localfilesystem_test", + ":schema_test", ] + select({ + "//:not_disable_cloud": [ + ":gcsfilesystem_test", + ":azurefilesystem_test", + ], + "//:disable_cloud": [], + }) + select({ "//conditions:default": [ ":openvino_remote_tensors_tests", ":openvino_tests", @@ -2557,6 +2699,8 @@ cc_test( "//src/llm:genai_servables", "//src/llm:output_parsers", ":test_llm_output_parser_tests", + ":rerank_chunking_test", + ":rerank_handler_test", "//src/test/mediapipe/calculators:mediapipe_test_calculators", "//src/test/mediapipe/calculators:dependency_free_http_test_calculators", 
"@mediapipe//mediapipe/calculators/ovms:ovms_calculator", @@ -2568,9 +2712,7 @@ cc_test( "serialization_common", ], }), - copts = COPTS_TESTS, - local_defines = COMMON_LOCAL_DEFINES, - linkopts = LINKOPTS_ADJUSTED, + additional_copts = COPTS_PYTHON + COPTS_MEDIAPIPE + COPTS_DROGON + COPTS_CLOUD, ) cc_library( diff --git a/src/filesystem/BUILD b/src/filesystem/BUILD index 2c9c2cebd0..0f64e025a1 100644 --- a/src/filesystem/BUILD +++ b/src/filesystem/BUILD @@ -13,12 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -load("//:common_settings.bzl", "ovms_cc_library") - -COPTS_CLOUD = select({ - "//conditions:default": ["-DCLOUD_DISABLE=1"], - "//:not_disable_cloud" : ["-DCLOUD_DISABLE=0"], -}) +load("//:common_settings.bzl", "ovms_cc_library", "COPTS_CLOUD") ovms_cc_library( name = "libovmsfilesystem", diff --git a/src/test/audio/text2speech_test.cpp b/src/test/audio/text2speech_test.cpp index 6356bd1118..bd254853ea 100644 --- a/src/test/audio/text2speech_test.cpp +++ b/src/test/audio/text2speech_test.cpp @@ -23,8 +23,7 @@ #include "../../server.hpp" #include "rapidjson/document.h" #include "../test_http_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" -#include "../test_utils.hpp" +#include "../test_mediapipe_utils.hpp" #include "../platform_utils.hpp" #include "../constructor_enabled_model_manager.hpp" diff --git a/src/test/c_api_stress_tests.cpp b/src/test/c_api_stress_tests.cpp index 4b134cd8d3..abf8484cb2 100644 --- a/src/test/c_api_stress_tests.cpp +++ b/src/test/c_api_stress_tests.cpp @@ -39,7 +39,6 @@ #include "../stringutils.hpp" #include "c_api_test_utils.hpp" #include "stress_test_utils.hpp" -#include "test_utils.hpp" using namespace ovms; using namespace tensorflow; diff --git a/src/test/c_api_test_utils.cpp b/src/test/c_api_test_utils.cpp index 0a05f3c798..416381ce7e 100644 --- a/src/test/c_api_test_utils.cpp +++ b/src/test/c_api_test_utils.cpp @@ -17,8 +17,10 @@ #include -#include 
"../logging.hpp" -#include "../ovms.h" // NOLINT +#include "src/logging.hpp" +#include "src/status.hpp" +#include "src/ovms.h" // NOLINT +#include "test_models.hpp" void callbackMarkingItWasUsedWith42AndUnblockingAndCheckingCAPICorrectness(OVMS_InferenceResponse* response, uint32_t flag, void* userStruct) { using ovms::StatusCode; diff --git a/src/test/c_api_test_utils.hpp b/src/test/c_api_test_utils.hpp index 503476117a..26fd41e5b0 100644 --- a/src/test/c_api_test_utils.hpp +++ b/src/test/c_api_test_utils.hpp @@ -20,8 +20,7 @@ #include #include "../ovms.h" // NOLINT -#include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #define THROW_ON_ERROR_CAPI(C_API_CALL) \ { \ diff --git a/src/test/c_api_tests.cpp b/src/test/c_api_tests.cpp index 980aa11669..cae19b3e28 100644 --- a/src/test/c_api_tests.cpp +++ b/src/test/c_api_tests.cpp @@ -45,12 +45,11 @@ #include "../server.hpp" #include "../version.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_model_manager_utils.hpp" -#include "src/test/test_config_utils.hpp" +#include "test_server_utils.hpp" +#include "test_model_manager_utils.hpp" +#include "test_config_utils.hpp" #include "mockmodelinstancechangingstates.hpp" #include "test_models_configs.hpp" -#include "test_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/capi_predict_validation_test.cpp b/src/test/capi_predict_validation_test.cpp index b3d2563c4d..5d716064a7 100644 --- a/src/test/capi_predict_validation_test.cpp +++ b/src/test/capi_predict_validation_test.cpp @@ -27,8 +27,8 @@ #include "../modelconfig.hpp" #include "../predict_request_validation_utils.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_capi.hpp" -#include "src/test/test_predict_validation_utils.hpp" +#include "test_request_utils_capi.hpp" +#include "test_predict_validation_utils.hpp" using 
::testing::NiceMock; using ::testing::Return; diff --git a/src/test/config_export_full_test.cpp b/src/test/config_export_full_test.cpp index 3fb363db51..2a541286de 100644 --- a/src/test/config_export_full_test.cpp +++ b/src/test/config_export_full_test.cpp @@ -20,7 +20,6 @@ #include #include "constructor_enabled_model_manager.hpp" -#include "test_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/custom_loader_test.cpp b/src/test/custom_loader_test.cpp index 2243ac43f9..9099ce2b30 100644 --- a/src/test/custom_loader_test.cpp +++ b/src/test/custom_loader_test.cpp @@ -40,7 +40,7 @@ #include "../sequence_processing_spec.hpp" #include "mockmodelinstancechangingstates.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/deserialization_tests.cpp b/src/test/deserialization_tests.cpp index dd4e031c37..592e9f7d79 100644 --- a/src/test/deserialization_tests.cpp +++ b/src/test/deserialization_tests.cpp @@ -40,9 +40,9 @@ #include "../deserialization_main.hpp" #include "../regularovtensorfactory.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_request_utils_capi.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_request_utils_capi.hpp" using TFTensorProto = tensorflow::TensorProto; diff --git a/src/test/disabled_mediapipe_test.cpp b/src/test/disabled_mediapipe_test.cpp index af11ce295d..49a801664f 100644 --- a/src/test/disabled_mediapipe_test.cpp +++ b/src/test/disabled_mediapipe_test.cpp @@ -22,8 +22,7 @@ #include "../ov_utils.hpp" #include "../server.hpp" #include "test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "test_utils.hpp" +#include "test_server_utils.hpp" #include 
"platform_utils.hpp" using namespace ovms; diff --git a/src/test/embeddingsnode_test.cpp b/src/test/embeddingsnode_test.cpp index a48ffe7744..d17ae0e1bd 100644 --- a/src/test/embeddingsnode_test.cpp +++ b/src/test/embeddingsnode_test.cpp @@ -26,8 +26,7 @@ #include "../server.hpp" #include "rapidjson/document.h" #include "test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "test_utils.hpp" +#include "test_server_utils.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/ensemble_config_change_stress.cpp b/src/test/ensemble_config_change_stress.cpp index 78b0b49dbe..420e29f02d 100644 --- a/src/test/ensemble_config_change_stress.cpp +++ b/src/test/ensemble_config_change_stress.cpp @@ -39,7 +39,7 @@ #include "../stringutils.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" #include "stress_test_utils.hpp" #include "test_models.hpp" #include "test_utils.hpp" diff --git a/src/test/ensemble_flow_custom_node_tests.cpp b/src/test/ensemble_flow_custom_node_tests.cpp index 94cdb70ec5..22b8ef87f4 100644 --- a/src/test/ensemble_flow_custom_node_tests.cpp +++ b/src/test/ensemble_flow_custom_node_tests.cpp @@ -61,11 +61,11 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" #include "light_test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" #include "test_with_temp_dir.hpp" using namespace ovms; diff --git a/src/test/ensemble_mapping_config_tests.cpp b/src/test/ensemble_mapping_config_tests.cpp index 9fd92f0b07..76c185e3ce 100644 --- 
a/src/test/ensemble_mapping_config_tests.cpp +++ b/src/test/ensemble_mapping_config_tests.cpp @@ -34,9 +34,9 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" #include "light_test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/ensemble_metadata_test.cpp b/src/test/ensemble_metadata_test.cpp index b7ebf144a0..3424b4e1de 100644 --- a/src/test/ensemble_metadata_test.cpp +++ b/src/test/ensemble_metadata_test.cpp @@ -27,6 +27,7 @@ #include "../modelinstance.hpp" #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" +#include "test_models.hpp" using namespace ovms; diff --git a/src/test/ensemble_tests.cpp b/src/test/ensemble_tests.cpp index e7d5b98427..cab3575c4a 100644 --- a/src/test/ensemble_tests.cpp +++ b/src/test/ensemble_tests.cpp @@ -56,8 +56,8 @@ #include "platform_utils.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/get_mediapipe_graph_metadata_response_test.cpp b/src/test/get_mediapipe_graph_metadata_response_test.cpp index 21708110ea..89eb9dfb0b 100644 --- a/src/test/get_mediapipe_graph_metadata_response_test.cpp +++ b/src/test/get_mediapipe_graph_metadata_response_test.cpp @@ -45,9 +45,9 @@ #include "constructor_enabled_model_manager.hpp" #include "mockmodelinstancechangingstates.hpp" #include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" -#include "src/test/test_config_utils.hpp" +#include "test_server_utils.hpp" 
+#include "test_mediapipe_utils.hpp" +#include "test_config_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/get_model_metadata_response_test.cpp b/src/test/get_model_metadata_response_test.cpp index 6b0226fa95..2c671be1d9 100644 --- a/src/test/get_model_metadata_response_test.cpp +++ b/src/test/get_model_metadata_response_test.cpp @@ -26,9 +26,10 @@ #include "../modelmanager.hpp" #include "../status.hpp" #include "mockmodelinstancechangingstates.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" #include "test_models_configs.hpp" +#include "test_models.hpp" using ::testing::NiceMock; using ::testing::Return; diff --git a/src/test/get_model_metadata_signature_test.cpp b/src/test/get_model_metadata_signature_test.cpp index 071ffe3e10..52027bf39e 100644 --- a/src/test/get_model_metadata_signature_test.cpp +++ b/src/test/get_model_metadata_signature_test.cpp @@ -19,9 +19,8 @@ #include #include "../get_model_metadata_impl.hpp" -#include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" class GetModelMetadataSignature : public ::testing::Test { struct Info { diff --git a/src/test/get_pipeline_metadata_response_test.cpp b/src/test/get_pipeline_metadata_response_test.cpp index 060dc0cddf..6d181565d8 100644 --- a/src/test/get_pipeline_metadata_response_test.cpp +++ b/src/test/get_pipeline_metadata_response_test.cpp @@ -27,9 +27,8 @@ #include "../model_metric_reporter.hpp" #include "constructor_enabled_model_manager.hpp" -#include "test_utils.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" using namespace ovms; using namespace 
rapidjson; diff --git a/src/test/http_openai_handler_test.cpp b/src/test/http_openai_handler_test.cpp index db03c4a923..41ca7767a2 100644 --- a/src/test/http_openai_handler_test.cpp +++ b/src/test/http_openai_handler_test.cpp @@ -32,8 +32,7 @@ #include "../server.hpp" #include "environment.hpp" #include "test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "test_utils.hpp" +#include "test_server_utils.hpp" #include "platform_utils.hpp" class HttpOpenAIHandlerTest : public ::testing::Test { diff --git a/src/test/http_rest_api_handler_test.cpp b/src/test/http_rest_api_handler_test.cpp index a0fa4abb09..3a3d37bbf3 100644 --- a/src/test/http_rest_api_handler_test.cpp +++ b/src/test/http_rest_api_handler_test.cpp @@ -23,8 +23,7 @@ #include "../servablemanagermodule.hpp" #include "../server.hpp" #include "platform_utils.hpp" -#include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/kfs_metadata_test.cpp b/src/test/kfs_metadata_test.cpp index d43109f3a1..50e67a47ac 100644 --- a/src/test/kfs_metadata_test.cpp +++ b/src/test/kfs_metadata_test.cpp @@ -24,8 +24,9 @@ #include "constructor_enabled_model_manager.hpp" #include "mockmodelinstancechangingstates.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_kfs.hpp" #include "test_models_configs.hpp" +#include "test_models.hpp" using ::testing::NiceMock; using ::testing::Return; diff --git a/src/test/kfs_rest_test.cpp b/src/test/kfs_rest_test.cpp index 33a989ec27..f5bfeeaec2 100644 --- a/src/test/kfs_rest_test.cpp +++ b/src/test/kfs_rest_test.cpp @@ -29,9 +29,9 @@ #include "../server.hpp" #include "../status.hpp" #include "../version.hpp" -#include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #include "platform_utils.hpp" +#include "test_models.hpp" using ovms::Config; using 
ovms::HttpRestApiHandler; diff --git a/src/test/libgit2_test.cpp b/src/test/libgit2_test.cpp index 09e873ae01..f5e104b943 100644 --- a/src/test/libgit2_test.cpp +++ b/src/test/libgit2_test.cpp @@ -26,7 +26,6 @@ #include "src/pull_module/libgit2.hpp" #include "environment.hpp" -#include "test_utils.hpp" #include "test_file_utils.hpp" namespace fs = std::filesystem; diff --git a/src/test/listmodelsendpoint_test.cpp b/src/test/listmodelsendpoint_test.cpp index 7ee94f586c..b0ae9ddee3 100644 --- a/src/test/listmodelsendpoint_test.cpp +++ b/src/test/listmodelsendpoint_test.cpp @@ -22,8 +22,7 @@ #include "../server.hpp" #include "rapidjson/document.h" #include "test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "test_utils.hpp" +#include "test_server_utils.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/llm/assisted_decoding_test.cpp b/src/test/llm/assisted_decoding_test.cpp index 68f6158c1e..b2799e2230 100644 --- a/src/test/llm/assisted_decoding_test.cpp +++ b/src/test/llm/assisted_decoding_test.cpp @@ -47,8 +47,8 @@ #include "rapidjson/stringbuffer.h" #include "rapidjson/writer.h" #include "../test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "../test_utils.hpp" +#include "../test_server_utils.hpp" +#include "../platform_utils.hpp" using namespace ovms; diff --git a/src/test/llm/llmnode_test.cpp b/src/test/llm/llmnode_test.cpp index b612234fb6..0362d8ed37 100644 --- a/src/test/llm/llmnode_test.cpp +++ b/src/test/llm/llmnode_test.cpp @@ -52,8 +52,7 @@ #include "../constructor_enabled_model_manager.hpp" #include "../platform_utils.hpp" #include "../test_http_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" -#include "../test_utils.hpp" +#include "../test_mediapipe_utils.hpp" #include "src/test/environment.hpp" using namespace ovms; diff --git a/src/test/llm/llmtemplate_test.cpp b/src/test/llm/llmtemplate_test.cpp index 3e1f10b760..3b99438958 100644 --- a/src/test/llm/llmtemplate_test.cpp 
+++ b/src/test/llm/llmtemplate_test.cpp @@ -45,11 +45,10 @@ #include "mediapipe/framework/calculator_runner.h" #pragma GCC diagnostic pop -#include "src/test/test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_utils.hpp" +#include "../test_http_utils.hpp" +#include "../test_server_utils.hpp" #include "src/test/light_test_utils.hpp" -#include "src/test/test_with_temp_dir.hpp" +#include "../test_with_temp_dir.hpp" using namespace ovms; diff --git a/src/test/llm/max_model_length_test.cpp b/src/test/llm/max_model_length_test.cpp index b48290c539..62e23afe29 100644 --- a/src/test/llm/max_model_length_test.cpp +++ b/src/test/llm/max_model_length_test.cpp @@ -20,9 +20,8 @@ #include #include "../../llm/servable_initializer.hpp" -#include "src/test/test_utils.hpp" #include "src/test/light_test_utils.hpp" -#include "src/test/test_with_temp_dir.hpp" +#include "../test_with_temp_dir.hpp" using namespace ovms; diff --git a/src/test/llm/text_streamer_test.cpp b/src/test/llm/text_streamer_test.cpp index 7e161f0fe1..63721cb76d 100644 --- a/src/test/llm/text_streamer_test.cpp +++ b/src/test/llm/text_streamer_test.cpp @@ -15,10 +15,11 @@ //***************************************************************************** #include -#include "../../llm/servable.hpp" -#include "../../llm/servable_initializer.hpp" +#include "src/llm/servable.hpp" +#include "src/llm/servable_initializer.hpp" +#include "src/status.hpp" #include "gtest/gtest.h" -#include "../test_utils.hpp" +#include "../platform_utils.hpp" class TextStreamerTest : public ::testing::Test { public: diff --git a/src/test/llm/tokenize_endpoint_test.cpp b/src/test/llm/tokenize_endpoint_test.cpp index 1273f7be1e..281aad125e 100644 --- a/src/test/llm/tokenize_endpoint_test.cpp +++ b/src/test/llm/tokenize_endpoint_test.cpp @@ -38,8 +38,7 @@ #include "../constructor_enabled_model_manager.hpp" #include "../platform_utils.hpp" #include "../test_http_utils.hpp" -#include 
"src/test/test_server_utils.hpp" -#include "../test_utils.hpp" +#include "../test_server_utils.hpp" using namespace ovms; diff --git a/src/test/llm/visual_language_model/complete_flow_test.cpp b/src/test/llm/visual_language_model/complete_flow_test.cpp index 86d21e9e77..7b4b957f65 100644 --- a/src/test/llm/visual_language_model/complete_flow_test.cpp +++ b/src/test/llm/visual_language_model/complete_flow_test.cpp @@ -32,11 +32,11 @@ #include "../../../ov_utils.hpp" #include "../../../server.hpp" #include "../../test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "../../test_utils.hpp" +#include "../../test_server_utils.hpp" #include "rapidjson/document.h" #include "rapidjson/stringbuffer.h" #include "rapidjson/writer.h" +#include "../../platform_utils.hpp" using namespace ovms; diff --git a/src/test/llm/visual_language_model/initialization_test.cpp b/src/test/llm/visual_language_model/initialization_test.cpp index 7d964375ab..ed862af4ec 100644 --- a/src/test/llm/visual_language_model/initialization_test.cpp +++ b/src/test/llm/visual_language_model/initialization_test.cpp @@ -21,8 +21,7 @@ #include "../../../llm/servable_initializer.hpp" #include "../../constructor_enabled_model_manager.hpp" #include "../../platform_utils.hpp" -#include "../../test_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" +#include "../../test_mediapipe_utils.hpp" using namespace ovms; diff --git a/src/test/mediapipe_framework_test.cpp b/src/test/mediapipe_framework_test.cpp index 8a558e74e1..864aabf2ec 100644 --- a/src/test/mediapipe_framework_test.cpp +++ b/src/test/mediapipe_framework_test.cpp @@ -43,8 +43,8 @@ #include "../stringutils.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_server_utils.hpp" #include "test_utils.hpp" #include "platform_utils.hpp" #include 
"test_with_temp_dir.hpp" diff --git a/src/test/mediapipe_validation_test.cpp b/src/test/mediapipe_validation_test.cpp index 9e7a05ac5a..bfab9327a6 100644 --- a/src/test/mediapipe_validation_test.cpp +++ b/src/test/mediapipe_validation_test.cpp @@ -27,8 +27,8 @@ #include "../precision.hpp" #include "../server.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_server_utils.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/mediapipeflow_test.cpp b/src/test/mediapipeflow_test.cpp index 8d56ba7180..36d978bc8c 100644 --- a/src/test/mediapipeflow_test.cpp +++ b/src/test/mediapipeflow_test.cpp @@ -62,9 +62,9 @@ #include "../tfs_frontend/tfs_utils.hpp" #include "constructor_enabled_model_manager.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_server_utils.hpp" +#include "test_mediapipe_utils.hpp" #include "mediapipe/framework/formats/image_frame.h" #include "mediapipe/framework/formats/tensor.h" #include "opencv2/opencv.hpp" diff --git a/src/test/metric_config_test.cpp b/src/test/metric_config_test.cpp index e4801db11d..ec93a70f77 100644 --- a/src/test/metric_config_test.cpp +++ b/src/test/metric_config_test.cpp @@ -26,8 +26,7 @@ #include "../modelconfig.hpp" #include "../modelinstance.hpp" #include "constructor_enabled_model_manager.hpp" -#include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/metrics_flow_test.cpp b/src/test/metrics_flow_test.cpp index 58045b2941..15d314f434 100644 --- a/src/test/metrics_flow_test.cpp +++ b/src/test/metrics_flow_test.cpp @@ -41,9 +41,9 @@ #include 
"constructor_enabled_model_manager.hpp" #include "platform_utils.hpp" #include "test_http_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_server_utils.hpp" #include "test_utils.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/mockmodelinstancechangingstates.hpp b/src/test/mockmodelinstancechangingstates.hpp index 9f03b2310b..bf25651291 100644 --- a/src/test/mockmodelinstancechangingstates.hpp +++ b/src/test/mockmodelinstancechangingstates.hpp @@ -23,7 +23,7 @@ #include "../modelinstance.hpp" #include "../modelversionstatus.hpp" #include "../status.hpp" -#include "test_utils.hpp" +#include "test_models.hpp" class MockModelInstanceChangingStates : public ovms::ModelInstance { public: diff --git a/src/test/model_cache_test.cpp b/src/test/model_cache_test.cpp index 16b41a38e3..2ca907a32b 100644 --- a/src/test/model_cache_test.cpp +++ b/src/test/model_cache_test.cpp @@ -27,6 +27,7 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_with_temp_dir.hpp" +#include "test_models.hpp" using namespace ovms; diff --git a/src/test/model_service_test.cpp b/src/test/model_service_test.cpp index 97c4868553..3ec92c0345 100644 --- a/src/test/model_service_test.cpp +++ b/src/test/model_service_test.cpp @@ -41,7 +41,7 @@ #include "gtest/gtest.h" #include "platform_utils.hpp" #include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "light_test_utils.hpp" diff --git a/src/test/modelconfig_test.cpp b/src/test/modelconfig_test.cpp index 4dfed8f647..4d5fdec824 100644 --- a/src/test/modelconfig_test.cpp +++ b/src/test/modelconfig_test.cpp @@ -21,11 +21,11 @@ #include 
#include #include +#include "platform_utils.hpp" #include "../modelconfig.hpp" #include "../anonymous_input_name.hpp" #include "../status.hpp" -#include "test_utils.hpp" using namespace testing; using ::testing::UnorderedElementsAre; diff --git a/src/test/modelinstance_test.cpp b/src/test/modelinstance_test.cpp index 16e94e83e3..5df4265c96 100644 --- a/src/test/modelinstance_test.cpp +++ b/src/test/modelinstance_test.cpp @@ -29,6 +29,7 @@ #include "gpuenvironment.hpp" #include "test_models_configs.hpp" #include "test_with_temp_dir.hpp" +#include "test_models.hpp" using testing::Return; diff --git a/src/test/modelmanager_test.cpp b/src/test/modelmanager_test.cpp index 3cf43a4fea..3d8613be11 100644 --- a/src/test/modelmanager_test.cpp +++ b/src/test/modelmanager_test.cpp @@ -37,8 +37,9 @@ #include "mockmodelinstancechangingstates.hpp" #include "test_models_configs.hpp" #include "light_test_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_model_manager_utils.hpp" +#include "test_server_utils.hpp" +#include "test_model_manager_utils.hpp" +#include "test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/multipart_calculator_test.cpp b/src/test/multipart_calculator_test.cpp index ae7dcfc949..8b841d2bff 100644 --- a/src/test/multipart_calculator_test.cpp +++ b/src/test/multipart_calculator_test.cpp @@ -22,8 +22,7 @@ #include "../servablemanagermodule.hpp" #include "../server.hpp" #include "test_http_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "test_utils.hpp" +#include "test_server_utils.hpp" #include "platform_utils.hpp" class MultiPartCalculatorTest : public ::testing::Test { diff --git a/src/test/network_utils_test.cpp b/src/test/network_utils_test.cpp index 5c1bdbfcf5..5414259848 100644 --- a/src/test/network_utils_test.cpp +++ b/src/test/network_utils_test.cpp @@ -30,8 +30,7 @@ #include "src/network_utils.hpp" #include "src/logging.hpp" -#include "test_utils.hpp" 
-#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" TEST(NetworkUtils, IsPortAvailable_Positive) { uint64_t availablePort = 12345; diff --git a/src/test/openvino_remote_tensors_tests.cpp b/src/test/openvino_remote_tensors_tests.cpp index 73eea3881b..7bf281a3d2 100644 --- a/src/test/openvino_remote_tensors_tests.cpp +++ b/src/test/openvino_remote_tensors_tests.cpp @@ -34,7 +34,8 @@ #include "../ovms_internal.h" // NOLINT #include "../status.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_models.hpp" +#include "test_server_utils.hpp" #include "gpuenvironment.hpp" #include "light_test_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/openvino_tests.cpp b/src/test/openvino_tests.cpp index b082c525a9..b08c52082a 100644 --- a/src/test/openvino_tests.cpp +++ b/src/test/openvino_tests.cpp @@ -23,7 +23,7 @@ #include "../ov_utils.hpp" #include "../ovms.h" // NOLINT #include "c_api_test_utils.hpp" -#include "test_utils.hpp" +#include "test_models.hpp" using namespace ov; diff --git a/src/test/ovmsconfig_test.cpp b/src/test/ovmsconfig_test.cpp index 79b4a063d2..0b238d6bbc 100644 --- a/src/test/ovmsconfig_test.cpp +++ b/src/test/ovmsconfig_test.cpp @@ -29,8 +29,7 @@ #include "src/filesystem/filesystem.hpp" #include "../ovms_exit_codes.hpp" #include "../systeminfo.hpp" -#include "test_utils.hpp" -#include "src/test/test_config_utils.hpp" +#include "test_config_utils.hpp" using testing::_; using testing::ContainerEq; diff --git a/src/test/pipelinedefinitionstatus_test.cpp b/src/test/pipelinedefinitionstatus_test.cpp index 8c3d722f6c..1193ae055a 100644 --- a/src/test/pipelinedefinitionstatus_test.cpp +++ b/src/test/pipelinedefinitionstatus_test.cpp @@ -21,7 +21,6 @@ #include "../prediction_service_utils.hpp" #include "../status.hpp" #include "../timer.hpp" -#include "test_utils.hpp" using namespace ovms; diff --git a/src/test/predict_validation_test.cpp b/src/test/predict_validation_test.cpp 
index dbc2b6ff72..659707b73c 100644 --- a/src/test/predict_validation_test.cpp +++ b/src/test/predict_validation_test.cpp @@ -27,9 +27,9 @@ #include "../modelinstance.hpp" #include "../predict_request_validation_utils.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_predict_validation_utils.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_predict_validation_utils.hpp" using ::testing::NiceMock; using ::testing::Return; diff --git a/src/test/prediction_service_test.cpp b/src/test/prediction_service_test.cpp index 8248459ade..e17365b7ba 100644 --- a/src/test/prediction_service_test.cpp +++ b/src/test/prediction_service_test.cpp @@ -57,9 +57,9 @@ #include "constructor_enabled_model_manager.hpp" #include "test_models_configs.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_request_utils_capi.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_request_utils_capi.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/pull_gguf_hf_model_test.cpp b/src/test/pull_gguf_hf_model_test.cpp index 457cfb1508..321d4c3caa 100644 --- a/src/test/pull_gguf_hf_model_test.cpp +++ b/src/test/pull_gguf_hf_model_test.cpp @@ -23,7 +23,7 @@ #include #include "../utils/env_guard.hpp" #include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #include "test_with_temp_dir.hpp" #include "gguf_environment.hpp" #include "src/filesystem/filesystem.hpp" diff --git a/src/test/pull_hf_model_test.cpp b/src/test/pull_hf_model_test.cpp index fd41e8961d..fde3f3c66f 100644 --- a/src/test/pull_hf_model_test.cpp +++ b/src/test/pull_hf_model_test.cpp @@ -28,11 +28,11 @@ #include 
"src/utils/env_guard.hpp" #include "src/test/light_test_utils.hpp" -#include "src/test/test_utils.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_config_utils.hpp" -#include "src/test/test_file_utils.hpp" -#include "src/test/test_with_temp_dir.hpp" +#include "test_utils.hpp" +#include "test_server_utils.hpp" +#include "test_config_utils.hpp" +#include "test_file_utils.hpp" +#include "test_with_temp_dir.hpp" #include "src/filesystem/filesystem.hpp" #include "src/pull_module/hf_pull_model_module.hpp" #include "src/pull_module/libgit2.hpp" diff --git a/src/test/pythonnode_test.cpp b/src/test/pythonnode_test.cpp index 0d85ca0f35..6bfd827728 100644 --- a/src/test/pythonnode_test.cpp +++ b/src/test/pythonnode_test.cpp @@ -56,9 +56,9 @@ #include "../python/python_backend.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_server_utils.hpp" +#include "test_mediapipe_utils.hpp" #include "constructor_enabled_model_manager.hpp" #include "platform_utils.hpp" #include "test_utils.hpp" diff --git a/src/test/reranknode_test.cpp b/src/test/reranknode_test.cpp index fa19dba86e..ecc274485f 100644 --- a/src/test/reranknode_test.cpp +++ b/src/test/reranknode_test.cpp @@ -25,7 +25,6 @@ #include "rapidjson/stringbuffer.h" // TODO: Move out together with rerank tests #include "rapidjson/writer.h" // TODO: Move out together with rerank tests #include "test_http_utils.hpp" -#include "test_utils.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/rest_utils_test.cpp b/src/test/rest_utils_test.cpp index e388872c6d..7a6e034753 100644 --- a/src/test/rest_utils_test.cpp +++ b/src/test/rest_utils_test.cpp @@ -21,8 +21,7 @@ #include "../logging.hpp" #include "../rest_utils.hpp" #include "../status.hpp" -#include "test_utils.hpp" -#include 
"src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/serialization_tests.cpp b/src/test/serialization_tests.cpp index 4fe555a1af..f13eccf990 100644 --- a/src/test/serialization_tests.cpp +++ b/src/test/serialization_tests.cpp @@ -38,9 +38,9 @@ #include "../kfs_frontend/serialization.hpp" #include "../capi_frontend/serialization.hpp" #include "../tfs_frontend/tfs_utils.hpp" -#include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_models.hpp" using TFTensorProto = tensorflow::TensorProto; diff --git a/src/test/server_test.cpp b/src/test/server_test.cpp index 5017862da5..7ab9b06e06 100644 --- a/src/test/server_test.cpp +++ b/src/test/server_test.cpp @@ -37,7 +37,7 @@ #include "../server.hpp" #include "../version.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" #include "mockmodelinstancechangingstates.hpp" using ovms::ModelManager; diff --git a/src/test/shape_test.cpp b/src/test/shape_test.cpp index 97de98666f..c378c8775f 100644 --- a/src/test/shape_test.cpp +++ b/src/test/shape_test.cpp @@ -19,7 +19,6 @@ #include #include "../shape.hpp" -#include "test_utils.hpp" using ovms::Dimension; using ovms::Shape; diff --git a/src/test/stateful_config_test.cpp b/src/test/stateful_config_test.cpp index b1e83a60ec..a805004bd4 100644 --- a/src/test/stateful_config_test.cpp +++ b/src/test/stateful_config_test.cpp @@ -22,7 +22,6 @@ #include "constructor_enabled_model_manager.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" -#include "test_utils.hpp" #include "test_with_temp_dir.hpp" using namespace ovms; diff --git a/src/test/stateful_modelinstance_test.cpp b/src/test/stateful_modelinstance_test.cpp index b9a96542ff..82c22a9d04 100644 --- 
a/src/test/stateful_modelinstance_test.cpp +++ b/src/test/stateful_modelinstance_test.cpp @@ -45,7 +45,7 @@ #include "../timer.hpp" #include "stateful_test_utils.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" #include "test_with_temp_dir.hpp" diff --git a/src/test/status_test.cpp b/src/test/status_test.cpp index 92f67e1b15..dcf02d2ed3 100644 --- a/src/test/status_test.cpp +++ b/src/test/status_test.cpp @@ -22,7 +22,6 @@ #include "../ovms.h" #include "../status.hpp" -#include "test_utils.hpp" namespace ovms { static StatusCode& operator++(StatusCode& statusCode) { diff --git a/src/test/streaming_test.cpp b/src/test/streaming_test.cpp index b65c1464f8..c9a4635fa1 100644 --- a/src/test/streaming_test.cpp +++ b/src/test/streaming_test.cpp @@ -32,11 +32,10 @@ #include "../mediapipe_internal/mediapipefactory.hpp" #include "constructor_enabled_model_manager.hpp" #include "platform_utils.hpp" -#include "test_utils.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" -#include "src/test/test_config_utils.hpp" -#include "src/test/test_mediapipe_utils.hpp" +#include "test_request_utils_kfs.hpp" +#include "test_server_utils.hpp" +#include "test_config_utils.hpp" +#include "test_mediapipe_utils.hpp" #if (PYTHON_DISABLE == 0) #include "../python/pythoninterpretermodule.hpp" diff --git a/src/test/stress_test_utils.hpp b/src/test/stress_test_utils.hpp index b1f1feced5..c5899346fb 100644 --- a/src/test/stress_test_utils.hpp +++ b/src/test/stress_test_utils.hpp @@ -55,9 +55,9 @@ #include "../stringutils.hpp" #include "../tfs_frontend/tfs_utils.hpp" #include "c_api_test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" +#include 
"test_server_utils.hpp" #include "test_utils.hpp" #include "light_test_utils.hpp" #include "platform_utils.hpp" diff --git a/src/test/tensor_conversion_test.cpp b/src/test/tensor_conversion_test.cpp index ccc91b64bc..829b509707 100644 --- a/src/test/tensor_conversion_test.cpp +++ b/src/test/tensor_conversion_test.cpp @@ -28,8 +28,8 @@ #include "../tensor_conversion.hpp" #include "opencv2/opencv.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" -#include "src/test/test_request_utils_kfs.hpp" +#include "test_request_utils_tfs.hpp" +#include "test_request_utils_kfs.hpp" #include "platform_utils.hpp" using namespace ovms; diff --git a/src/test/test_http_utils.hpp b/src/test/test_http_utils.hpp index b952d5431b..91db2e5242 100644 --- a/src/test/test_http_utils.hpp +++ b/src/test/test_http_utils.hpp @@ -29,8 +29,7 @@ #include "../http_status_code.hpp" #include "../multi_part_parser.hpp" -#include "test_utils.hpp" -#include "src/test/test_server_utils.hpp" +#include "test_server_utils.hpp" class MockedServerRequestInterface final : public ovms::HttpAsyncWriter { public: diff --git a/src/test/test_models.hpp b/src/test/test_models.hpp index c5f686d2ac..880999e042 100644 --- a/src/test/test_models.hpp +++ b/src/test/test_models.hpp @@ -15,8 +15,11 @@ //***************************************************************************** #pragma once +#include #include +#include "src/modelversion.hpp" +#include "src/shape.hpp" #include "platform_utils.hpp" const std::string dummy_model_location = getGenericFullPathForSrcTest(std::filesystem::current_path().u8string() + "/src/test/dummy", false); diff --git a/src/test/test_request_utils_capi.hpp b/src/test/test_request_utils_capi.hpp index 4b2c9ca115..2ea196de9e 100644 --- a/src/test/test_request_utils_capi.hpp +++ b/src/test/test_request_utils_capi.hpp @@ -22,7 +22,7 @@ #include "src/capi_frontend/inferencerequest.hpp" #include "src/capi_frontend/inferenceresponse.hpp" -#include "src/test/test_utils.hpp" 
+#include "test_utils.hpp" using CAPIInterface = std::pair; diff --git a/src/test/test_request_utils_kfs.hpp b/src/test/test_request_utils_kfs.hpp index 91afe1fc6f..0b05d2256e 100644 --- a/src/test/test_request_utils_kfs.hpp +++ b/src/test/test_request_utils_kfs.hpp @@ -27,7 +27,7 @@ #include "src/kfs_frontend/kfs_grpc_inference_service.hpp" #include "src/kfs_frontend/kfs_utils.hpp" -#include "src/test/test_utils.hpp" +#include "test_utils.hpp" using KFSInterface = std::pair; diff --git a/src/test/test_request_utils_tfs.hpp b/src/test/test_request_utils_tfs.hpp index 195c7ffa76..a26cf59123 100644 --- a/src/test/test_request_utils_tfs.hpp +++ b/src/test/test_request_utils_tfs.hpp @@ -28,7 +28,7 @@ #include "tensorflow_serving/apis/prediction_service.grpc.pb.h" #pragma GCC diagnostic pop -#include "src/test/test_utils.hpp" +#include "test_utils.hpp" using TFSRequestType = tensorflow::serving::PredictRequest; using TFSResponseType = tensorflow::serving::PredictResponse; diff --git a/src/test/test_utils.hpp b/src/test/test_utils.hpp index 5b52646ab5..7e36cccd04 100644 --- a/src/test/test_utils.hpp +++ b/src/test/test_utils.hpp @@ -37,7 +37,7 @@ #include "src/status.hpp" #include "src/tensorinfo.hpp" -#include "src/test/test_models.hpp" +#include "test_models.hpp" // ============================================================================ // Core test utilities (frontend-agnostic) diff --git a/src/test/tfs_rest_parser_binary_inputs_test.cpp b/src/test/tfs_rest_parser_binary_inputs_test.cpp index db1157c116..542eef3bf7 100644 --- a/src/test/tfs_rest_parser_binary_inputs_test.cpp +++ b/src/test/tfs_rest_parser_binary_inputs_test.cpp @@ -25,7 +25,7 @@ #include "absl/strings/escaping.h" #pragma warning(pop) #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/tfs_rest_parser_column_test.cpp b/src/test/tfs_rest_parser_column_test.cpp index 6bebd812f0..ff8c1d580a 100644 
--- a/src/test/tfs_rest_parser_column_test.cpp +++ b/src/test/tfs_rest_parser_column_test.cpp @@ -27,7 +27,7 @@ #include "../rest_parser.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/tfs_rest_parser_nonamed_test.cpp b/src/test/tfs_rest_parser_nonamed_test.cpp index b6ed1eae8b..2878fd5d07 100644 --- a/src/test/tfs_rest_parser_nonamed_test.cpp +++ b/src/test/tfs_rest_parser_nonamed_test.cpp @@ -22,7 +22,7 @@ #include "absl/strings/escaping.h" #pragma warning(pop) #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" using namespace ovms; diff --git a/src/test/tfs_rest_parser_row_test.cpp b/src/test/tfs_rest_parser_row_test.cpp index f9639963dc..5ca46911bb 100644 --- a/src/test/tfs_rest_parser_row_test.cpp +++ b/src/test/tfs_rest_parser_row_test.cpp @@ -29,7 +29,7 @@ #include "../rest_parser.hpp" #include "test_utils.hpp" -#include "src/test/test_request_utils_tfs.hpp" +#include "test_request_utils_tfs.hpp" using namespace ovms; From 8500d064a20d03965fa5195a462e7872274240c6 Mon Sep 17 00:00:00 2001 From: Adrian Tobiszewski Date: Tue, 28 Apr 2026 10:50:39 +0200 Subject: [PATCH 4/5] Phase 5: Extract 6 test files as ovms_test_cc_library targets - shape_test: minimal deps (libovmsshape, openvino) - ovinferrequestqueue_test: deps (ovinferrequestsqueue, timer, openvino) - modelconfig_test: deps (modelconfig, anonymous_input_name, status, platform_utils, openvino) - ovmsconfig_test: deps (config, server_settings, exit_codes, systeminfo, env_guard, filesystem, spdlog) - pipelinedefinitionstatus_test: deps (pipelinedefinitionstatus, status, timer) - Removed unused includes (modelinstance.hpp, prediction_service_utils.hpp) - ov_utils_test: deps (ov_utils, modelinstance, filesystem, test_utils, openvino) Also extracted test_config_utils as header-only cc_library target. 
Added test_config_utils dep to pull_hf_model_test (was getting it transitively from test_utils before). All 180 filtered tests pass. --- src/BUILD | 95 ++++++++++++++++++++-- src/test/pipelinedefinitionstatus_test.cpp | 2 - 2 files changed, 88 insertions(+), 9 deletions(-) diff --git a/src/BUILD b/src/BUILD index 149ea52c29..0527db7be4 100644 --- a/src/BUILD +++ b/src/BUILD @@ -2405,6 +2405,86 @@ ovms_test_cc_library( ], ) +ovms_test_cc_library( + name = "test_config_utils", + hdrs = ["test/test_config_utils.hpp"], + deps = [ + "//src:libovms_config", + ], +) + +ovms_test_cc_library( + name = "shape_test", + srcs = ["test/shape_test.cpp"], + deps = [ + "//src:libovmsshape", + "//third_party:openvino", + ], +) + +ovms_test_cc_library( + name = "ovinferrequestqueue_test", + srcs = ["test/ovinferrequestqueue_test.cpp"], + deps = [ + "//src:libovms_ovinferrequestsqueue", + "//src:libovmstimer", + "//third_party:openvino", + ], + data = [ + "test/dummy/1/dummy.xml", + "test/dummy/1/dummy.bin", + ], +) + +ovms_test_cc_library( + name = "modelconfig_test", + srcs = ["test/modelconfig_test.cpp"], + deps = [ + "//src:modelconfig", + "//src:anonymous_input_name", + "//src:libovmsstatus", + "//src:test_platform_utils", + "//third_party:openvino", + ], +) + +ovms_test_cc_library( + name = "ovmsconfig_test", + srcs = ["test/ovmsconfig_test.cpp"], + deps = [ + "//src:libovms_config", + "//src:libovms_server_settings", + "//src:ovms_exit_codes", + "//src:libovms_systeminfo", + "//src/utils:env_guard", + "//src/filesystem:libovmsfilesystem", + ":test_config_utils", + "@com_github_gabime_spdlog//:spdlog", + ], +) + +ovms_test_cc_library( + name = "pipelinedefinitionstatus_test", + srcs = ["test/pipelinedefinitionstatus_test.cpp"], + deps = [ + "//src/dags:pipelinedefinitionstatus", + "//src:libovmsstatus", + "//src:libovmstimer", + ], +) + +ovms_test_cc_library( + name = "ov_utils_test", + srcs = ["test/ov_utils_test.cpp"], + deps = [ + "//src:libovms_ov_utils", + 
"//src:modelinstance", + "//src/filesystem:libovmsfilesystem", + ":test_utils", + "//third_party:openvino", + ], +) + ovms_cc_test( name = "ovms_test", srcs = [ @@ -2434,16 +2514,11 @@ ovms_cc_test( "test/model_cache_test.cpp", "test/model_service_test.cpp", "test/model_test.cpp", - "test/modelconfig_test.cpp", "test/modelinstance_test.cpp", "test/modelmanager_test.cpp", "test/modelversionstatus_test.cpp", "test/node_library_manager_test.cpp", "test/nodesessionmetadata_test.cpp", - "test/ov_utils_test.cpp", - "test/ovinferrequestqueue_test.cpp", - "test/ovmsconfig_test.cpp", - "test/pipelinedefinitionstatus_test.cpp", "test/predict_validation_test.cpp", "test/prediction_service_test.cpp", "test/rest_utils_test.cpp", @@ -2451,7 +2526,6 @@ ovms_cc_test( "test/sequence_test.cpp", "test/serialization_tests.cpp", "test/server_test.cpp", - "test/shape_test.cpp", "test/stateful_config_test.cpp", #"test/stateful_modelinstance_test.cpp", # TODO remove? "test/stateful_test_utils.hpp", @@ -2680,6 +2754,13 @@ ovms_cc_test( ":systeminfo_test", ":localfilesystem_test", ":schema_test", + ":shape_test", + ":ovinferrequestqueue_test", + ":modelconfig_test", + ":ovmsconfig_test", + ":pipelinedefinitionstatus_test", + ":ov_utils_test", + ":test_config_utils", ] + select({ "//:not_disable_cloud": [ ":gcsfilesystem_test", @@ -2795,7 +2876,6 @@ cc_library( "test/test_server_utils.hpp", "test/test_mediapipe_utils.hpp", "test/test_predict_validation_utils.hpp", - "test/test_config_utils.hpp", "test/test_model_manager_utils.hpp", "test/stress_test_utils.hpp", ], @@ -2968,6 +3048,7 @@ cc_library( "//src/servables_config_manager_module:listmodels", "//src/utils:env_guard", ":test_utils", + ":test_config_utils", ":test_file_utils", ":libtest_environment", ":test_platform_utils", diff --git a/src/test/pipelinedefinitionstatus_test.cpp b/src/test/pipelinedefinitionstatus_test.cpp index 1193ae055a..38bd383bd1 100644 --- a/src/test/pipelinedefinitionstatus_test.cpp +++ 
b/src/test/pipelinedefinitionstatus_test.cpp @@ -17,8 +17,6 @@ #include #include "../dags/pipelinedefinitionstatus.hpp" -#include "../modelinstance.hpp" -#include "../prediction_service_utils.hpp" #include "../status.hpp" #include "../timer.hpp" From 48d8fe52a45436949478a8bf6bea865786465540 Mon Sep 17 00:00:00 2001 From: Adrian Tobiszewski Date: Tue, 28 Apr 2026 11:52:06 +0200 Subject: [PATCH 5/5] Fix custom_loader_test.cpp: restore CRLF line endings The file was accidentally converted from CRLF to LF in Phase 3, causing GitHub to show every line as changed. Restored original CRLF endings so only the actual include addition is visible. --- src/test/custom_loader_test.cpp | 2932 +++++++++++++++---------------- 1 file changed, 1466 insertions(+), 1466 deletions(-) diff --git a/src/test/custom_loader_test.cpp b/src/test/custom_loader_test.cpp index 9099ce2b30..c671a141f1 100644 --- a/src/test/custom_loader_test.cpp +++ b/src/test/custom_loader_test.cpp @@ -1,1466 +1,1466 @@ -//***************************************************************************** -// Copyright 2020-2021 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-//***************************************************************************** -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "../executingstreamidguard.hpp" -#include "../get_model_metadata_impl.hpp" -#include "src/filesystem/localfilesystem.hpp" -#include "../model.hpp" -#include "../model_service.hpp" -#include "../modelinstance.hpp" -#include "../modelinstanceunloadguard.hpp" -#include "../modelmanager.hpp" -#include "../modelversionstatus.hpp" -#include "../prediction_service_utils.hpp" -#include "../schema.hpp" -#include "../sequence_processing_spec.hpp" -#include "mockmodelinstancechangingstates.hpp" -#include "test_utils.hpp" -#include "test_request_utils_tfs.hpp" -#include "light_test_utils.hpp" -#include "platform_utils.hpp" - -using testing::_; -using testing::ContainerEq; -using testing::Each; -using testing::Eq; -using ::testing::NiceMock; -using testing::Return; -using testing::ReturnRef; -using testing::UnorderedElementsAre; - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wnarrowing" - -using namespace ovms; - -/* ------------------------------------------------- -AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW CONFIGURATIONS ARE NOT USED -REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED -------------------------------------------------- - -namespace { - -// Custom Loader Config Keys -#define ENABLE_FORCE_BLACKLIST_CHECK "ENABLE_FORCE_BLACKLIST_CHECK" - -// config_model_with_customloader -const char* custom_loader_config_model = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// 
config_model_with_customloader -const char* custom_loader_config_model_relative_paths = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_no_model_with_customloader -const char* custom_loader_config_model_deleted = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[] - })"; - -// config_2_models_with_customloader -const char* custom_loader_config_model_new = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - }, - { - "config":{ - "name":"dummy-new", - "base_path": "/tmp/test_cl_models/model2", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_model_without_customloader_options -const char* custom_loader_config_model_customloader_options_removed = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1 - } - } - ] - })"; - -const char* config_model_with_customloader_options_unknown_loadername = R"({ - "custom_loader_config_list":[ - { - 
"config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "unknown", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -// config_model_with_customloader -const char* custom_loader_config_model_multiple = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader-a", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - }, - { - "config":{ - "loader_name":"sample-loader-b", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - }, - { - "config":{ - "loader_name":"sample-loader-c", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy-a", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader-a", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - }, - { - "config":{ - "name":"dummy-b", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader-b", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - }, - { - "config":{ - "name":"dummy-c", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader-c", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - -const char* custom_loader_config_model_blacklist = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so", - "loader_config_file": "sample-loader-config" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": 
"dummy.xml", "bin_file": "dummy.bin", "enable_file": "dummy.status"} - } - } - ] - })"; - -const char* empty_config = R"({ - "custom_loader_config_list":[], - "model_config_list":[] - })"; - -const char* expected_json_available = R"({ - "model_version_status": [ - { - "version": "1", - "state": "AVAILABLE", - "status": { - "error_code": "OK", - "error_message": "OK" - } - } - ] -} -)"; - -const char* expected_json_end = R"({ - "model_version_status": [ - { - "version": "1", - "state": "END", - "status": { - "error_code": "OK", - "error_message": "OK" - } - } - ] -} -)"; - -const char* expected_json_loading_error = R"({ - "model_version_status": [ - { - "version": "1", - "state": "LOADING", - "status": { - "error_code": "UNKNOWN", - "error_message": "UNKNOWN" - } - } - ] -} -)"; - -} // namespace - -*/ - -class TestCustomLoader : public ::testing::Test { -public: - void SetUp() { - const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); - - cl_models_path = getGenericFullPathForTmp("/tmp/" + std::string(test_info->name())); - cl_model_1_path = cl_models_path + "/model1/"; - cl_model_2_path = cl_models_path + "/model2/"; - - const std::string FIRST_MODEL_NAME = "dummy"; - const std::string SECOND_MODEL_NAME = "dummy_new"; - - std::filesystem::remove_all(cl_models_path); - std::filesystem::create_directories(cl_model_1_path); - } - void TearDown() { - // Create config file with an empty config & reload - const char* empty_config = R"({ - "custom_loader_config_list":[], - "model_config_list":[] - })"; - std::string configStr = empty_config; - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - // Clean up temporary destination - std::filesystem::remove_all(cl_models_path); - } - - /** - * @brief This function should mimic most closely predict request to check for thread safety - */ - 
void performPredict(const std::string modelName, - const ovms::model_version_t modelVersion, - const tensorflow::serving::PredictRequest& request, - std::unique_ptr> waitBeforeGettingModelInstance = nullptr, - std::unique_ptr> waitBeforePerformInference = nullptr); - - void deserialize(const std::vector& input, ov::InferRequest& inferRequest, std::shared_ptr modelInstance) { - try { - ov::Tensor tensor( - modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getOvPrecision(), - modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getShape().createPartialShape().get_shape(), - const_cast(reinterpret_cast(input.data()))); - inferRequest.set_tensor(DUMMY_MODEL_INPUT_NAME, tensor); - } catch (...) { - ASSERT_TRUE(false) << "exception during deserialize"; - } - } - - void serializeAndCheck(int outputSize, ov::InferRequest& inferRequest) { - std::vector output(outputSize); - ASSERT_THAT(output, Each(Eq(0.))); - auto tensorOutput = inferRequest.get_tensor(DUMMY_MODEL_OUTPUT_NAME); - ASSERT_EQ(tensorOutput.get_byte_size(), outputSize * sizeof(float)); - std::memcpy(output.data(), tensorOutput.data(), outputSize * sizeof(float)); - EXPECT_THAT(output, Each(Eq(2.))); - } - - ovms::Status performInferenceWithRequest(const tensorflow::serving::PredictRequest& request, tensorflow::serving::PredictResponse& response) { - std::shared_ptr model; - std::unique_ptr unload_guard; - auto status = manager.getModelInstance("dummy", 0, model, unload_guard); - if (!status.ok()) { - return status; - } - - response.Clear(); - return model->infer(&request, &response, unload_guard); - } - -public: - ConstructorEnabledModelManager manager; - - ~TestCustomLoader() { - std::cout << "Destructor of TestCustomLoader()" << std::endl; - } - - std::string cl_models_path; - std::string cl_model_1_path; - std::string cl_model_2_path; -}; - -class MockModelInstance : public ovms::ModelInstance { -public: - MockModelInstance(ov::Core& ieCore) : - ModelInstance("UNUSED_NAME", 42, ieCore) {} - 
const ovms::Status mockValidate(const tensorflow::serving::PredictRequest* request) { - return validate(request); - } -}; - -void TestCustomLoader::performPredict(const std::string modelName, - const ovms::model_version_t modelVersion, - const tensorflow::serving::PredictRequest& request, - std::unique_ptr> waitBeforeGettingModelInstance, - std::unique_ptr> waitBeforePerformInference) { - // only validation is skipped - std::shared_ptr modelInstance; - std::unique_ptr modelInstanceUnloadGuard; - - auto& tensorProto = request.inputs().find("b")->second; - size_t batchSize = tensorProto.tensor_shape().dim(0).size(); - size_t inputSize = 1; - for (int i = 0; i < tensorProto.tensor_shape().dim_size(); i++) { - inputSize *= tensorProto.tensor_shape().dim(i).size(); - } - - if (waitBeforeGettingModelInstance) { - std::cout << "Waiting before getModelInstance. Batch size: " << batchSize << std::endl; - waitBeforeGettingModelInstance->get(); - } - ASSERT_EQ(manager.getModelInstance(modelName, modelVersion, modelInstance, modelInstanceUnloadGuard), ovms::StatusCode::OK); - - if (waitBeforePerformInference) { - std::cout << "Waiting before performInfernce." 
<< std::endl; - waitBeforePerformInference->get(); - } - ovms::Status validationStatus = (std::static_pointer_cast(modelInstance))->mockValidate(&request); - std::cout << validationStatus.string() << std::endl; - ASSERT_TRUE(validationStatus == ovms::StatusCode::OK || - validationStatus == ovms::StatusCode::RESHAPE_REQUIRED || - validationStatus == ovms::StatusCode::BATCHSIZE_CHANGE_REQUIRED); - auto bsPositionIndex = 0; - auto requestBatchSize = ovms::getRequestBatchSize(&request, bsPositionIndex); - auto requestShapes = ovms::getRequestShapes(&request); - ASSERT_EQ(modelInstance->reloadModelIfRequired(validationStatus, requestBatchSize, requestShapes, modelInstanceUnloadGuard), ovms::StatusCode::OK); - - ovms::ExecutingStreamIdGuard executingStreamIdGuard(modelInstance->getInferRequestsQueue(), modelInstance->getMetricReporter()); - ov::InferRequest& inferRequest = executingStreamIdGuard.getInferRequest(); - std::vector input(inputSize); - std::generate(input.begin(), input.end(), []() { return 1.; }); - ASSERT_THAT(input, Each(Eq(1.))); - deserialize(input, inferRequest, modelInstance); - auto status = modelInstance->performInference(inferRequest); - ASSERT_EQ(status, ovms::StatusCode::OK); - size_t outputSize = batchSize * DUMMY_MODEL_OUTPUT_SIZE; - serializeAndCheck(outputSize, inferRequest); -} - -// Schema Validation - -TEST_F(TestCustomLoader, CustomLoaderConfigMatchingSchema) { - const char* customloaderConfigMatchingSchema = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"dummy-loader", - "library_path": "/tmp/loader/dummyloader", - "loader_config_file": "dummyloader-config" - } - } - ], - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options": {"loader_name": "dummy-loader"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigMatchingSchemaParsed; - customloaderConfigMatchingSchemaParsed.Parse(customloaderConfigMatchingSchema); - auto 
result = ovms::validateJsonAgainstSchema(customloaderConfigMatchingSchemaParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::OK); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderName) { - const char* customloaderConfigMissingLoaderName = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "library_path": "dummyloader", - "loader_config_file": "dummyloader-config" - } - } - ], - "model_config_list": [] - } - )"; - - rapidjson::Document customloaderConfigMissingLoaderNameParsed; - customloaderConfigMissingLoaderNameParsed.Parse(customloaderConfigMissingLoaderName); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLibraryPath) { - const char* customloaderConfigMissingLibraryPath = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"dummy-loader", - "loader_config_file": "dummyloader-config" - } - } - ], - "model_config_list": [] - } - )"; - - rapidjson::Document customloaderConfigMissingLibraryPathParsed; - customloaderConfigMissingLibraryPathParsed.Parse(customloaderConfigMissingLibraryPath); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLibraryPathParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderConfig) { - const char* customloaderConfigMissingLoaderConfig = R"( - { - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"dummy-loader", - "library_path": "dummyloader" - } - } - ], - "model_config_list": [] - } - )"; - - rapidjson::Document customloaderConfigMissingLoaderConfigParsed; - customloaderConfigMissingLoaderConfigParsed.Parse(customloaderConfigMissingLoaderConfig); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderConfigParsed, 
ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::OK); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigInvalidCustomLoaderConfig) { - const char* customloaderConfigInvalidCustomLoaderConfig = R"( - { - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options_invalid": {"loader_name": "dummy-loader"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigInvalidCustomLoaderConfigParsed; - customloaderConfigInvalidCustomLoaderConfigParsed.Parse(customloaderConfigInvalidCustomLoaderConfig); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigInvalidCustomLoaderConfigParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderNameInCustomLoaderOptions) { - const char* customloaderConfigMissingLoaderNameInCustomLoaderOptions = R"( - { - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options": {"a": "SS"} - } - } - ] - } - )"; - - rapidjson::Document customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed; - customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed.Parse(customloaderConfigMissingLoaderNameInCustomLoaderOptions); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); -} - -TEST_F(TestCustomLoader, CustomLoaderConfigMultiplePropertiesInCustomLoaderOptions) { - const char* customloaderConfigMultiplePropertiesInCustomLoaderOptions = R"( - { - "model_config_list":[ - { - "config":{ - "name":"dummy-loader-model", - "base_path": "/tmp/models/dummy1", - "custom_loader_options": {"loader_name": "dummy-loader", "1": "a", "2": "b", "3": "c", "4":"d", "5":"e", "6":"f"} - } - } - ] - } - )"; - - rapidjson::Document 
customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed; - customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed.Parse(customloaderConfigMultiplePropertiesInCustomLoaderOptions); - auto result = ovms::validateJsonAgainstSchema(customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); - EXPECT_EQ(result, ovms::StatusCode::OK); -} - -// Functional Validation -/* ------------------------------------------------- -AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW TESTS ARE NOT VALID -REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED -------------------------------------------------- -TEST_F(TestCustomLoader, CustomLoaderPrediction) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictionRelativePath) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/bazel-bin/src/libsampleloader.so"), 
cl_models_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_relative_paths; - configStr.replace(configStr.find("test_cl_models"), std::string("test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderGetStatus) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, 
&json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictDeletePredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - tensorflow::serving::PredictResponse response; - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::OK); - - // Re-create config file - createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictNewVersionPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create 
config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Copy version 1 to version 2 - std::filesystem::create_directories(cl_model_1_path + "2"); - std::filesystem::copy(cl_model_1_path + "1", cl_model_1_path + "2", std::filesystem::copy_options::recursive); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 2, request); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictNewModelPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Copy model1 to model2 - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_2_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config 
string - configStr = custom_loader_config_model_new; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Re-create config file - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - performPredict("dummy-new", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictRemoveCustomLoaderOptionsPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Replace model path in the config string - configStr = custom_loader_config_model_customloader_options_removed; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Re-create config file - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), 
ovms::StatusCode::OK); - - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, PredictNormalModelAddCustomLoaderOptionsPredict) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_customloader_options_removed; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); - - // Replace model path in the config string - configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderOptionWithUnknownLibrary) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = config_model_with_customloader_options_unknown_loadername; - 
configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - tensorflow::serving::PredictResponse response; - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); -} - -TEST_F(TestCustomLoader, CustomLoaderWithMissingModelFiles) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - tensorflow::serving::PredictResponse response; - ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); -} - -TEST_F(TestCustomLoader, CustomLoaderGetStatusDeleteModelGetStatus) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - 
configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // Re-create config file - createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest reqx; - tensorflow::serving::GetModelStatusResponse resx; - - auto model_specx = reqx.mutable_model_spec(); - model_specx->Clear(); - model_specx->set_name("dummy"); - model_specx->mutable_version()->set_value(1); - - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_constx = resx; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); -} - -TEST_F(TestCustomLoader, CustomLoaderPredictionUsingManyCustomLoaders) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to 
temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_multiple; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - - performPredict("dummy-a", 1, request); - performPredict("dummy-b", 1, request); - performPredict("dummy-c", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderGetMetaData) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - const char* expected_json = R"({ - "modelSpec": { - "name": "dummy", - "signatureName": "", - "version": "1" - }, - "metadata": { - "signature_def": { - "@type": "type.googleapis.com/tensorflow.serving.SignatureDefMap", - "signatureDef": { - "serving_default": { - "inputs": { - "b": { - "dtype": "DT_FLOAT", - "tensorShape": { - "dim": [ - { - "size": "1", - "name": "" - }, - { - "size": "10", - "name": "" - } - ], - "unknownRank": false - }, - "name": "b" - } - }, - "outputs": { - "a": { - "dtype": "DT_FLOAT", - "tensorShape": { - "dim": [ - { - "size": "1", - "name": "" - }, - { - "size": "10", - "name": "" - } - ], - "unknownRank": false - }, - "name": "a" - } - }, - "methodName": "", - "defaults": {} - } - } - } - } -} -)"; - - // 
Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - std::shared_ptr model; - std::unique_ptr unload_guard; - ASSERT_EQ(manager.getModelInstance("dummy", 1, model, unload_guard), ovms::StatusCode::OK); - - tensorflow::serving::GetModelMetadataResponse response; - ovms::GetModelMetadataImpl::buildResponse(model, &response); - - std::string json_output = ""; - ovms::GetModelMetadataImpl::serializeResponse2Json(&response, &json_output); - - EXPECT_TRUE(response.has_model_spec()); - EXPECT_EQ(response.model_spec().name(), "dummy"); - - tensorflow::serving::SignatureDefMap def; - response.metadata().at("signature_def").UnpackTo(&def); - - const auto& inputs = ((*def.mutable_signature_def())["serving_default"]).inputs(); - const auto& outputs = ((*def.mutable_signature_def())["serving_default"]).outputs(); - - EXPECT_EQ(inputs.size(), 1); - EXPECT_EQ(outputs.size(), 1); - EXPECT_EQ(json_output, expected_json); -} - -TEST_F(TestCustomLoader, CustomLoaderMultipleLoaderWithSameLoaderName) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - const char* custom_loader_config_model_xx = R"({ - "custom_loader_config_list":[ - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - }, - { - "config":{ - "loader_name":"sample-loader", - "library_path": "/ovms/bazel-bin/src/libsampleloader.so" - } - } - ], - "model_config_list":[ - { - "config":{ - 
"name":"dummy", - "base_path": "/tmp/test_cl_models/model1", - "nireq": 1, - "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} - } - } - ] - })"; - - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_xx; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::PredictRequest request; - preparePredictRequest(request, - {{DUMMY_MODEL_INPUT_NAME, - std::tuple{{1, 10}, ovms::Precision::FP32}}}); - performPredict("dummy", 1, request); -} - -TEST_F(TestCustomLoader, CustomLoaderBlackListingModel) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // 
Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // copy status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest reqx; - tensorflow::serving::GetModelStatusResponse resx; - - auto model_specx = reqx.mutable_model_spec(); - model_specx->Clear(); - model_specx->set_name("dummy"); - model_specx->mutable_version()->set_value(1); - - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_constx = resx; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); -} - -TEST_F(TestCustomLoader, CustomLoaderBlackListingRevoke) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary 
destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // copy status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - 
tensorflow::serving::GetModelStatusRequest req1; - tensorflow::serving::GetModelStatusResponse res1; - - auto model_spec1 = req1.mutable_model_spec(); - model_spec1->Clear(); - model_spec1->set_name("dummy"); - model_spec1->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const1 = res1; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); - - // Remove status file - std::filesystem::remove(status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req2; - tensorflow::serving::GetModelStatusResponse res2; - - auto model_spec2 = req2.mutable_model_spec(); - model_spec2->Clear(); - model_spec2->set_name("dummy"); - model_spec2->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const2 = res2; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); -} - -TEST_F(TestCustomLoader, CustomLoaderBlackListModelReloadError) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - 
createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req; - tensorflow::serving::GetModelStatusResponse res; - - auto model_spec = req.mutable_model_spec(); - model_spec->Clear(); - model_spec->set_name("dummy"); - model_spec->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const = res; - std::string json_output; - Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); - - // copy status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req1; - tensorflow::serving::GetModelStatusResponse res1; - - auto model_spec1 = req1.mutable_model_spec(); - model_spec1->Clear(); - model_spec1->set_name("dummy"); - model_spec1->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const 
tensorflow::serving::GetModelStatusResponse response_const1 = res1; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_end); - - // Remove status file & the Dummy.bin file - std::filesystem::remove(status_file); - std::string bin_file = status_file_path + "/dummy.bin"; - std::filesystem::remove(bin_file); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::FILE_INVALID); - - tensorflow::serving::GetModelStatusRequest req2; - tensorflow::serving::GetModelStatusResponse res2; - - auto model_spec2 = req2.mutable_model_spec(); - model_spec2->Clear(); - model_spec2->set_name("dummy"); - model_spec2->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const2 = res2; - json_output = ""; - error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_loading_error); - - // Copy back the model files & try reload - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive | std::filesystem::copy_options::overwrite_existing); - ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req3; - tensorflow::serving::GetModelStatusResponse res3; - - auto model_spec3 = req3.mutable_model_spec(); - model_spec3->Clear(); - model_spec3->set_name("dummy"); - model_spec3->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req3, &res3, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const3 = res3; - json_output = ""; - error_status = 
GetModelStatusImpl::serializeResponse2Json(&response_const3, &json_output); - ASSERT_EQ(error_status, StatusCode::OK); - EXPECT_EQ(json_output, expected_json_available); -} - -TEST_F(TestCustomLoader, CustomLoaderLoadBlackListedModel) { -#ifdef _WIN32 - GTEST_SKIP() << "Test disabled on windows"; -#endif - // Copy dummy model to temporary destination - std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); - - // Create Sample Custom Loader Config - std::string cl_config_file_path = cl_models_path; - std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; - std::string cl_config_file = cl_config_file_path + "/customloader_config"; - createConfigFileWithContent(cl_config_str, cl_config_file); - - // Replace model path in the config string - std::string configStr = custom_loader_config_model_blacklist; - configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); - configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); - - // Create config file - std::string fileToReload = cl_models_path + "/cl_config.json"; - createConfigFileWithContent(configStr, fileToReload); - - // Create status file - std::string status_file_path = cl_model_1_path + "1"; - std::string status_str = "DISABLED"; - std::string status_file = status_file_path + "/dummy.status"; - createConfigFileWithContent(status_str, status_file); - ovms::Status status1 = manager.loadConfig(fileToReload); - ASSERT_TRUE(status1 == ovms::StatusCode::INTERNAL_ERROR); - - tensorflow::serving::GetModelStatusRequest req1; - tensorflow::serving::GetModelStatusResponse res1; - - auto model_spec1 = req1.mutable_model_spec(); - model_spec1->Clear(); - model_spec1->set_name("dummy"); - model_spec1->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), 
StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const1 = res1; - std::string json_output1; - Status error_status1 = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output1); - ASSERT_EQ(error_status1, StatusCode::OK); - EXPECT_EQ(json_output1, expected_json_loading_error); - - // remove enable_file from config file - std::string status_config = ", \"enable_file\": \"dummy.status\""; - configStr.replace(configStr.find(status_config), std::string(status_config).size(), ""); - createConfigFileWithContent(configStr, fileToReload); - - ovms::Status status2 = manager.loadConfig(fileToReload); - ASSERT_TRUE(status2 == ovms::StatusCode::OK); - - tensorflow::serving::GetModelStatusRequest req2; - tensorflow::serving::GetModelStatusResponse res2; - - auto model_spec2 = req2.mutable_model_spec(); - model_spec2->Clear(); - model_spec2->set_name("dummy"); - model_spec2->mutable_version()->set_value(1); - ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); - - const tensorflow::serving::GetModelStatusResponse response_const2 = res2; - std::string json_output2; - Status error_status2 = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output2); - ASSERT_EQ(error_status2, StatusCode::OK); - EXPECT_EQ(json_output2, expected_json_available); -} -*/ - -#pragma GCC diagnostic pop +//***************************************************************************** +// Copyright 2020-2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "../executingstreamidguard.hpp" +#include "../get_model_metadata_impl.hpp" +#include "src/filesystem/localfilesystem.hpp" +#include "../model.hpp" +#include "../model_service.hpp" +#include "../modelinstance.hpp" +#include "../modelinstanceunloadguard.hpp" +#include "../modelmanager.hpp" +#include "../modelversionstatus.hpp" +#include "../prediction_service_utils.hpp" +#include "../schema.hpp" +#include "../sequence_processing_spec.hpp" +#include "mockmodelinstancechangingstates.hpp" +#include "test_utils.hpp" +#include "test_request_utils_tfs.hpp" +#include "light_test_utils.hpp" +#include "platform_utils.hpp" + +using testing::_; +using testing::ContainerEq; +using testing::Each; +using testing::Eq; +using ::testing::NiceMock; +using testing::Return; +using testing::ReturnRef; +using testing::UnorderedElementsAre; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnarrowing" + +using namespace ovms; + +/* +------------------------------------------------ +AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW CONFIGURATIONS ARE NOT USED +REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED +------------------------------------------------- + +namespace { + +// Custom Loader Config Keys +#define ENABLE_FORCE_BLACKLIST_CHECK "ENABLE_FORCE_BLACKLIST_CHECK" + +// config_model_with_customloader +const char* custom_loader_config_model = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": 
"sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_model_with_customloader +const char* custom_loader_config_model_relative_paths = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_no_model_with_customloader +const char* custom_loader_config_model_deleted = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[] + })"; + +// config_2_models_with_customloader +const char* custom_loader_config_model_new = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + }, + { + "config":{ + "name":"dummy-new", + "base_path": "/tmp/test_cl_models/model2", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_model_without_customloader_options +const char* custom_loader_config_model_customloader_options_removed = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1 + } + } + ] + })"; + +const char* 
config_model_with_customloader_options_unknown_loadername = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "unknown", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +// config_model_with_customloader +const char* custom_loader_config_model_multiple = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader-a", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + }, + { + "config":{ + "loader_name":"sample-loader-b", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + }, + { + "config":{ + "loader_name":"sample-loader-c", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy-a", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader-a", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + }, + { + "config":{ + "name":"dummy-b", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader-b", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + }, + { + "config":{ + "name":"dummy-c", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader-c", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + +const char* custom_loader_config_model_blacklist = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so", + "loader_config_file": "sample-loader-config" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy", + "base_path": 
"/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin", "enable_file": "dummy.status"} + } + } + ] + })"; + +const char* empty_config = R"({ + "custom_loader_config_list":[], + "model_config_list":[] + })"; + +const char* expected_json_available = R"({ + "model_version_status": [ + { + "version": "1", + "state": "AVAILABLE", + "status": { + "error_code": "OK", + "error_message": "OK" + } + } + ] +} +)"; + +const char* expected_json_end = R"({ + "model_version_status": [ + { + "version": "1", + "state": "END", + "status": { + "error_code": "OK", + "error_message": "OK" + } + } + ] +} +)"; + +const char* expected_json_loading_error = R"({ + "model_version_status": [ + { + "version": "1", + "state": "LOADING", + "status": { + "error_code": "UNKNOWN", + "error_message": "UNKNOWN" + } + } + ] +} +)"; + +} // namespace + +*/ + +class TestCustomLoader : public ::testing::Test { +public: + void SetUp() { + const ::testing::TestInfo* const test_info = + ::testing::UnitTest::GetInstance()->current_test_info(); + + cl_models_path = getGenericFullPathForTmp("/tmp/" + std::string(test_info->name())); + cl_model_1_path = cl_models_path + "/model1/"; + cl_model_2_path = cl_models_path + "/model2/"; + + const std::string FIRST_MODEL_NAME = "dummy"; + const std::string SECOND_MODEL_NAME = "dummy_new"; + + std::filesystem::remove_all(cl_models_path); + std::filesystem::create_directories(cl_model_1_path); + } + void TearDown() { + // Create config file with an empty config & reload + const char* empty_config = R"({ + "custom_loader_config_list":[], + "model_config_list":[] + })"; + std::string configStr = empty_config; + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + // Clean up temporary destination + 
std::filesystem::remove_all(cl_models_path); + } + + /** + * @brief This function should mimic most closely predict request to check for thread safety + */ + void performPredict(const std::string modelName, + const ovms::model_version_t modelVersion, + const tensorflow::serving::PredictRequest& request, + std::unique_ptr> waitBeforeGettingModelInstance = nullptr, + std::unique_ptr> waitBeforePerformInference = nullptr); + + void deserialize(const std::vector& input, ov::InferRequest& inferRequest, std::shared_ptr modelInstance) { + try { + ov::Tensor tensor( + modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getOvPrecision(), + modelInstance->getInputsInfo().at(DUMMY_MODEL_INPUT_NAME)->getShape().createPartialShape().get_shape(), + const_cast(reinterpret_cast(input.data()))); + inferRequest.set_tensor(DUMMY_MODEL_INPUT_NAME, tensor); + } catch (...) { + ASSERT_TRUE(false) << "exception during deserialize"; + } + } + + void serializeAndCheck(int outputSize, ov::InferRequest& inferRequest) { + std::vector output(outputSize); + ASSERT_THAT(output, Each(Eq(0.))); + auto tensorOutput = inferRequest.get_tensor(DUMMY_MODEL_OUTPUT_NAME); + ASSERT_EQ(tensorOutput.get_byte_size(), outputSize * sizeof(float)); + std::memcpy(output.data(), tensorOutput.data(), outputSize * sizeof(float)); + EXPECT_THAT(output, Each(Eq(2.))); + } + + ovms::Status performInferenceWithRequest(const tensorflow::serving::PredictRequest& request, tensorflow::serving::PredictResponse& response) { + std::shared_ptr model; + std::unique_ptr unload_guard; + auto status = manager.getModelInstance("dummy", 0, model, unload_guard); + if (!status.ok()) { + return status; + } + + response.Clear(); + return model->infer(&request, &response, unload_guard); + } + +public: + ConstructorEnabledModelManager manager; + + ~TestCustomLoader() { + std::cout << "Destructor of TestCustomLoader()" << std::endl; + } + + std::string cl_models_path; + std::string cl_model_1_path; + std::string cl_model_2_path; +}; 
+ +class MockModelInstance : public ovms::ModelInstance { +public: + MockModelInstance(ov::Core& ieCore) : + ModelInstance("UNUSED_NAME", 42, ieCore) {} + const ovms::Status mockValidate(const tensorflow::serving::PredictRequest* request) { + return validate(request); + } +}; + +void TestCustomLoader::performPredict(const std::string modelName, + const ovms::model_version_t modelVersion, + const tensorflow::serving::PredictRequest& request, + std::unique_ptr> waitBeforeGettingModelInstance, + std::unique_ptr> waitBeforePerformInference) { + // only validation is skipped + std::shared_ptr modelInstance; + std::unique_ptr modelInstanceUnloadGuard; + + auto& tensorProto = request.inputs().find("b")->second; + size_t batchSize = tensorProto.tensor_shape().dim(0).size(); + size_t inputSize = 1; + for (int i = 0; i < tensorProto.tensor_shape().dim_size(); i++) { + inputSize *= tensorProto.tensor_shape().dim(i).size(); + } + + if (waitBeforeGettingModelInstance) { + std::cout << "Waiting before getModelInstance. Batch size: " << batchSize << std::endl; + waitBeforeGettingModelInstance->get(); + } + ASSERT_EQ(manager.getModelInstance(modelName, modelVersion, modelInstance, modelInstanceUnloadGuard), ovms::StatusCode::OK); + + if (waitBeforePerformInference) { + std::cout << "Waiting before performInfernce." 
<< std::endl; + waitBeforePerformInference->get(); + } + ovms::Status validationStatus = (std::static_pointer_cast(modelInstance))->mockValidate(&request); + std::cout << validationStatus.string() << std::endl; + ASSERT_TRUE(validationStatus == ovms::StatusCode::OK || + validationStatus == ovms::StatusCode::RESHAPE_REQUIRED || + validationStatus == ovms::StatusCode::BATCHSIZE_CHANGE_REQUIRED); + auto bsPositionIndex = 0; + auto requestBatchSize = ovms::getRequestBatchSize(&request, bsPositionIndex); + auto requestShapes = ovms::getRequestShapes(&request); + ASSERT_EQ(modelInstance->reloadModelIfRequired(validationStatus, requestBatchSize, requestShapes, modelInstanceUnloadGuard), ovms::StatusCode::OK); + + ovms::ExecutingStreamIdGuard executingStreamIdGuard(modelInstance->getInferRequestsQueue(), modelInstance->getMetricReporter()); + ov::InferRequest& inferRequest = executingStreamIdGuard.getInferRequest(); + std::vector input(inputSize); + std::generate(input.begin(), input.end(), []() { return 1.; }); + ASSERT_THAT(input, Each(Eq(1.))); + deserialize(input, inferRequest, modelInstance); + auto status = modelInstance->performInference(inferRequest); + ASSERT_EQ(status, ovms::StatusCode::OK); + size_t outputSize = batchSize * DUMMY_MODEL_OUTPUT_SIZE; + serializeAndCheck(outputSize, inferRequest); +} + +// Schema Validation + +TEST_F(TestCustomLoader, CustomLoaderConfigMatchingSchema) { + const char* customloaderConfigMatchingSchema = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"dummy-loader", + "library_path": "/tmp/loader/dummyloader", + "loader_config_file": "dummyloader-config" + } + } + ], + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options": {"loader_name": "dummy-loader"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigMatchingSchemaParsed; + customloaderConfigMatchingSchemaParsed.Parse(customloaderConfigMatchingSchema); + auto 
result = ovms::validateJsonAgainstSchema(customloaderConfigMatchingSchemaParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::OK); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderName) { + const char* customloaderConfigMissingLoaderName = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "library_path": "dummyloader", + "loader_config_file": "dummyloader-config" + } + } + ], + "model_config_list": [] + } + )"; + + rapidjson::Document customloaderConfigMissingLoaderNameParsed; + customloaderConfigMissingLoaderNameParsed.Parse(customloaderConfigMissingLoaderName); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLibraryPath) { + const char* customloaderConfigMissingLibraryPath = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"dummy-loader", + "loader_config_file": "dummyloader-config" + } + } + ], + "model_config_list": [] + } + )"; + + rapidjson::Document customloaderConfigMissingLibraryPathParsed; + customloaderConfigMissingLibraryPathParsed.Parse(customloaderConfigMissingLibraryPath); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLibraryPathParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderConfig) { + const char* customloaderConfigMissingLoaderConfig = R"( + { + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"dummy-loader", + "library_path": "dummyloader" + } + } + ], + "model_config_list": [] + } + )"; + + rapidjson::Document customloaderConfigMissingLoaderConfigParsed; + customloaderConfigMissingLoaderConfigParsed.Parse(customloaderConfigMissingLoaderConfig); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderConfigParsed, 
ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::OK); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigInvalidCustomLoaderConfig) { + const char* customloaderConfigInvalidCustomLoaderConfig = R"( + { + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options_invalid": {"loader_name": "dummy-loader"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigInvalidCustomLoaderConfigParsed; + customloaderConfigInvalidCustomLoaderConfigParsed.Parse(customloaderConfigInvalidCustomLoaderConfig); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigInvalidCustomLoaderConfigParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMissingLoaderNameInCustomLoaderOptions) { + const char* customloaderConfigMissingLoaderNameInCustomLoaderOptions = R"( + { + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options": {"a": "SS"} + } + } + ] + } + )"; + + rapidjson::Document customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed; + customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed.Parse(customloaderConfigMissingLoaderNameInCustomLoaderOptions); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMissingLoaderNameInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::JSON_INVALID); +} + +TEST_F(TestCustomLoader, CustomLoaderConfigMultiplePropertiesInCustomLoaderOptions) { + const char* customloaderConfigMultiplePropertiesInCustomLoaderOptions = R"( + { + "model_config_list":[ + { + "config":{ + "name":"dummy-loader-model", + "base_path": "/tmp/models/dummy1", + "custom_loader_options": {"loader_name": "dummy-loader", "1": "a", "2": "b", "3": "c", "4":"d", "5":"e", "6":"f"} + } + } + ] + } + )"; + + rapidjson::Document 
customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed; + customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed.Parse(customloaderConfigMultiplePropertiesInCustomLoaderOptions); + auto result = ovms::validateJsonAgainstSchema(customloaderConfigMultiplePropertiesInCustomLoaderOptionsParsed, ovms::MODELS_CONFIG_SCHEMA.c_str()); + EXPECT_EQ(result, ovms::StatusCode::OK); +} + +// Functional Validation +/* +------------------------------------------------ +AFTER SAMPLE CUSTOM LOADER REMOVAL BELOW TESTS ARE NOT VALID +REMOVE THIS ENTIRE FILE ONCE THE FEATURE IS REMOVED +------------------------------------------------- +TEST_F(TestCustomLoader, CustomLoaderPrediction) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictionRelativePath) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/bazel-bin/src/libsampleloader.so"), 
cl_models_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_relative_paths; + configStr.replace(configStr.find("test_cl_models"), std::string("test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderGetStatus) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, 
&json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictDeletePredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + tensorflow::serving::PredictResponse response; + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::OK); + + // Re-create config file + createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictNewVersionPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create 
config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Copy version 1 to version 2 + std::filesystem::create_directories(cl_model_1_path + "2"); + std::filesystem::copy(cl_model_1_path + "1", cl_model_1_path + "2", std::filesystem::copy_options::recursive); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 2, request); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictNewModelPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Copy model1 to model2 + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_2_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config 
string + configStr = custom_loader_config_model_new; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Re-create config file + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + performPredict("dummy-new", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictRemoveCustomLoaderOptionsPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Replace model path in the config string + configStr = custom_loader_config_model_customloader_options_removed; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Re-create config file + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), 
ovms::StatusCode::OK); + + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, PredictNormalModelAddCustomLoaderOptionsPredict) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_customloader_options_removed; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); + + // Replace model path in the config string + configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderOptionWithUnknownLibrary) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = config_model_with_customloader_options_unknown_loadername; + 
configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + tensorflow::serving::PredictResponse response; + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); +} + +TEST_F(TestCustomLoader, CustomLoaderWithMissingModelFiles) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + tensorflow::serving::PredictResponse response; + ASSERT_EQ(performInferenceWithRequest(request, response), ovms::StatusCode::MODEL_VERSION_MISSING); +} + +TEST_F(TestCustomLoader, CustomLoaderGetStatusDeleteModelGetStatus) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + 
configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // Re-create config file + createConfigFileWithContent(custom_loader_config_model_deleted, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest reqx; + tensorflow::serving::GetModelStatusResponse resx; + + auto model_specx = reqx.mutable_model_spec(); + model_specx->Clear(); + model_specx->set_name("dummy"); + model_specx->mutable_version()->set_value(1); + + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_constx = resx; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); +} + +TEST_F(TestCustomLoader, CustomLoaderPredictionUsingManyCustomLoaders) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to 
temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_multiple; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + + performPredict("dummy-a", 1, request); + performPredict("dummy-b", 1, request); + performPredict("dummy-c", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderGetMetaData) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + const char* expected_json = R"({ + "modelSpec": { + "name": "dummy", + "signatureName": "", + "version": "1" + }, + "metadata": { + "signature_def": { + "@type": "type.googleapis.com/tensorflow.serving.SignatureDefMap", + "signatureDef": { + "serving_default": { + "inputs": { + "b": { + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "1", + "name": "" + }, + { + "size": "10", + "name": "" + } + ], + "unknownRank": false + }, + "name": "b" + } + }, + "outputs": { + "a": { + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "1", + "name": "" + }, + { + "size": "10", + "name": "" + } + ], + "unknownRank": false + }, + "name": "a" + } + }, + "methodName": "", + "defaults": {} + } + } + } + } +} +)"; + + // 
Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + std::shared_ptr model; + std::unique_ptr unload_guard; + ASSERT_EQ(manager.getModelInstance("dummy", 1, model, unload_guard), ovms::StatusCode::OK); + + tensorflow::serving::GetModelMetadataResponse response; + ovms::GetModelMetadataImpl::buildResponse(model, &response); + + std::string json_output = ""; + ovms::GetModelMetadataImpl::serializeResponse2Json(&response, &json_output); + + EXPECT_TRUE(response.has_model_spec()); + EXPECT_EQ(response.model_spec().name(), "dummy"); + + tensorflow::serving::SignatureDefMap def; + response.metadata().at("signature_def").UnpackTo(&def); + + const auto& inputs = ((*def.mutable_signature_def())["serving_default"]).inputs(); + const auto& outputs = ((*def.mutable_signature_def())["serving_default"]).outputs(); + + EXPECT_EQ(inputs.size(), 1); + EXPECT_EQ(outputs.size(), 1); + EXPECT_EQ(json_output, expected_json); +} + +TEST_F(TestCustomLoader, CustomLoaderMultipleLoaderWithSameLoaderName) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + const char* custom_loader_config_model_xx = R"({ + "custom_loader_config_list":[ + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + }, + { + "config":{ + "loader_name":"sample-loader", + "library_path": "/ovms/bazel-bin/src/libsampleloader.so" + } + } + ], + "model_config_list":[ + { + "config":{ + 
"name":"dummy", + "base_path": "/tmp/test_cl_models/model1", + "nireq": 1, + "custom_loader_options": {"loader_name": "sample-loader", "model_file": "dummy.xml", "bin_file": "dummy.bin"} + } + } + ] + })"; + + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_xx; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::PredictRequest request; + preparePredictRequest(request, + {{DUMMY_MODEL_INPUT_NAME, + std::tuple{{1, 10}, ovms::Precision::FP32}}}); + performPredict("dummy", 1, request); +} + +TEST_F(TestCustomLoader, CustomLoaderBlackListingModel) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // 
Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // copy status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest reqx; + tensorflow::serving::GetModelStatusResponse resx; + + auto model_specx = reqx.mutable_model_spec(); + model_specx->Clear(); + model_specx->set_name("dummy"); + model_specx->mutable_version()->set_value(1); + + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&reqx, &resx, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_constx = resx; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_constx, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); +} + +TEST_F(TestCustomLoader, CustomLoaderBlackListingRevoke) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary 
destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // copy status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + 
tensorflow::serving::GetModelStatusRequest req1; + tensorflow::serving::GetModelStatusResponse res1; + + auto model_spec1 = req1.mutable_model_spec(); + model_spec1->Clear(); + model_spec1->set_name("dummy"); + model_spec1->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const1 = res1; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); + + // Remove status file + std::filesystem::remove(status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req2; + tensorflow::serving::GetModelStatusResponse res2; + + auto model_spec2 = req2.mutable_model_spec(); + model_spec2->Clear(); + model_spec2->set_name("dummy"); + model_spec2->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const2 = res2; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); +} + +TEST_F(TestCustomLoader, CustomLoaderBlackListModelReloadError) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + 
createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req; + tensorflow::serving::GetModelStatusResponse res; + + auto model_spec = req.mutable_model_spec(); + model_spec->Clear(); + model_spec->set_name("dummy"); + model_spec->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req, &res, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const = res; + std::string json_output; + Status error_status = GetModelStatusImpl::serializeResponse2Json(&response_const, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); + + // copy status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req1; + tensorflow::serving::GetModelStatusResponse res1; + + auto model_spec1 = req1.mutable_model_spec(); + model_spec1->Clear(); + model_spec1->set_name("dummy"); + model_spec1->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const 
tensorflow::serving::GetModelStatusResponse response_const1 = res1; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_end); + + // Remove status file & the Dummy.bin file + std::filesystem::remove(status_file); + std::string bin_file = status_file_path + "/dummy.bin"; + std::filesystem::remove(bin_file); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::FILE_INVALID); + + tensorflow::serving::GetModelStatusRequest req2; + tensorflow::serving::GetModelStatusResponse res2; + + auto model_spec2 = req2.mutable_model_spec(); + model_spec2->Clear(); + model_spec2->set_name("dummy"); + model_spec2->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const2 = res2; + json_output = ""; + error_status = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_loading_error); + + // Copy back the model files & try reload + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive | std::filesystem::copy_options::overwrite_existing); + ASSERT_EQ(manager.loadConfig(fileToReload), ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req3; + tensorflow::serving::GetModelStatusResponse res3; + + auto model_spec3 = req3.mutable_model_spec(); + model_spec3->Clear(); + model_spec3->set_name("dummy"); + model_spec3->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req3, &res3, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const3 = res3; + json_output = ""; + error_status = 
GetModelStatusImpl::serializeResponse2Json(&response_const3, &json_output); + ASSERT_EQ(error_status, StatusCode::OK); + EXPECT_EQ(json_output, expected_json_available); +} + +TEST_F(TestCustomLoader, CustomLoaderLoadBlackListedModel) { +#ifdef _WIN32 + GTEST_SKIP() << "Test disabled on windows"; +#endif + // Copy dummy model to temporary destination + std::filesystem::copy(getGenericFullPathForSrcTest("/ovms/src/test/dummy"), cl_model_1_path, std::filesystem::copy_options::recursive); + + // Create Sample Custom Loader Config + std::string cl_config_file_path = cl_models_path; + std::string cl_config_str = ENABLE_FORCE_BLACKLIST_CHECK; + std::string cl_config_file = cl_config_file_path + "/customloader_config"; + createConfigFileWithContent(cl_config_str, cl_config_file); + + // Replace model path in the config string + std::string configStr = custom_loader_config_model_blacklist; + configStr.replace(configStr.find("/tmp/test_cl_models"), std::string("/tmp/test_cl_models").size(), cl_models_path); + configStr.replace(configStr.find("sample-loader-config"), std::string("sample-loader-config").size(), cl_config_file); + + // Create config file + std::string fileToReload = cl_models_path + "/cl_config.json"; + createConfigFileWithContent(configStr, fileToReload); + + // Create status file + std::string status_file_path = cl_model_1_path + "1"; + std::string status_str = "DISABLED"; + std::string status_file = status_file_path + "/dummy.status"; + createConfigFileWithContent(status_str, status_file); + ovms::Status status1 = manager.loadConfig(fileToReload); + ASSERT_TRUE(status1 == ovms::StatusCode::INTERNAL_ERROR); + + tensorflow::serving::GetModelStatusRequest req1; + tensorflow::serving::GetModelStatusResponse res1; + + auto model_spec1 = req1.mutable_model_spec(); + model_spec1->Clear(); + model_spec1->set_name("dummy"); + model_spec1->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req1, &res1, manager, DEFAULT_TEST_CONTEXT), 
StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const1 = res1; + std::string json_output1; + Status error_status1 = GetModelStatusImpl::serializeResponse2Json(&response_const1, &json_output1); + ASSERT_EQ(error_status1, StatusCode::OK); + EXPECT_EQ(json_output1, expected_json_loading_error); + + // remove enable_file from config file + std::string status_config = ", \"enable_file\": \"dummy.status\""; + configStr.replace(configStr.find(status_config), std::string(status_config).size(), ""); + createConfigFileWithContent(configStr, fileToReload); + + ovms::Status status2 = manager.loadConfig(fileToReload); + ASSERT_TRUE(status2 == ovms::StatusCode::OK); + + tensorflow::serving::GetModelStatusRequest req2; + tensorflow::serving::GetModelStatusResponse res2; + + auto model_spec2 = req2.mutable_model_spec(); + model_spec2->Clear(); + model_spec2->set_name("dummy"); + model_spec2->mutable_version()->set_value(1); + ASSERT_EQ(GetModelStatusImpl::getModelStatus(&req2, &res2, manager, DEFAULT_TEST_CONTEXT), StatusCode::OK); + + const tensorflow::serving::GetModelStatusResponse response_const2 = res2; + std::string json_output2; + Status error_status2 = GetModelStatusImpl::serializeResponse2Json(&response_const2, &json_output2); + ASSERT_EQ(error_status2, StatusCode::OK); + EXPECT_EQ(json_output2, expected_json_available); +} +*/ + +#pragma GCC diagnostic pop