diff --git a/BUILD.gn b/BUILD.gn index bc1599a46337dc6058a1d439c31e2c3ce2ea9d92..a6f086602841b997ab54e63d092cd195d4b2530d 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -17,7 +17,7 @@ group("nnrt_target") { deps = [ "frameworks:libneural_network_runtime" ] } -group("nnrt_test_target") { - testonly = true - deps = [ "test/unittest:unittest" ] -} +# group("nnrt_test_target") { +# testonly = true +# deps = [ "test/unittest:unittest" ] +# } diff --git a/bundle.json b/bundle.json index 06ee22904e6c1691f5b05235fe132cab1cd5e78b..f95ed56f43d37ce745d2a0a1642455b324bbe3d5 100644 --- a/bundle.json +++ b/bundle.json @@ -41,9 +41,6 @@ "header_base":"//foundation/ai/neural_network_runtime/interfaces/innerkits/c" } } - ], - "test": [ - "//foundation/ai/neural_network_runtime:nnrt_test_target" ] } } diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index 321fb2f9e1c5e4c8319eca89ca275ef9dd641ad6..f5096496af83fb0f0afee25074cb4e63a7a4510c 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -23,8 +23,10 @@ nnrt_sources = [ "native/device_registrar.cpp", "native/execution_plan.cpp", "native/executor.cpp", - "native/hdi_device.cpp", - "native/hdi_prepared_model.cpp", + "native/hdi_device_v1_0.cpp", + "native/hdi_device_v2_0.cpp", + "native/hdi_prepared_model_v1_0.cpp", + "native/hdi_prepared_model_v2_0.cpp", "native/inner_model.cpp", "native/memory_manager.cpp", "native/neural_network_runtime.cpp", @@ -122,6 +124,7 @@ ohos_shared_library("libneural_network_runtime") { external_deps = [ "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", + "drivers_interface_nnrt:libnnrt_proxy_2.0", "hdf_core:libhdf_utils", "hilog_native:libhilog", "hitrace_native:libhitracechain", diff --git a/frameworks/native/device.h b/frameworks/native/device.h index 93415e4bb527e26049cba0563cc3e86bcfa4b148..c34e0432d139de3c9d54934b8f9f2a10620746d8 100644 --- a/frameworks/native/device.h +++ b/frameworks/native/device.h @@ -34,6 +34,7 @@ public: virtual OH_NN_ReturnCode GetDeviceName(std::string& name) = 0; virtual OH_NN_ReturnCode GetVendorName(std::string& name) = 0; + virtual OH_NN_ReturnCode GetVersion(std::string& version) = 0; virtual OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) = 0; virtual OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) = 0; virtual OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, diff --git a/frameworks/native/device_manager.cpp b/frameworks/native/device_manager.cpp index 6ad79bbc5ffe954305079386c3e6c42c1dcacd85..25961e166e9e9a764db46c863e413b24fb41e66f 100644 --- a/frameworks/native/device_manager.cpp +++ b/frameworks/native/device_manager.cpp @@ -15,8 +15,6 @@ #include "device_manager.h" -#include "hdi_interfaces.h" -#include "hdi_device.h" #include "common/log.h" #include "common/utils.h" @@ -70,13 +68,21 @@ const std::string& DeviceManager::GetDeviceName(size_t deviceId) return m_tmpDeviceName; } - m_tmpDeviceName = GenUniqueName(deviceName, vendorName); + std::string version; + ret = iter->second->GetVersion(version); + if (ret != OH_NN_SUCCESS) { + LOGE("Get version failed."); + return m_tmpDeviceName; + } + + m_tmpDeviceName = GenUniqueName(deviceName, vendorName, version); return m_tmpDeviceName; } -std::string DeviceManager::GenUniqueName(const std::string& deviceName, const std::string& vendorName) const +std::string DeviceManager::GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const { - return deviceName + "_" + vendorName; + return deviceName + "_" + vendorName + "_" + version; } 
OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function()> creator) @@ -106,8 +112,15 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::functionGetVersion(version); + if (ret != OH_NN_SUCCESS) { + LOGE("Get version failed."); + return ret; + } + const std::lock_guard lock(m_mtx); - std::string uniqueName = GenUniqueName(deviceName, vendorName); + std::string uniqueName = GenUniqueName(deviceName, vendorName, version); auto setResult = m_uniqueName.emplace(uniqueName); if (!setResult.second) { LOGE("Device already exists, cannot register again. deviceName=%s, vendorName=%s", @@ -119,45 +132,6 @@ OH_NN_ReturnCode DeviceManager::RegisterDevice(std::function iDevice = V1_0::INnrtDevice::Get(); - if (iDevice == nullptr) { - LOGW("Get HDI device failed."); - return; - } - - std::string deviceName; - std::string vendorName; - auto hdiRet = iDevice->GetDeviceName(deviceName); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get device name failed. ErrorCode=%d", hdiRet); - return; - } - hdiRet = iDevice->GetVendorName(vendorName); - if (hdiRet != HDF_SUCCESS) { - LOGW("Get vendor name failed. ErrorCode=%d", hdiRet); - return; - } - - std::string uniqueName = GenUniqueName(deviceName, vendorName); - const std::lock_guard lock(m_mtx); - auto setResult = m_uniqueName.emplace(uniqueName); - if (!setResult.second) { - LOGW("Device already exists, cannot register again. deviceName=%s, vendorName=%s", - deviceName.c_str(), vendorName.c_str()); - return; - } - - std::shared_ptr device = CreateSharedPtr(iDevice); - if (device == nullptr) { - LOGW("Failed to register device, because fail to create device instance."); - return; - } - m_devices.emplace(std::hash{}(uniqueName), device); -} - bool DeviceManager::IsValidDevice(std::shared_ptr device) const { DeviceStatus status {DeviceStatus::UNKNOWN}; diff --git a/frameworks/native/device_manager.h b/frameworks/native/device_manager.h index 20d4bf05834c6cbac02191ffa1f743730ebbfb9e..2885a5dede7aba0c9dedd2fa7adb5bc141da6f9f 100644 --- a/frameworks/native/device_manager.h +++ b/frameworks/native/device_manager.h @@ -40,7 +40,6 @@ public: static DeviceManager& GetInstance() { static DeviceManager instance; - instance.DiscoverHDIDevices(); return instance; } @@ -49,8 +48,8 @@ private: DeviceManager(const DeviceManager&) = delete; DeviceManager& operator=(const DeviceManager&) = delete; - void DiscoverHDIDevices(); - std::string GenUniqueName(const std::string& deviceName, const std::string& vendorName) const; + std::string GenUniqueName( + const std::string& deviceName, const std::string& vendorName, const std::string& version) const; bool IsValidDevice(std::shared_ptr device) const; private: diff --git a/frameworks/native/device_registrar.h b/frameworks/native/device_registrar.h index a9645299821087407c45202087110eca1b8365ea..521a075a336db601d839d10eafe5ca1455c0ca34 100644 --- a/frameworks/native/device_registrar.h +++ b/frameworks/native/device_registrar.h @@ -34,7 +34,7 @@ public: #define REGISTER_DEVICE(deviceName, vendorName, creator) \ namespace { \ - static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator) \ + static OHOS::NeuralNetworkRuntime::DeviceRegistrar g_##deviceName##_##vendorName##_device_registrar(creator); \ } // namespace } // namespace NeuralNetworkRuntime } // OHOS diff --git a/frameworks/native/execution_plan.cpp b/frameworks/native/execution_plan.cpp index b1ddfe3ac53676fb016bd9aeebd9bd1deaabfd01..20c4322cf41a50c644ce755c58afaeb9cb69fb66 100644 --- a/frameworks/native/execution_plan.cpp +++ 
b/frameworks/native/execution_plan.cpp @@ -23,6 +23,19 @@ namespace OHOS { namespace NeuralNetworkRuntime { +OH_NN_ReturnCode ExecutionPlan::GetInputDimRanges(uint32_t index, std::vector& minInputDims, + std::vector& maxInputDims) +{ + // TODO: delegate to m_preparedModel->GetInputDimRanges() once it is implemented for the device; + // until then, report OH_NN_OPERATION_FORBIDDEN so that callers skip the dimension-range check. + // OH_NN_ReturnCode ret = m_preparedModel->GetInputDimRanges(index, minInputDims, maxInputDims); + // if (ret != OH_NN_SUCCESS) { + // LOGE("ExecutionPlan GetInputDimRanges() failed."); + // return ret; + // } + + return OH_NN_OPERATION_FORBIDDEN; +} + OH_NN_ReturnCode ExecutionPlan::Run(const std::vector>& inputTensors, std::vector>& outputTensors) { diff --git a/frameworks/native/execution_plan.h b/frameworks/native/execution_plan.h index 9644a321d12b0120f24af6835e7eb035548c7445..0ec9c93d3e40221380ee0903de7e0e5a746b37f4 100644 --- a/frameworks/native/execution_plan.h +++ b/frameworks/native/execution_plan.h @@ -29,6 +29,10 @@ public: ExecutionPlan(std::shared_ptr preparedModel, std::shared_ptr device) : m_preparedModel(preparedModel), m_device(device) {}; + + OH_NN_ReturnCode GetInputDimRanges(uint32_t index, + std::vector& minInputDims, + std::vector& maxInputDims); OH_NN_ReturnCode Run(const std::vector>& inputTensors, std::vector>& outputTensors); diff --git a/frameworks/native/executor.cpp b/frameworks/native/executor.cpp index f99d28cdb1869484968217fb5b03c927e6e7b3f7..be21e80b97a0c7e5c27abd461027ae4eb8c69a84 100644 --- a/frameworks/native/executor.cpp +++ b/frameworks/native/executor.cpp @@ -19,6 +19,7 @@ #include "common/utils.h" #include "common/scoped_trace.h" +#include "transform.h" namespace OHOS { @@ -113,8 +114,50 @@ void Executor::SetInputTensorWithNewBuffer(uint32_t index, } +OH_NN_ReturnCode Executor::CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const +{ + std::vector minInputDims; + std::vector maxInputDims; + auto ret = m_executionPlan->GetInputDimRanges(index, minInputDims, maxInputDims); + if (ret != OH_NN_SUCCESS) { + LOGE("Get the dimension ranges of input %u failed. ErrorCode=%d", index, ret); + return ret; + } + + std::vector tensorShape = ConstructVectorFromArray(nnTensor.dimensions, nnTensor.dimensionCount); + if ((minInputDims.size() != maxInputDims.size()) || (maxInputDims.size() != tensorShape.size())) { + LOGE("Size of minInputDims, maxInputDims and tensorShape of input %u are not equal.", index); + return OH_NN_INVALID_PARAMETER; + } + + for (size_t j = 0; j < tensorShape.size(); ++j) { + // Dimensions cannot be negative + if (tensorShape[j] < 0) { + LOGE("Dimension %zu of input %u is %d.", j, index, tensorShape[j]); + return OH_NN_INVALID_PARAMETER; + } + uint32_t dim = static_cast(tensorShape[j]); + if (dim < minInputDims[j] || dim > maxInputDims[j]) { + LOGE("The %zuth dimension of the %uth input is %u, which is out of range [%u, %u].", + j, index, dim, minInputDims[j], maxInputDims[j]); + return OH_NN_INVALID_PARAMETER; + } + } + + return OH_NN_SUCCESS; +} + + OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor, const void* buffer, size_t length) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input dimension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInput failed, checking the dimension ranges of the %uth input failed.", index); + return nnRet; + } + std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { LOGE("SetInput failed, error happened when creating NNTensor."); @@ -181,6 +224,14 @@ OH_NN_ReturnCode Executor::SetInput(uint32_t index, const OH_NN_Tensor& nnTensor OH_NN_ReturnCode Executor::SetInputFromMemory(uint32_t index, const OH_NN_Tensor& nnTensor, const OH_NN_Memory& memory) { + auto nnRet = CheckInputDimRanges(index, nnTensor); + if (nnRet == OH_NN_OPERATION_FORBIDDEN) { + LOGI("Skip input dimension bounds check."); + } else if (nnRet != OH_NN_SUCCESS) { + LOGE("SetInputFromMemory failed, checking the dimension ranges of the %uth input failed.", index); + return nnRet; + } + // Build an input tensor std::shared_ptr inputTensor = CreateSharedPtr(); if (inputTensor == nullptr) { diff --git a/frameworks/native/executor.h b/frameworks/native/executor.h index f7a98eb094f35c4235e8f04b5d92e9746f7dff63..c7b2061e911800cef2efcf80bbec771e9b50b6dd 100644 --- a/frameworks/native/executor.h +++ b/frameworks/native/executor.h @@ -49,6 +49,7 @@ private: const void* buffer, size_t dataLength, size_t curBufferLength); void SetInputTensorWithNewBuffer(uint32_t index, std::shared_ptr inputTensor, const void* inputBuffer, size_t length, bool isInnerMem); + OH_NN_ReturnCode CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const; private: struct ExeTensor { diff --git a/frameworks/native/hdi_device.cpp b/frameworks/native/hdi_device_v1_0.cpp similarity index 68% rename from frameworks/native/hdi_device.cpp rename to frameworks/native/hdi_device_v1_0.cpp index b360ea73145b6c41fcb3324d0b3812a5ef155e4c..aace512f48137b93dd3a67e8999e18ca8b1d9e90 100644 --- a/frameworks/native/hdi_device.cpp +++ b/frameworks/native/hdi_device_v1_0.cpp @@ -13,12 +13,12 @@ * limitations under the License. 
*/ -#include "hdi_device.h" +#include "hdi_device_v1_0.h" #include "hdf_base.h" #include "mindir.h" -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "memory_manager.h" #include "transform.h" #include "common/log.h" @@ -26,12 +26,72 @@ namespace OHOS { namespace NeuralNetworkRuntime { -HDIDevice::HDIDevice(OHOS::sptr device) : m_iDevice(device) +namespace { +OH_NN_DeviceType TransHDIDeviceV1_0Type(const V1_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V1_0::DeviceType::CPU: + return OH_NN_CPU; + case V1_0::DeviceType::GPU: + return OH_NN_GPU; + case V1_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV1_0Status(const V1_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V1_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V1_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V1_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V1_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V1_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V1_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V1_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V1_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V1_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V1_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V1_0::Priority::PRIORITY_HIGH; + default: + return V1_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr device) : m_iDevice(device) { device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name) { auto ret = m_iDevice->GetDeviceName(name); if (ret != HDF_SUCCESS) { @@ -41,7 +101,7 @@ OH_NN_ReturnCode HDIDevice::GetDeviceName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) +OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name) { auto ret = m_iDevice->GetVendorName(name); if (ret != HDF_SUCCESS) { @@ -51,7 +111,13 @@ OH_NN_ReturnCode HDIDevice::GetVendorName(std::string& name) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) +OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType) { V1_0::DeviceType iDeviceType; auto ret = m_iDevice->GetDeviceType(iDeviceType); @@ -60,11 +126,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceType(OH_NN_DeviceType& deviceType) return OH_NN_UNAVALIDABLE_DEVICE; } - deviceType = HDIToNN::TransHDIDeviceType(iDeviceType); + deviceType = TransHDIDeviceV1_0Type(iDeviceType); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) +OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status) { V1_0::DeviceStatus iDeviceStatus; auto ret = 
m_iDevice->GetDeviceStatus(iDeviceStatus); @@ -72,11 +138,11 @@ OH_NN_ReturnCode HDIDevice::GetDeviceStatus(DeviceStatus& status) LOGE("Get HDI device status failed. ErrorCode=%d", ret); return OH_NN_UNAVALIDABLE_DEVICE; } - status = HDIToNN::TransHDIDeviceStatus(iDeviceStatus); + status = TransHDIDeviceV1_0Status(iDeviceStatus); return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr model, std::vector& ops) { if (model == nullptr) { @@ -117,7 +183,7 @@ OH_NN_ReturnCode HDIDevice::GetSupportedOperation(std::shared_ptrIsFloat16PrecisionSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -127,7 +193,7 @@ OH_NN_ReturnCode HDIDevice::IsFloat16PrecisionSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported) { auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -137,7 +203,7 @@ OH_NN_ReturnCode HDIDevice::IsPerformanceModeSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported) { auto ret = m_iDevice->IsPrioritySupported(isSupported); if (ret != HDF_SUCCESS) { @@ -147,7 +213,7 @@ OH_NN_ReturnCode HDIDevice::IsPrioritySupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsDynamicInputSupported(bool& isSupported) { auto ret = m_iDevice->IsDynamicInputSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -157,7 +223,7 @@ OH_NN_ReturnCode HDIDevice::IsDynamicInputSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) +OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported) { auto ret = m_iDevice->IsModelCacheSupported(isSupported); if (ret != HDF_SUCCESS) { @@ -167,7 +233,7 @@ OH_NN_ReturnCode HDIDevice::IsModelCacheSupported(bool& isSupported) return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr model, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr model, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -196,8 +262,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr iPreparedModel; auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); @@ -213,7 +279,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model failed, because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -222,7 +288,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModel(std::shared_ptr& modelCache, +OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector& modelCache, const ModelConfig& config, std::shared_ptr& preparedModel) { @@ -242,8 +308,8 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector iPreparedModel; auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); @@ -252,7 +318,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector(iPreparedModel); + preparedModel = CreateSharedPtr(iPreparedModel); if (preparedModel == nullptr) { LOGE("Prepare model from model cache failed, 
because fail to create preparedModel instance."); return OH_NN_MEMORY_ERROR; @@ -260,7 +326,7 @@ OH_NN_ReturnCode HDIDevice::PrepareModelFromModelCache(const std::vector HDIDeviceV1_0Creator() +{ + // only one device from HDI now + OHOS::sptr iDevice = V1_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGE("Get HDI device failed."); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGE("Create device failed."); + } + + return device; +} + +REGISTER_DEVICE(DeviceV1_0, VendorV1_0, HDIDeviceV1_0Creator) } // namespace NeuralNetworkRuntime } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device.h b/frameworks/native/hdi_device_v1_0.h similarity index 84% rename from frameworks/native/hdi_device.h rename to frameworks/native/hdi_device_v1_0.h index d795832e4ff5456b3fa1cb050831d35a713ae121..e28beb18c0fbb8e8601154f9b949a74d8213bfbc 100644 --- a/frameworks/native/hdi_device.h +++ b/frameworks/native/hdi_device_v1_0.h @@ -13,22 +13,28 @@ * limitations under the License. */ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H -#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H #include "refbase.h" -#include "hdi_interfaces.h" +#include +#include +#include #include "device.h" namespace OHOS { namespace NeuralNetworkRuntime { -class HDIDevice : public Device { + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; + +class HDIDeviceV1_0 : public Device { public: - explicit HDIDevice(OHOS::sptr device); + explicit HDIDeviceV1_0(OHOS::sptr device); OH_NN_ReturnCode GetDeviceName(std::string& name) override; OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, @@ -60,4 +66,4 @@ private: }; } // namespace NeuralNetworkRuntime } // namespace OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.cpp b/frameworks/native/hdi_device_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..32fe5d002816874e16ed6504661af88c048f66fa --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.cpp @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_device_v2_0.h" + +#include "hdf_base.h" +#include "mindir.h" + +#include "device_registrar.h" +#include "prepared_model.h" // todo +#include "memory_manager.h" +#include "transform.h" +#include "common/log.h" +#include "common/utils.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +OH_NN_DeviceType TransHDIDeviceV2_0Type(const V2_0::DeviceType& iDeviceType) +{ + switch (iDeviceType) { + case V2_0::DeviceType::CPU: + return OH_NN_CPU; + case V2_0::DeviceType::GPU: + return OH_NN_GPU; + case V2_0::DeviceType::ACCELERATOR: + return OH_NN_ACCELERATOR; + default: + return OH_NN_OTHERS; + } +} + +DeviceStatus TransHDIDeviceV2_0Status(const V2_0::DeviceStatus& iDeviceStatus) +{ + switch (iDeviceStatus) { + case V2_0::DeviceStatus::AVAILABLE: + return DeviceStatus::AVAILABLE; + case V2_0::DeviceStatus::BUSY: + return DeviceStatus::BUSY; + case V2_0::DeviceStatus::OFFLINE: + return DeviceStatus::OFFLINE; + default: + return DeviceStatus::UNKNOWN; + } +} + +V2_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode) +{ + switch (mode) { + case OH_NN_PERFORMANCE_LOW: + return V2_0::PerformanceMode::PERFORMANCE_LOW; + case OH_NN_PERFORMANCE_MEDIUM: + return V2_0::PerformanceMode::PERFORMANCE_MEDIUM; + case OH_NN_PERFORMANCE_HIGH: + return V2_0::PerformanceMode::PERFORMANCE_HIGH; + case OH_NN_PERFORMANCE_EXTREME: + return V2_0::PerformanceMode::PERFORMANCE_EXTREME; + default: + return V2_0::PerformanceMode::PERFORMANCE_NONE; + } +} + +V2_0::Priority TransPriority(const OH_NN_Priority& priority) +{ + switch (priority) { + case OH_NN_PRIORITY_LOW: + return V2_0::Priority::PRIORITY_LOW; + case OH_NN_PRIORITY_MEDIUM: + return V2_0::Priority::PRIORITY_MEDIUM; + case OH_NN_PRIORITY_HIGH: + return V2_0::Priority::PRIORITY_HIGH; + default: + return V2_0::Priority::PRIORITY_NONE; + } +} +} + +HDIDeviceV2_0::HDIDeviceV2_0(OHOS::sptr device) : m_iDevice(device) +{ + device->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceName(std::string& name) +{ + auto ret = m_iDevice->GetDeviceName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVendorName(std::string& name) +{ + auto ret = m_iDevice->GetVendorName(name); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetVersion(std::string& version) +{ + version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceType(OH_NN_DeviceType& deviceType) +{ + V2_0::DeviceType iDeviceType; + auto ret = m_iDevice->GetDeviceType(iDeviceType); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device type failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + deviceType = TransHDIDeviceV2_0Type(iDeviceType); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetDeviceStatus(DeviceStatus& status) +{ + V2_0::DeviceStatus iDeviceStatus; + auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus); + if (ret != HDF_SUCCESS) { + LOGE("Get HDI device status failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + status = TransHDIDeviceV2_0Status(iDeviceStatus); + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::GetSupportedOperation(std::shared_ptr model, + std::vector& ops) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot query supported operation."); + return OH_NN_NULL_PTR; + } + + V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + auto iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (hdiRet != HDF_SUCCESS) { + LOGE("Get supported operation failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsFloat16PrecisionSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPerformanceModeSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPerformanceModeSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query performance mode supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsPrioritySupported(bool& isSupported) +{ + auto ret = m_iDevice->IsPrioritySupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query priority supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsDynamicInputSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsDynamicInputSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query dynamic input supported failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::IsModelCacheSupported(bool& isSupported) +{ + auto ret = m_iDevice->IsModelCacheSupported(isSupported); + if (ret != HDF_SUCCESS) { + LOGE("Query cache model supported failed. 
ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + if (model == nullptr) { + LOGE("Model is nullptr, cannot prepare model."); + return OH_NN_INVALID_PARAMETER; + } + + V2_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0}; + size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()); + int32_t hdiRet {0}; + if (tensorSize > 0) { + hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer); + if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) { + LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet); + return OH_NN_FAILED; + } + } + + V2_0::Model* iModel = mindspore::lite::MindIR_LiteGraph_To_Model(model.get(), tensorBuffer); + if (iModel == nullptr) { + LOGE("Parse litegraph to hdi model failed."); + ReleaseSharedBuffer(tensorBuffer); + return OH_NN_FAILED; + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + OHOS::sptr iPreparedModel; + + auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel); + + mindspore::lite::MindIR_Model_Destroy(&iModel); + auto ret = ReleaseSharedBuffer(tensorBuffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Release tensorBuffer failed."); + return OH_NN_FAILED; + } + if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) { + LOGE("Prepare model failed. ErrorCode=%d", preparedRet); + return OH_NN_FAILED; + } + + // TODO: wrap iPreparedModel in an HDIPreparedModelV2_0 once it is hooked up; until then preparedModel is left unset. + // preparedModel = CreateSharedPtr(iPreparedModel); + // if (preparedModel == nullptr) { + // LOGE("Prepare model failed, because fail to create preparedModel instance."); + // return OH_NN_MEMORY_ERROR; + // } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) +{ + std::vector iBuffers; + auto memManager = MemoryManager::GetInstance(); + Memory memory; + OH_NN_ReturnCode ret; + size_t modelCacheSize = modelCache.size(); + for (size_t i = 0; i < modelCacheSize; i++) { + ret = memManager->GetMemory(modelCache[i].buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("The %zuth model cache is invalid. Please provide a valid model cache.", i + 1); + return ret; + } + iBuffers.emplace_back(V2_0::SharedBuffer {memory.fd, memory.length, 0, memory.length}); + } + + V2_0::ModelConfig iModelConfig; + iModelConfig.enableFloat16 = config.enableFloat16; + iModelConfig.mode = TransPerformanceMode(config.mode); + iModelConfig.priority = TransPriority(config.priority); + + OHOS::sptr iPreparedModel; + auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel); + if (hdiRet != HDF_SUCCESS) { + LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + // TODO: wrap iPreparedModel in an HDIPreparedModelV2_0 once it is hooked up; until then preparedModel is left unset. + // preparedModel = CreateSharedPtr(iPreparedModel); + // if (preparedModel == nullptr) { + // LOGE("Prepare model from model cache failed, because fail to create preparedModel instance."); + // return OH_NN_MEMORY_ERROR; + // } + return OH_NN_SUCCESS; +} + +void* HDIDeviceV2_0::AllocateBuffer(size_t length) +{ + if (length == 0) { + LOGE("The length param is invalid, length=0"); + return nullptr; + } + + V2_0::SharedBuffer buffer; + auto ret = m_iDevice->AllocateBuffer(length, buffer); + if (ret != HDF_SUCCESS) { + LOGE("Allocate buffer error. ErrorCode: %d", ret); + return nullptr; + } + + auto memManager = MemoryManager::GetInstance(); + auto addr = memManager->MapMemory(buffer.fd, length); + if (addr == nullptr) { + LOGE("Map fd to address failed."); + } + return addr; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseBuffer(const void* buffer) +{ + if (buffer == nullptr) { + LOGE("Buffer is nullptr, no need to release."); + return OH_NN_INVALID_PARAMETER; + } + + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(buffer, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Buffer, it is not an NNRt buffer."); + return ret; + } + + V2_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length}; + auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer); + if (deviceResult != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode: %d", deviceResult); + return OH_NN_FAILED; + } + + ret = memManager->UnMapMemory(buffer); + if (ret != OH_NN_SUCCESS) { + LOGE("Unmap memory failed."); + return ret; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIDeviceV2_0::ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer) +{ + if (buffer.fd == INVALID_FD) { + LOGI("No need to release. fd=%d", INVALID_FD); + return OH_NN_SUCCESS; + } + + auto ret = m_iDevice->ReleaseBuffer(buffer); + if (ret != HDF_SUCCESS) { + LOGE("Device release buffer error. ErrorCode=%d", ret); + return OH_NN_FAILED; + } + return OH_NN_SUCCESS; +} + +std::shared_ptr HDIDeviceV2_0Creator() +{ + // The HDI currently exposes only one device. + OHOS::sptr iDevice = V2_0::INnrtDevice::Get(); + if (iDevice == nullptr) { + LOGE("Get HDI device failed."); + return nullptr; + } + + std::shared_ptr device = CreateSharedPtr(iDevice); + if (device == nullptr) { + LOGE("Create device failed."); + } + + return device; +} + +REGISTER_DEVICE(DeviceV2_0, VendorV2_0, HDIDeviceV2_0Creator) +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_device_v2_0.h b/frameworks/native/hdi_device_v2_0.h new file mode 100644 index 0000000000000000000000000000000000000000..fee7831df4d556e3a96f254ef5cfbe4054884198 --- /dev/null +++ b/frameworks/native/hdi_device_v2_0.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H + +#include "refbase.h" +#include +#include +#include + +#include "device.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; + +class HDIDeviceV2_0 : public Device { +public: + explicit HDIDeviceV2_0(OHOS::sptr device); + + OH_NN_ReturnCode GetDeviceName(std::string& name) override; + OH_NN_ReturnCode GetVendorName(std::string& name) override; + OH_NN_ReturnCode GetVersion(std::string& version) override; + OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override; + OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override; + OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr model, + std::vector& ops) override; + + OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override; + OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override; + OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override; + OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override; + + OH_NN_ReturnCode PrepareModel(std::shared_ptr model, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector& modelCache, + const ModelConfig& config, + std::shared_ptr& preparedModel) override; + + void* AllocateBuffer(size_t length) override; + OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override; + +private: + OH_NN_ReturnCode ReleaseSharedBuffer(const V2_0::SharedBuffer& buffer); + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_iDevice {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // namespace OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_V2_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model.cpp b/frameworks/native/hdi_prepared_model_v1_0.cpp similarity index 51% rename from frameworks/native/hdi_prepared_model.cpp rename to frameworks/native/hdi_prepared_model_v1_0.cpp index 491aec696489b34b44c32bad5cdd5a8c93d7c969..898d37990ad63e39538b9206b0f10283cccb6680 100644 --- a/frameworks/native/hdi_prepared_model.cpp +++ b/frameworks/native/hdi_prepared_model_v1_0.cpp @@ -13,21 +13,93 @@ * limitations under the License. 
*/ -#include "hdi_prepared_model.h" +#include "hdi_prepared_model_v1_0.h" #include "common/log.h" #include "memory_manager.h" -#include "transform.h" namespace OHOS { namespace NeuralNetworkRuntime { -HDIPreparedModel::HDIPreparedModel(OHOS::sptr hdiPreparedModel) +namespace { +V1_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V1_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V1_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V1_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V1_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V1_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V1_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V1_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V1_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V1_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V1_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V1_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V1_0::DataType::DATA_TYPE_FLOAT64; + default: + return V1_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V1_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V1_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V1_0::Format::FORMAT_NHWC; + default: + return V1_0::Format::FORMAT_NONE; + } +} + +V1_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V1_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unnamed namespace + +HDIPreparedModelV1_0::HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel) : m_hdiPreparedModel(hdiPreparedModel) { hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); } -OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& modelCache) +OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector& modelCache) { if (!modelCache.empty()) { LOGE("The vector of modelCache should be empty. 
size=%zu", modelCache.size()); @@ -55,13 +127,13 @@ OH_NN_ReturnCode HDIPreparedModel::ExportModelCache(std::vector& mo return OH_NN_SUCCESS; } -OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, const std::vector& outputs, +OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector& inputs, const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) { V1_0::IOTensor iTensor; std::vector iInputTensors; for (auto& input: inputs) { - iTensor = NNToHDI::TransIOTensor(input); + iTensor = TransIOTensor(input); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform inputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; @@ -71,7 +143,7 @@ OH_NN_ReturnCode HDIPreparedModel::Run(const std::vector& inputs, cons std::vector iOutputTensors; for (auto& output: outputs) { - iTensor = NNToHDI::TransIOTensor(output); + iTensor = TransIOTensor(output); if (iTensor.data.fd == INVALID_FD) { LOGE("Transform outputs tensor failed, cannot find data file descriptor."); return OH_NN_INVALID_PARAMETER; diff --git a/frameworks/native/hdi_prepared_model.h b/frameworks/native/hdi_prepared_model_v1_0.h similarity index 75% rename from frameworks/native/hdi_prepared_model.h rename to frameworks/native/hdi_prepared_model_v1_0.h index d111977b329e3377a95c0de03ae825b1121410cf..f5a8911095571fd46c4d14091181b85753ac45e5 100644 --- a/frameworks/native/hdi_prepared_model.h +++ b/frameworks/native/hdi_prepared_model_v1_0.h @@ -14,21 +14,25 @@ */ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H -#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H #include #include "refbase.h" -#include "hdi_interfaces.h" #include "prepared_model.h" #include "cpp_type.h" +#include +#include +#include + +namespace V1_0 = OHOS::HDI::Nnrt::V1_0; namespace OHOS { namespace NeuralNetworkRuntime { -class HDIPreparedModel : public PreparedModel { +class HDIPreparedModelV1_0 : public PreparedModel { public: - explicit HDIPreparedModel(OHOS::sptr hdiPreparedModel); + explicit HDIPreparedModelV1_0(OHOS::sptr hdiPreparedModel); OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; @@ -44,4 +48,4 @@ private: }; } // namespace NeuralNetworkRuntime } // OHOS -#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_H \ No newline at end of file +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V1_0_H \ No newline at end of file diff --git a/frameworks/native/hdi_prepared_model_v2_0.cpp b/frameworks/native/hdi_prepared_model_v2_0.cpp new file mode 100644 index 0000000000000000000000000000000000000000..406a454dd3595614a5e843f788f94c75172cf15b --- /dev/null +++ b/frameworks/native/hdi_prepared_model_v2_0.cpp @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hdi_prepared_model_v2_0.h" + +#include "common/log.h" +#include "memory_manager.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace { +V2_0::DataType TransDataType(const OH_NN_DataType& dataType) +{ + switch (dataType) { + case OH_NN_BOOL: + return V2_0::DataType::DATA_TYPE_BOOL; + case OH_NN_INT8: + return V2_0::DataType::DATA_TYPE_INT8; + case OH_NN_INT16: + return V2_0::DataType::DATA_TYPE_INT16; + case OH_NN_INT32: + return V2_0::DataType::DATA_TYPE_INT32; + case OH_NN_INT64: + return V2_0::DataType::DATA_TYPE_INT64; + case OH_NN_UINT8: + return V2_0::DataType::DATA_TYPE_UINT8; + case OH_NN_UINT16: + return V2_0::DataType::DATA_TYPE_UINT16; + case OH_NN_UINT32: + return V2_0::DataType::DATA_TYPE_UINT32; + case OH_NN_UINT64: + return V2_0::DataType::DATA_TYPE_UINT64; + case OH_NN_FLOAT16: + return V2_0::DataType::DATA_TYPE_FLOAT16; + case OH_NN_FLOAT32: + return V2_0::DataType::DATA_TYPE_FLOAT32; + case OH_NN_FLOAT64: + return V2_0::DataType::DATA_TYPE_FLOAT64; + default: + return V2_0::DataType::DATA_TYPE_UNKNOWN; + } +} + +V2_0::Format TransFormat(const OH_NN_Format& format) +{ + switch (format) { + case OH_NN_FORMAT_NCHW: + return V2_0::Format::FORMAT_NCHW; + case OH_NN_FORMAT_NHWC: + return V2_0::Format::FORMAT_NHWC; + default: + return V2_0::Format::FORMAT_NONE; + } +} + +V2_0::IOTensor TransIOTensor(const IOTensor& tensor) +{ + V2_0::IOTensor iTensor; + iTensor.name = tensor.name; + iTensor.dataType = TransDataType(tensor.dataType); + iTensor.dimensions = tensor.dimensions; + iTensor.format = TransFormat(tensor.format); + + V2_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; + if (tensor.data != nullptr) { + auto memManager = MemoryManager::GetInstance(); + Memory memory; + auto ret = memManager->GetMemory(tensor.data, memory); + if (ret != OH_NN_SUCCESS) { + LOGE("Invalid Tensor buffer, cannot transform to fd."); + } else { + iBuffer.fd = memory.fd; + iBuffer.bufferSize = memory.length; + iBuffer.offset = 0; + iBuffer.dataSize = memory.length; + } + } + iTensor.data = iBuffer; + + return iTensor; +} +} // unnamed namespace + +HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel) + : m_hdiPreparedModel(hdiPreparedModel) +{ + hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second); +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector& modelCache) +{ + if (!modelCache.empty()) { + LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); + return OH_NN_INVALID_PARAMETER; + } + + std::vector iBuffers; + auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers); + if (ret != HDF_SUCCESS) { + LOGE("Export model cache failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + auto memManager = MemoryManager::GetInstance(); + for (size_t i = 0; i < iBuffers.size(); i++) { + auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize); + if (addr == nullptr) { + LOGE("Export the %zuth model cache failed, cannot map fd to address.", i + 1); + return OH_NN_MEMORY_ERROR; + } + ModelBuffer modelbuffer {addr, iBuffers[i].bufferSize}; + modelCache.emplace_back(modelbuffer); + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector& inputs, const std::vector& outputs, + std::vector>& outputsDims, std::vector& isOutputBufferEnough) +{ + V2_0::IOTensor iTensor; + std::vector iInputTensors; + for (auto& input: inputs) { + iTensor = TransIOTensor(input); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform inputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iInputTensors.emplace_back(iTensor); + } + + std::vector iOutputTensors; + for (auto& output: outputs) { + iTensor = TransIOTensor(output); + if (iTensor.data.fd == INVALID_FD) { + LOGE("Transform outputs tensor failed, cannot find data file descriptor."); + return OH_NN_INVALID_PARAMETER; + } + iOutputTensors.emplace_back(iTensor); + } + + auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough); + if (ret != HDF_SUCCESS || outputsDims.empty()) { + LOGE("Run model failed. ErrorCode=%d", ret); + return OH_NN_UNAVALIDABLE_DEVICE; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) +{ + // Not supported by the V2.0 HDI yet; OH_NN_OPERATION_FORBIDDEN tells callers to skip the dimension-range check. + return OH_NN_OPERATION_FORBIDDEN; +} +} // namespace NeuralNetworkRuntime +} // OHOS \ No newline at end of file diff --git a/frameworks/native/hdi_interfaces.h b/frameworks/native/hdi_prepared_model_v2_0.h similarity index 35% rename from frameworks/native/hdi_interfaces.h rename to frameworks/native/hdi_prepared_model_v2_0.h index 1d3416ba6f9daff3cd10c3a5ea5bfa2bd02315d4..ad42dcbcb314c56727b8f641132fcddc23e2bb64 100644 --- a/frameworks/native/hdi_interfaces.h +++ b/frameworks/native/hdi_prepared_model_v2_0.h @@ -13,17 +13,43 @@ * limitations under the License. 
*/ -#ifndef NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H -#define NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H -#include -#include -#include +#ifndef NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H +#define NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H + +#include + +#include +#include +#include + +#include "refbase.h" +#include "prepared_model.h" +#include "cpp_type.h" + +namespace V2_0 = OHOS::HDI::Nnrt::V2_0; namespace OHOS { namespace NeuralNetworkRuntime { -namespace V1_0 = OHOS::HDI::Nnrt::V1_0; -} // namespace NeuralNetworkRuntime -} // namespace OHOS +class HDIPreparedModelV2_0 : public PreparedModel { +public: + explicit HDIPreparedModelV2_0(OHOS::sptr hdiPreparedModel); -#endif // NEURAL_NETWORK_RUNTIME_HDI_INTERFACES_H \ No newline at end of file + OH_NN_ReturnCode ExportModelCache(std::vector& modelCache) override; + + OH_NN_ReturnCode Run(const std::vector& inputs, + const std::vector& outputs, + std::vector>& outputsDims, + std::vector& isOutputBufferEnough) override; + + OH_NN_ReturnCode GetInputDimRanges(std::vector>& minInputDims, + std::vector>& maxInputDims) override; + +private: + // first: major version, second: minor version + std::pair m_hdiVersion; + OHOS::sptr m_hdiPreparedModel {nullptr}; +}; +} // namespace NeuralNetworkRuntime +} // OHOS +#endif // NEURAL_NETWORK_RUNTIME_HDI_PREPARED_MODEL_V2_0_H \ No newline at end of file diff --git a/frameworks/native/inner_model.cpp b/frameworks/native/inner_model.cpp index bcd20c6e2cf1feafc2e058ea790dce815363b03e..205222fa258f1c5535e9d09477c46bb6ec0fc143 100644 --- a/frameworks/native/inner_model.cpp +++ b/frameworks/native/inner_model.cpp @@ -24,7 +24,6 @@ #include "common/utils.h" #include "common/scoped_trace.h" #include "device_manager.h" -#include "hdi_device.h" #include "validation.h" #include "ops_builder.h" #include "ops_registry.h" diff --git a/frameworks/native/ops/cast_builder.cpp b/frameworks/native/ops/cast_builder.cpp index 81dc1eb2b5f386bbf04fa022649b8dba146b4438..6336926209671c077fe40afd083ba5cfaab5f097 100644 --- a/frameworks/native/ops/cast_builder.cpp +++ b/frameworks/native/ops/cast_builder.cpp @@ -57,7 +57,6 @@ OH_NN_ReturnCode CastBuilder::Build(const std::vector& paramsIndex, LOGE("[Cast] Type of cast operator is not validation."); return OH_NN_INVALID_PARAMETER; } - *castTypeInt = (OH_NN_DataType)NNToHDI::TransDataType(*castTypeInt); if (!paramsIndex.empty()) { LOGE("[Cast] Cast expects no parameters"); diff --git a/frameworks/native/prepared_model.h b/frameworks/native/prepared_model.h index 65741311a999d456487091e11fc54e2f2c4641b1..2d25f6fbf0e33e0cdf6a43cab392db3676f40b29 100644 --- a/frameworks/native/prepared_model.h +++ b/frameworks/native/prepared_model.h @@ -34,6 +34,10 @@ public: const std::vector& outputs, std::vector>& outputsDims, std::vector& isOutputBufferEnough) = 0; + + virtual OH_NN_ReturnCode GetInputDimRanges( + std::vector>& minInputDims, + std::vector>& maxInputDims) { return OH_NN_OPERATION_FORBIDDEN; } }; } // OHOS } // namespace NeuralNetworkRuntime diff --git a/frameworks/native/transform.cpp b/frameworks/native/transform.cpp index ea0d3391a84757016eb600c0e782642a768cd627..d3705d5381f93b45c79d0def51b688b8608adf5f 100644 --- a/frameworks/native/transform.cpp +++ b/frameworks/native/transform.cpp @@ -25,134 +25,6 @@ const uint32_t BIT16_TO_BYTE = 2; const uint32_t BIT32_TO_BYTE = 4; const uint32_t BIT64_TO_BYTE = 8; -OH_NN_DeviceType HDIToNN::TransHDIDeviceType(const V1_0::DeviceType& iDeviceType) -{ - switch (iDeviceType) { - case V1_0::DeviceType::CPU: - return OH_NN_CPU; - case 
V1_0::DeviceType::GPU: - return OH_NN_GPU; - case V1_0::DeviceType::ACCELERATOR: - return OH_NN_ACCELERATOR; - default: - return OH_NN_OTHERS; - } -} - -DeviceStatus HDIToNN::TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus) -{ - switch (iDeviceStatus) { - case V1_0::DeviceStatus::AVAILABLE: - return DeviceStatus::AVAILABLE; - case V1_0::DeviceStatus::BUSY: - return DeviceStatus::BUSY; - case V1_0::DeviceStatus::OFFLINE: - return DeviceStatus::OFFLINE; - default: - return DeviceStatus::UNKNOWN; - } -} - -V1_0::PerformanceMode NNToHDI::TransPerformanceMode(const OH_NN_PerformanceMode& mode) -{ - switch (mode) { - case OH_NN_PERFORMANCE_LOW: - return V1_0::PerformanceMode::PERFORMANCE_LOW; - case OH_NN_PERFORMANCE_MEDIUM: - return V1_0::PerformanceMode::PERFORMANCE_MEDIUM; - case OH_NN_PERFORMANCE_HIGH: - return V1_0::PerformanceMode::PERFORMANCE_HIGH; - case OH_NN_PERFORMANCE_EXTREME: - return V1_0::PerformanceMode::PERFORMANCE_EXTREME; - default: - return V1_0::PerformanceMode::PERFORMANCE_NONE; - } -} -V1_0::Priority NNToHDI::TransPriority(const OH_NN_Priority& priority) -{ - switch (priority) { - case OH_NN_PRIORITY_LOW: - return V1_0::Priority::PRIORITY_LOW; - case OH_NN_PRIORITY_MEDIUM: - return V1_0::Priority::PRIORITY_MEDIUM; - case OH_NN_PRIORITY_HIGH: - return V1_0::Priority::PRIORITY_HIGH; - default: - return V1_0::Priority::PRIORITY_NONE; - } -} - -V1_0::DataType NNToHDI::TransDataType(const OH_NN_DataType& dataType) -{ - switch (dataType) { - case OH_NN_BOOL: - return V1_0::DataType::DATA_TYPE_BOOL; - case OH_NN_INT8: - return V1_0::DataType::DATA_TYPE_INT8; - case OH_NN_INT16: - return V1_0::DataType::DATA_TYPE_INT16; - case OH_NN_INT32: - return V1_0::DataType::DATA_TYPE_INT32; - case OH_NN_INT64: - return V1_0::DataType::DATA_TYPE_INT64; - case OH_NN_UINT8: - return V1_0::DataType::DATA_TYPE_UINT8; - case OH_NN_UINT16: - return V1_0::DataType::DATA_TYPE_UINT16; - case OH_NN_UINT32: - return V1_0::DataType::DATA_TYPE_UINT32; - case OH_NN_UINT64: - return V1_0::DataType::DATA_TYPE_UINT64; - case OH_NN_FLOAT16: - return V1_0::DataType::DATA_TYPE_FLOAT16; - case OH_NN_FLOAT32: - return V1_0::DataType::DATA_TYPE_FLOAT32; - case OH_NN_FLOAT64: - return V1_0::DataType::DATA_TYPE_FLOAT64; - default: - return V1_0::DataType::DATA_TYPE_UNKNOWN; - } -} - -V1_0::Format NNToHDI::TransFormat(const OH_NN_Format& format) -{ - switch (format) { - case OH_NN_FORMAT_NCHW: - return V1_0::Format::FORMAT_NCHW; - case OH_NN_FORMAT_NHWC: - return V1_0::Format::FORMAT_NHWC; - default: - return V1_0::Format::FORMAT_NONE; - } -} - -V1_0::IOTensor NNToHDI::TransIOTensor(const IOTensor& tensor) -{ - V1_0::IOTensor iTensor; - iTensor.name = tensor.name; - iTensor.dataType = TransDataType(tensor.dataType); - iTensor.dimensions = tensor.dimensions; - iTensor.format = TransFormat(tensor.format); - - V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0}; - if (tensor.data != nullptr) { - auto memManager = MemoryManager::GetInstance(); - Memory memory; - auto ret = memManager->GetMemory(tensor.data, memory); - if (ret != OH_NN_SUCCESS) { - LOGE("Invalid Tensor buffer, cannot transform to fd."); - } else { - iBuffer.fd = memory.fd; - iBuffer.bufferSize = memory.length; - iBuffer.offset = 0; - iBuffer.dataSize = memory.length; - } - } - iTensor.data = iBuffer; - - return iTensor; -} - uint32_t GetTypeSize(OH_NN_DataType type) { switch (type) { diff --git a/frameworks/native/transform.h b/frameworks/native/transform.h index 2472ad3f8d1ca8e77912ac3e4f521d7ce7825c1b..24d54e8d351dc7cf9a05e70eceea8cc365412daa 
100644 --- a/frameworks/native/transform.h +++ b/frameworks/native/transform.h @@ -16,7 +16,6 @@ #ifndef NEURAL_NETWORK_RUNTIME_TRANSFORM_H #define NEURAL_NETWORK_RUNTIME_TRANSFORM_H -#include "hdi_interfaces.h" #include "interfaces/kits/c/neural_network_runtime_type.h" #include "cpp_type.h" #include "mindir.h" @@ -38,19 +37,6 @@ std::vector ConstructVectorFromArray(const T* data, size_t size) uint32_t GetTypeSize(OH_NN_DataType type); -namespace HDIToNN { -OH_NN_DeviceType TransHDIDeviceType(const V1_0::DeviceType& iDeviceType); -DeviceStatus TransHDIDeviceStatus(const V1_0::DeviceStatus& iDeviceStatus); -} // namespace HDIToNN - -namespace NNToHDI { -V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode); -V1_0::Priority TransPriority(const OH_NN_Priority& priority); -V1_0::DataType TransDataType(const OH_NN_DataType& dataType); -V1_0::Format TransFormat(const OH_NN_Format& format); -V1_0::IOTensor TransIOTensor(const IOTensor& tensor); -} // namespace NNToHDI - namespace NNToMS { mindspore::lite::DataType TransformDataType(OH_NN_DataType type); mindspore::lite::Format TransformFormat(OH_NN_Format type);
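---
Editor's note on the overall shape of this patch: DeviceManager::GetInstance() no longer discovers HDI devices eagerly via DiscoverHDIDevices(); instead each hdi_device_vX_0.cpp self-registers a creator through the REGISTER_DEVICE macro (whose missing trailing semicolon is also fixed here), and GenUniqueName() now appends the HDI version so a V1.0 and a V2.0 driver for the same hardware get distinct keys. The sketch below is a minimal, self-contained illustration of that pattern, not the real OHOS classes: Device, DeviceManager, and DeviceRegistrar mirror the patch's interfaces in simplified form, while FakeV2Device and DeviceCount are hypothetical names added only to make it runnable.

// registrar_sketch.cpp — simplified model of the registration pattern in this patch.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Device {
    virtual ~Device() = default;
    virtual std::string GetDeviceName() const = 0;
    virtual std::string GetVendorName() const = 0;
    virtual std::string GetVersion() const = 0;  // new virtual introduced by this patch
};

class DeviceManager {
public:
    // No eager DiscoverHDIDevices() call: the instance starts empty and is
    // filled as static registrars run during program startup.
    static DeviceManager& GetInstance()
    {
        static DeviceManager instance;
        return instance;
    }

    bool RegisterDevice(const std::function<std::shared_ptr<Device>()>& creator)
    {
        std::shared_ptr<Device> device = creator();
        if (device == nullptr) {
            return false;
        }
        // Version-suffixed unique name, mirroring GenUniqueName(name, vendor, version).
        const std::string uniqueName =
            device->GetDeviceName() + "_" + device->GetVendorName() + "_" + device->GetVersion();
        return m_devices.emplace(uniqueName, device).second;  // rejects duplicates
    }

    size_t DeviceCount() const { return m_devices.size(); }

private:
    DeviceManager() = default;
    std::map<std::string, std::shared_ptr<Device>> m_devices;
};

// Counterpart of REGISTER_DEVICE: a namespace-scope object whose constructor
// performs the registration, so merely linking the file in adds the device.
struct DeviceRegistrar {
    explicit DeviceRegistrar(const std::function<std::shared_ptr<Device>()>& creator)
    {
        DeviceManager::GetInstance().RegisterDevice(creator);
    }
};

struct FakeV2Device : public Device {
    std::string GetDeviceName() const override { return "npu"; }
    std::string GetVendorName() const override { return "acme"; }
    std::string GetVersion() const override { return "v2_0"; }  // same shape as HDIDeviceV2_0::GetVersion()
};

static DeviceRegistrar g_fakeV2DeviceRegistrar(
    []() -> std::shared_ptr<Device> { return std::make_shared<FakeV2Device>(); });

int main()
{
    // The registrar ran during static initialization, before main() was entered.
    std::cout << DeviceManager::GetInstance().DeviceCount() << " device(s) registered\n";
    return 0;
}

One reviewer-visible consequence: because the unique name also feeds DeviceManager::GetDeviceName(deviceId), user-facing device names now carry a version suffix (e.g. "name_vendor_v2_0"), which may affect callers that matched on the old "name_vendor" form.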