Skip to content

Commit

Permalink
Common OV core
Browse files Browse the repository at this point in the history
  • Loading branch information
alessandropalla committed Jul 16, 2024
1 parent f06e806 commit 16c69bc
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 16 deletions.
6 changes: 6 additions & 0 deletions include/intel_npu_acceleration_library/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,12 @@

namespace intel_npu_acceleration_library {

/**
 * @brief Global OpenVINO core object, shared across the library.
 *
 * Declared `inline` (C++17) so that every translation unit that includes this
 * header refers to ONE shared instance. A plain namespace-scope definition in
 * a header would violate the One Definition Rule and cause duplicate-symbol
 * link errors as soon as the header is included from two or more .cpp files.
 */
inline ov::Core core;

static constexpr ov::Property<std::string> npu_compiler_type{"NPU_COMPILER_TYPE"};
static constexpr ov::Property<std::string> npu_parameters{"NPU_COMPILATION_MODE_PARAMS"};

Expand Down
8 changes: 1 addition & 7 deletions include/intel_npu_acceleration_library/inference.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,12 +23,6 @@

namespace intel_npu_acceleration_library {

/**
* @brief OpenVINO core object
*
*/
static ov::Core core;

/**
* @brief Create a remote tensor
*
Expand Down Expand Up @@ -82,7 +76,7 @@ class OVInferenceModel {
// configure compiled-model cache dir and performance hint (throughput mode)
core.set_property(ov::cache_dir("cache"));
core.set_property(device, ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
core.set_property("NPU", ov::log::level(ov::log::Level::DEBUG));
// core.set_property("NPU", ov::log::level(ov::log::Level::DEBUG));
if (device == "NPU") {
core.set_property(device, intel_npu_acceleration_library::npu_compiler_type("DRIVER"));
if (profile) {
Expand Down
14 changes: 9 additions & 5 deletions include/intel_npu_acceleration_library/tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,11 +28,15 @@ class Tensor {
*/
/**
 * @brief Construct a Tensor, backing it with NPU level-zero host memory when
 * an NPU is available and falling back to the caller's buffer otherwise.
 *
 * @param dtype element data type of the tensor
 * @param shape shape of the tensor
 * @param data pointer to the caller-provided source buffer
 * @param tensor_type remote-tensor usage kind (default: INPUT)
 * @param device target device name (default: "NPU")
 *
 * NOTE(review): in the fallback path the tensor merely aliases `data` — the
 * caller's buffer must outlive this Tensor. Confirm callers guarantee that.
 */
Tensor(ov::element::Type_t dtype, ov::Shape shape, void* data,
       ov::intel_npu::TensorType tensor_type = ov::intel_npu::TensorType::INPUT, std::string device = "NPU") {
    ov::Core core;
    if (_isNPUAvailable(core)) {
        // NPU present: allocate a level-zero host tensor and copy the
        // caller's data into the freshly created remote buffer.
        auto zero_ctx = core.get_default_context(device).as<ov::intel_npu::level_zero::ZeroContext>();
        _remote_tensor = zero_ctx.create_l0_host_tensor(dtype, shape, tensor_type);
        data_ptr = _remote_tensor.get();
        std::memcpy(data_ptr, data, _remote_tensor.get_byte_size());
    } else {
        // No NPU: reuse the caller's pointer as-is; no copy is performed.
        data_ptr = data;
    }
}

/**
Expand Down
6 changes: 2 additions & 4 deletions src/bindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,11 @@
extern "C" {

/**
 * @brief C-API entry point: report whether an NPU device is available.
 * @return the result of `_isNPUAvailable` evaluated against the shared core.
 */
intel_npu_acceleration_library_DLL_API bool isNPUAvailable() {
    namespace lib = intel_npu_acceleration_library;
    return lib::_isNPUAvailable(lib::core);
}

/**
 * @brief C-API entry point: query the NPU driver version.
 * @return the value of `driver_version` for the shared core
 *         (presumably the installed NPU driver's version — confirm upstream).
 */
intel_npu_acceleration_library_DLL_API uint32_t getNPUDriverVersion() {
    namespace lib = intel_npu_acceleration_library;
    return lib::driver_version(lib::core);
}

// ######################## Remote Tensors ########################
Expand Down

0 comments on commit 16c69bc

Please sign in to comment.