std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                              const TensorShape& subTensorShape,
                                                                              const unsigned int* subTensorOrigin) const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); ++i)
    {
        // The Compute Library indexes dimensions in the reverse order to Arm NN.
        const unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }
    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    // Reject a sub-tensor that does not fit entirely inside its parent.
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }
    return std::make_unique<NeonSubTensorHandle>(PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}
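// Example (hedged sketch): the index reversal above in isolation. Arm NN stores
// dimensions outermost-first while the Compute Library expects innermost-first,
// so an origin of {0, 2, 4, 8} (N, C, H, W) becomes ACL coordinates {8, 4, 2, 0}.
// This helper and its name are illustrative only, not part of the factory.
static arm_compute::Coordinates ExampleReverseOrigin(const unsigned int* origin, unsigned int numDims)
{
    arm_compute::Coordinates coords;
    coords.set_num_dimensions(numDims);
    for (unsigned int i = 0; i < numDims; ++i)
    {
        // ACL coordinate i takes the Arm NN origin value at the mirrored index.
        coords.set(i, armnn::numeric_cast<int>(origin[numDims - i - 1]));
    }
    return coords;
}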
std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    // If the memory is not managed internally, the handle must import it instead.
    tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
    return tensorHandle;
}
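// Example (hedged sketch): how a caller might use the overload above. The
// `factory` and `info` arguments are hypothetical stand-ins; only the
// CreateTensorHandle(TensorInfo, bool) entry point shown above is assumed.
static void ExampleManagedVsImported(NeonTensorHandleFactory& factory, const TensorInfo& info)
{
    // Managed: the handle joins the inter-layer memory group and is pooled.
    auto managed = factory.CreateTensorHandle(info, true);
    // Unmanaged: import is enabled, so the caller supplies the backing memory.
    auto imported = factory.CreateTensorHandle(info, false);
}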
std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                           DataLayout dataLayout,
                                                                           const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    // If the memory is not managed internally, the handle must import it instead.
    tensorHandle->SetImportEnabledFlag(!IsMemoryManaged);
    return tensorHandle;
}
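// Example (hedged sketch): the DataLayout overload lets the caller fix the
// memory format explicitly; `factory` and `info` are again hypothetical.
static void ExampleExplicitLayout(NeonTensorHandleFactory& factory, const TensorInfo& info)
{
    auto nchw = factory.CreateTensorHandle(info, DataLayout::NCHW, true); // channels before spatial dims
    auto nhwc = factory.CreateTensorHandle(info, DataLayout::NHWC, true); // channels last
}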
// Accessors for the memory-source flags configured at factory construction.
MemorySourceFlags NeonTensorHandleFactory::GetExportFlags() const { return m_ExportFlags; }
MemorySourceFlags NeonTensorHandleFactory::GetImportFlags() const { return m_ImportFlags; }
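// Example (hedged sketch): MemorySourceFlags is a bitmask, so callers can test
// for a specific import path before handing the factory external memory.
// MemorySource::Malloc is Arm NN's flag for caller-allocated heap memory; the
// helper itself is illustrative only.
static bool ExampleSupportsMallocImport(const NeonTensorHandleFactory& factory)
{
    return (factory.GetImportFlags() & static_cast<MemorySourceFlags>(MemorySource::Malloc)) != 0;
}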
// Inside NeonTensorHandleFactory::GetCapabilities: for layer types that need
// padded buffers on Neon, report the PaddingRequired capability.
std::vector<Capability> capabilities;
Capability paddingCapability(CapabilityClass::PaddingRequired, true);
capabilities.push_back(paddingCapability);
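// Example (hedged sketch): querying the capability reported above. `factory`,
// `layer` and `connectedLayer` are hypothetical; this assumes Capability exposes
// its value as the m_Value member declared in ITensorHandleFactory.hpp.
static bool ExampleLayerNeedsPadding(NeonTensorHandleFactory& factory,
                                     const IConnectableLayer* layer,
                                     const IConnectableLayer* connectedLayer)
{
    auto caps = factory.GetCapabilities(layer, connectedLayer, CapabilityClass::PaddingRequired);
    return !caps.empty() && caps.front().m_Value;
}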