#include <arm_compute/runtime/NEON/functions/NESplit.h>

using namespace armcomputetensorutils;

// ACL orders tensor dimensions in reverse relative to Arm NN, so convert the split axis.
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
{
    return (numDimensions - splitAxis) - 1;
}
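
// Example: for a 4-D Arm NN tensor (N, C, H, W) with splitAxis = 1 (channels),
// CalcAclAxis(4, 1) returns 2, the matching axis in ACL's reversed (W, H, C, N) ordering.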

arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
                                                 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                                 unsigned int splitAxis)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);

    size_t numOutputs = outputs.size();

    std::vector<arm_compute::TensorInfo> aclOutputs;
    aclOutputs.reserve(numOutputs);

    std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
    aclOutputPtr.reserve(numOutputs);
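
    // NESplit::validate takes a vector of ITensorInfo pointers, so build an ACL TensorInfo
    // for every output and keep a pointer to each; reserving above guarantees the
    // TensorInfo objects never reallocate, keeping those pointers valid.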
    for (size_t i = 0u; i < outputs.size(); ++i)
    {
        aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
        aclOutputPtr.emplace_back(&aclOutputs.back());
    }

    unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
    return arm_compute::NESplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
}
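
// Typical usage (sketch, outside this excerpt): the Neon layer-support check forwards each
// Splitter layer's TensorInfos and computed split axis to NeonSplitterWorkloadValidate and
// rejects the layer if the returned arm_compute::Status reports an error.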

NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<SplitterQueueDescriptor>(descriptor, info)
{
    bool allOutputsAreSubtensors = true;

    // If every output handle was created as a sub-tensor of the input, the split is a no-op.
    for (auto output : m_Data.m_Outputs)
    {
        if (output && !output->GetParent())
        {
            allOutputsAreSubtensors = false;
            break;
        }
    }

    if (allOutputsAreSubtensors)
    {
        // All outputs alias the input tensor, so there is nothing to configure or run.
        return;
    }

    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();

    std::vector<arm_compute::ITensor*> aclOutputs;
    for (auto output : m_Data.m_Outputs)
    {
        arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor();
        aclOutputs.emplace_back(&aclOutput);
    }
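
    // The downcasts above assume each handle wraps an ACL tensor (IAclTensorHandle); this
    // holds here because the workload is created only by the Neon workload factory.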

    std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit());

    // Determine the split axis from the descriptor; this workload supports only a single axis.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
    if (splitAxis.size() != 1)
    {
        throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
    }

    unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
    layer->configure(&input, aclOutputs, aclAxis);

    layer->prepare();
    m_Layer.reset(layer.release());
}
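
// Execute() (not shown in this excerpt) runs m_Layer only when it was configured above;
// when every output is already a sub-tensor of the input, no work is done at execution time.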