#include "NeonSplitterWorkload.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <neon/NeonTensorHandle.hpp>

#include <arm_compute/runtime/NEON/functions/NESplit.h>
using namespace armcomputetensorutils;
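// ACL (arm_compute) orders tensor dimensions from innermost to outermost, the reverse of
// ArmNN, so an ArmNN split axis has to be flipped before it is handed to NESplit.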
unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
{
    return (numDimensions - splitAxis) - 1;
}
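// Translates the ArmNN tensor infos into ACL tensor infos and asks NESplit whether the
// requested split configuration is supported, without allocating any tensor memory.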
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
                                                 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                                 unsigned int splitAxis)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);

    size_t numOutputs = outputs.size();

    std::vector<arm_compute::TensorInfo> aclOutputs;
    aclOutputs.reserve(numOutputs);

    std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
    aclOutputPtr.reserve(numOutputs);
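    // Build an ACL TensorInfo for every ArmNN output and keep stable pointers to pass to validate()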
    for (size_t i = 0u; i < outputs.size(); ++i)
    {
        aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
        aclOutputPtr.emplace_back(&aclOutputs.back());
    }
    unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
    return arm_compute::NESplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
}
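// In the NeonSplitterWorkload constructor, the ACL split function is only created and
// configured when at least one output is not already a sub-tensor of the input; when every
// output aliases the input's memory there is nothing for the workload to execute.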
    bool allOutputsAreSubtensors = true;
    for (auto output : m_Data.m_Outputs)
    {
        if (output && !output->GetParent())
        {
            // Found an output that is not a sub-tensor, so the split must be executed explicitly
            allOutputsAreSubtensors = false;
            break;
        }
    }

    if (allOutputsAreSubtensors)
    {
        // Every output already views the correct slice of the input, so there is nothing to run
        return;
    }
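    // At least one output needs an explicit copy: gather the ACL tensors and set up NESplit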
    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    std::vector<arm_compute::ITensor*> aclOutputs;
    for (auto output : m_Data.m_Outputs)
    {
        arm_compute::ITensor& aclOutput = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor();
        aclOutputs.emplace_back(&aclOutput);
    }
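    // Create the ACL split function and derive the single axis it should split on from the
    // descriptor; the workload only supports splitting along one axis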
    std::unique_ptr<arm_compute::NESplit> layer(new arm_compute::NESplit());

    std::set<unsigned int> splitAxis = ComputeSplitAxis(m_Data.m_Parameters, m_Data.m_Inputs[0]->GetShape());
    if (splitAxis.size() != 1)
    {
        throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
    }

    unsigned int aclAxis = CalcAclAxis(m_Data.m_Parameters.GetNumDimensions(), *splitAxis.begin());
    layer->configure(&input, aclOutputs, aclAxis);
    m_Layer.reset(layer.release());
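    // m_Layer stays null when all outputs were sub-tensors; Execute() is expected to run the
    // stored NESplit function only when it was configured here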