Coverage Report

Created: 2026-02-14 07:18

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/opencv/modules/dnn/src/layers/gather_layer.cpp
Line
Count
Source
1
// This file is part of OpenCV project.
2
// It is subject to the license terms in the LICENSE file found in the top-level directory
3
// of this distribution and at http://opencv.org/license.html.
4
5
#include "../precomp.hpp"
6
#include "../op_inf_engine.hpp"
7
#include "../ie_ngraph.hpp"
8
#include "layers_common.hpp"
9
10
11
namespace cv { namespace dnn {
12
13
class GatherLayerImpl CV_FINAL : public GatherLayer
14
{
15
public:
16
    GatherLayerImpl(const LayerParams& params)
17
0
    {
18
0
        setParamsFrom(params);
19
0
        m_axis = params.get<int>("axis", 0);
20
0
        m_real_ndims = params.get<int>("real_ndims", -1);
21
0
    }
22
23
    virtual bool supportBackend(int backendId) CV_OVERRIDE
24
0
    {
25
0
        return backendId == DNN_BACKEND_OPENCV ||
26
0
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
27
0
    }
28
29
    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
30
                                 const int requiredOutputs,
31
                                 std::vector<MatShape> &outputs,
32
                                 std::vector<MatShape> &internals) const CV_OVERRIDE
33
0
    {
34
0
        CV_CheckEQ(inputs.size(), 2ull, "");
35
0
        MatShape inpShape = inputs[0];
36
0
        const int axis = normalize_axis(m_axis, inpShape);
37
38
0
        inpShape.erase(inpShape.begin() + axis);
39
0
        auto end = m_real_ndims == -1 ? inputs[1].end() : inputs[1].begin() + m_real_ndims;
40
0
        inpShape.insert(inpShape.begin() + axis, inputs[1].begin(), end);
41
42
0
        outputs.assign(1, inpShape);
43
0
        return false;
44
0
    }
45
46
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
47
0
    {
48
0
        CV_TRACE_FUNCTION();
49
0
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
50
51
        // FP16 fallback is not needed as we handle FP16 below
52
53
0
        std::vector<Mat> inputs, outputs;
54
0
        inputs_arr.getMatVector(inputs);
55
0
        outputs_arr.getMatVector(outputs);
56
57
0
        CV_CheckEQ(inputs.size(), (size_t)2, "");
58
0
        CV_CheckEQ(outputs.size(), (size_t)1, "");
59
60
0
        const Mat& inp = inputs[0];
61
62
0
        int indicesType = inputs[1].type();
63
0
        CV_CheckType(indicesType, indicesType == CV_32FC1 || indicesType == CV_16FC1, "");
64
0
        Mat indices32S;
65
0
        if (indicesType == CV_16F/*FP16*/)
66
0
        {
67
0
            Mat indicesF32;
68
0
            inputs[1].convertTo(indicesF32, CV_32F);
69
0
            indicesF32.convertTo(indices32S, CV_32S);
70
0
        }
71
0
        else
72
0
        {
73
0
            inputs[1].convertTo(indices32S, CV_32S);
74
0
        }
75
0
        const size_t indices_total = indices32S.total();
76
0
        indices32S = indices32S.reshape(1, indices_total);
77
78
0
        Mat& out = outputs[0];
79
80
0
        CV_CheckTypeEQ(inp.type(), out.type(), "");
81
0
        CV_CheckTypeEQ(indices32S.type(), CV_32SC1, "");
82
83
0
        const int axis = normalize_axis(m_axis, shape(inp));
84
85
        // FIXIT: why should we work with non-normalized input? it should be handled in importer or layer's output generator
86
0
        const int axis_size = (int)inp.size[axis];
87
0
        for (size_t j = 0 ; j < indices_total; ++j)
88
0
        {
89
0
            int& idx = indices32S.at<int>(j);
90
0
            idx = normalize_axis(idx, axis_size);  // validate and normalize indices
91
0
        }
92
93
0
        const size_t outer_size = axis == 0 ? inp.total() : inp.step1(axis - 1);
94
0
        const size_t outer_dims = inp.total() / outer_size;
95
0
        const size_t inner_size = inp.step1(axis);
96
97
0
        const int* idx = indices32S.ptr<int>();
98
0
        const char* src = inp.ptr<const char>();
99
0
        char* dst = out.ptr<char>();
100
0
        CV_CheckEQ(out.total(), outer_dims * indices_total * inner_size, "");
101
102
0
        const size_t es = inp.elemSize1();
103
        // TODO: optimize through switch (inner_size * es)
104
0
        const size_t inner_bytes = inner_size * es;
105
0
        for (size_t i = 0; i < outer_dims; ++i)
106
0
        {
107
0
            const size_t src_offset = i * outer_size;
108
0
            for (size_t j = 0 ; j < indices_total; ++j)
109
0
            {
110
0
                const int index = idx[j];
111
0
                CV_DbgCheck(index, index >= 0 && index < axis_size, "");
112
0
                const size_t new_offset = src_offset + index * inner_size;
113
0
                std::memcpy(dst, src + new_offset * es, inner_bytes);
114
0
                dst += inner_bytes;
115
0
            }
116
0
        }
117
0
    }
118
119
#ifdef HAVE_DNN_NGRAPH
120
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
121
                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
122
    {
123
        auto axisNode = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &m_axis);
124
        auto gather = std::make_shared<ov::op::v8::Gather>(
125
            nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
126
            std::make_shared<ov::op::v0::Convert>(nodes[1].dynamicCast<InfEngineNgraphNode>()->node, ov::element::i32),
127
            axisNode);
128
        return Ptr<BackendNode>(new InfEngineNgraphNode(gather));
129
    }
130
#endif  // HAVE_DNN_NGRAPH
131
132
private:
133
    // The axis to gather along
134
    int m_axis;
135
    int m_real_ndims;
136
};
137
138
Ptr<GatherLayer> GatherLayer::create(const LayerParams& params)
139
0
{
140
0
    return makePtr<GatherLayerImpl>(params);
141
0
}
142
143
}}  // namespace cv::dnn