Coverage Report

Created: 2026-03-31 07:13

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/opencv/modules/dnn/src/net_impl_backend.cpp
Line
Count
Source
1
// This file is part of OpenCV project.
2
// It is subject to the license terms in the LICENSE file found in the top-level directory
3
// of this distribution and at http://opencv.org/license.html.
4
5
#include "precomp.hpp"
6
7
#include "net_impl.hpp"
8
#include "legacy_backend.hpp"
9
10
#include "backend.hpp"
11
#include "factory.hpp"
12
13
#ifdef HAVE_CUDA
14
#include "cuda4dnn/init.hpp"
15
#endif
16
17
namespace cv {
18
namespace dnn {
19
CV__DNN_INLINE_NS_BEGIN
20
21
22
Ptr<BackendWrapper> Net::Impl::wrap(Mat& host)
23
0
{
24
0
    if (preferableBackend == DNN_BACKEND_OPENCV &&
25
0
            (preferableTarget == DNN_TARGET_CPU || preferableTarget == DNN_TARGET_CPU_FP16))
26
0
        return Ptr<BackendWrapper>();
27
28
0
    MatShape shape(host.dims);
29
0
    for (int i = 0; i < host.dims; ++i)
30
0
        shape[i] = host.size[i];
31
32
0
    void* data = host.data;
33
0
    if (backendWrappers.find(data) != backendWrappers.end())
34
0
    {
35
0
        Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
36
0
        if (preferableBackend == DNN_BACKEND_OPENCV)
37
0
        {
38
0
#ifdef HAVE_OPENCL
39
0
            CV_Assert(IS_DNN_OPENCL_TARGET(preferableTarget));
40
0
            return OpenCLBackendWrapper::create(baseBuffer, host);
41
#else
42
            CV_Error(Error::StsInternal, "");
43
#endif
44
0
        }
45
0
        else if (preferableBackend == DNN_BACKEND_HALIDE)
46
0
        {
47
0
            CV_Assert(haveHalide());
48
#ifdef HAVE_HALIDE
49
            return Ptr<BackendWrapper>(new HalideBackendWrapper(baseBuffer, shape));
50
#endif
51
0
        }
52
0
        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
53
0
        {
54
0
            CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
55
0
        }
56
0
        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
57
0
        {
58
0
            return wrapMat(preferableBackend, preferableTarget, host);
59
0
        }
60
0
        else if (preferableBackend == DNN_BACKEND_WEBNN)
61
0
        {
62
#ifdef HAVE_WEBNN
63
            return wrapMat(preferableBackend, preferableTarget, host);
64
#endif
65
0
        }
66
0
        else if (preferableBackend == DNN_BACKEND_VKCOM)
67
0
        {
68
#ifdef HAVE_VULKAN
69
            return Ptr<BackendWrapper>(new VkComBackendWrapper(baseBuffer, host));
70
#endif
71
0
        }
72
0
        else if (preferableBackend == DNN_BACKEND_CUDA)
73
0
        {
74
0
            CV_Assert(haveCUDA());
75
#ifdef HAVE_CUDA
76
            switch (preferableTarget)
77
            {
78
            case DNN_TARGET_CUDA:
79
                return CUDABackendWrapperFP32::create(baseBuffer, shape);
80
            case DNN_TARGET_CUDA_FP16:
81
                return CUDABackendWrapperFP16::create(baseBuffer, shape);
82
            default:
83
                CV_Assert(IS_DNN_CUDA_TARGET(preferableTarget));
84
            }
85
#endif
86
0
        }
87
0
        else if (preferableBackend == DNN_BACKEND_TIMVX)
88
0
        {
89
#ifdef HAVE_TIMVX
90
            return Ptr<BackendWrapper>(new TimVXBackendWrapper(baseBuffer, host));
91
#endif
92
0
        }
93
0
        else if (preferableBackend == DNN_BACKEND_CANN)
94
0
        {
95
0
            CV_Assert(0 && "Internal error: DNN_BACKEND_CANN must be implemented through inheritance");
96
0
        }
97
0
        else
98
0
            CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
99
0
    }
100
101
0
    Ptr<BackendWrapper> wrapper = wrapMat(preferableBackend, preferableTarget, host);
102
0
    backendWrappers[data] = wrapper;
103
0
    return wrapper;
104
0
}
105
106
107
void Net::Impl::initBackend(const std::vector<LayerPin>& blobsToKeep_)
108
0
{
109
0
    CV_TRACE_FUNCTION();
110
0
    if (preferableBackend == DNN_BACKEND_OPENCV)
111
0
    {
112
0
        CV_Assert(preferableTarget == DNN_TARGET_CPU || preferableTarget == DNN_TARGET_CPU_FP16 || IS_DNN_OPENCL_TARGET(preferableTarget));
113
0
    }
114
0
    else if (preferableBackend == DNN_BACKEND_HALIDE)
115
0
    {
116
#ifdef HAVE_HALIDE
117
        initHalideBackend();
118
#else
119
0
        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Halide");
120
0
#endif
121
0
    }
122
0
    else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
123
0
    {
124
0
        CV_Assert(0 && "Inheritance must be used with OpenVINO backend");
125
0
    }
126
0
    else if (preferableBackend == DNN_BACKEND_WEBNN)
127
0
    {
128
#ifdef HAVE_WEBNN
129
        initWebnnBackend(blobsToKeep_);
130
#else
131
0
        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of WebNN");
132
0
#endif
133
0
    }
134
0
    else if (preferableBackend == DNN_BACKEND_VKCOM)
135
0
    {
136
#ifdef HAVE_VULKAN
137
        initVkComBackend();
138
#else
139
0
        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Vulkan");
140
0
#endif
141
0
    }
142
0
    else if (preferableBackend == DNN_BACKEND_CUDA)
143
0
    {
144
#ifdef HAVE_CUDA
145
        initCUDABackend(blobsToKeep_);
146
#else
147
0
        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of CUDA/CUDNN");
148
0
#endif
149
0
    }
150
0
    else if (preferableBackend == DNN_BACKEND_TIMVX)
151
0
    {
152
#ifdef HAVE_TIMVX
153
        initTimVXBackend();
154
#else
155
0
        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of TimVX");
156
0
#endif
157
0
    }
158
0
    else if (preferableBackend == DNN_BACKEND_CANN)
159
0
    {
160
0
        CV_Assert(0 && "Internal error: DNN_BACKEND_CANN must be implemented through inheritance");
161
0
    }
162
0
    else
163
0
    {
164
0
        CV_Error(Error::StsNotImplemented, cv::format("Unknown backend identifier: %d", preferableBackend));
165
0
    }
166
0
}
167
168
169
void Net::Impl::setPreferableBackend(Net& net, int backendId)
170
14.2k
{
171
14.2k
    if (backendId == DNN_BACKEND_DEFAULT)
172
14.2k
        backendId = (Backend)getParam_DNN_BACKEND_DEFAULT();
173
174
14.2k
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
175
0
        backendId = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;  // = getInferenceEngineBackendTypeParam();
176
177
14.2k
    if (netWasQuantized && backendId != DNN_BACKEND_OPENCV && backendId != DNN_BACKEND_TIMVX &&
178
0
        backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
179
0
    {
180
0
        CV_LOG_WARNING(NULL, "DNN: Only default, TIMVX and OpenVINO backends support quantized networks");
181
0
        backendId = DNN_BACKEND_OPENCV;
182
0
    }
183
#ifdef HAVE_DNN_NGRAPH
184
    if (netWasQuantized && backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2023_0))
185
    {
186
        CV_LOG_WARNING(NULL, "DNN: OpenVINO 2023.0 and higher is required to support quantized networks");
187
        backendId = DNN_BACKEND_OPENCV;
188
    }
189
#endif
190
191
14.2k
    if (preferableBackend != backendId)
192
0
    {
193
0
        clear();
194
0
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
195
0
        {
196
#if defined(HAVE_INF_ENGINE)
197
            switchToOpenVINOBackend(net);
198
#elif defined(ENABLE_PLUGINS)
199
            auto& networkBackend = dnn_backend::createPluginDNNNetworkBackend("openvino");
200
0
            networkBackend.switchBackend(net);
201
#else
202
            CV_Error(Error::StsNotImplemented, "OpenVINO backend is not available in the current OpenCV build");
203
#endif
204
0
        }
205
0
        else if (backendId == DNN_BACKEND_CANN)
206
0
        {
207
#ifdef HAVE_CANN
208
            switchToCannBackend(net);
209
#else
210
0
            CV_Error(Error::StsNotImplemented, "CANN backend is not available in the current OpenCV build");
211
0
#endif
212
0
        }
213
0
        else
214
0
        {
215
0
            preferableBackend = backendId;
216
0
        }
217
0
    }
218
14.2k
}
219
220
void Net::Impl::setPreferableTarget(int targetId)
221
0
{
222
0
    if (netWasQuantized && targetId != DNN_TARGET_CPU &&
223
0
        targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16 && targetId != DNN_TARGET_NPU)
224
0
    {
225
0
        CV_LOG_WARNING(NULL, "DNN: Only CPU, OpenCL/OpenCL FP16 and NPU targets are supported by quantized networks");
226
0
        targetId = DNN_TARGET_CPU;
227
0
    }
228
229
0
    if (preferableTarget != targetId)
230
0
    {
231
0
        preferableTarget = targetId;
232
0
        if (IS_DNN_OPENCL_TARGET(targetId))
233
0
        {
234
#ifndef HAVE_OPENCL
235
#ifdef HAVE_INF_ENGINE
236
            if (preferableBackend == DNN_BACKEND_OPENCV)
237
#else
238
            if (preferableBackend == DNN_BACKEND_DEFAULT ||
239
                preferableBackend == DNN_BACKEND_OPENCV)
240
#endif  // HAVE_INF_ENGINE
241
                preferableTarget = DNN_TARGET_CPU;
242
#else
243
0
            bool fp16 = ocl::Device::getDefault().isExtensionSupported("cl_khr_fp16");
244
0
            if (!fp16 && targetId == DNN_TARGET_OPENCL_FP16)
245
0
                preferableTarget = DNN_TARGET_OPENCL;
246
0
#endif
247
0
        }
248
249
0
        if (IS_DNN_CUDA_TARGET(targetId))
250
0
        {
251
0
            preferableTarget = DNN_TARGET_CPU;
252
#ifdef HAVE_CUDA
253
            if (cuda4dnn::doesDeviceSupportFP16() && targetId == DNN_TARGET_CUDA_FP16)
254
                preferableTarget = DNN_TARGET_CUDA_FP16;
255
            else
256
                preferableTarget = DNN_TARGET_CUDA;
257
#endif
258
0
        }
259
0
#if !defined(__arm64__) || !__arm64__
260
0
        if (targetId == DNN_TARGET_CPU_FP16)
261
0
        {
262
0
            CV_LOG_WARNING(NULL, "DNN: fall back to DNN_TARGET_CPU. Only ARM v8 CPU is supported by DNN_TARGET_CPU_FP16.");
263
0
            targetId = DNN_TARGET_CPU;
264
0
        }
265
0
#endif
266
267
0
        clear();
268
269
0
        if (targetId == DNN_TARGET_CPU_FP16)
270
0
        {
271
0
            if (useWinograd) {
272
0
                CV_LOG_INFO(NULL, "DNN: DNN_TARGET_CPU_FP16 is set => Winograd convolution is disabled by default to preserve accuracy. If needed, enable it explicitly using enableWinograd(true).");
273
0
                enableWinograd(false);
274
0
            }
275
0
        }
276
0
    }
277
0
}
278
279
280
CV__DNN_INLINE_NS_END
281
}}  // namespace cv::dnn