/src/opencv/modules/dnn/src/layers/padding_layer.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // This file is part of OpenCV project. |
2 | | // It is subject to the license terms in the LICENSE file found in the top-level directory |
3 | | // of this distribution and at http://opencv.org/license.html. |
4 | | |
5 | | // Copyright (C) 2017, Intel Corporation, all rights reserved. |
6 | | // Third party copyrights are property of their respective owners. |
7 | | |
8 | | /* |
9 | | Implementation of padding layer, which adds paddings to input blob. |
10 | | */ |
11 | | |
12 | | #include "../precomp.hpp" |
13 | | #include "layers_common.hpp" |
14 | | #include "../op_cuda.hpp" |
15 | | #include "../op_halide.hpp" |
16 | | #include "../op_inf_engine.hpp" |
17 | | #include "../ie_ngraph.hpp" |
18 | | #include "../op_cann.hpp" |
19 | | |
20 | | #include <vector> |
21 | | |
22 | | #ifdef HAVE_CUDA |
23 | | #include "../cuda4dnn/primitives/padding.hpp" |
24 | | using namespace cv::dnn::cuda4dnn; |
25 | | #endif |
26 | | |
27 | | namespace cv |
28 | | { |
29 | | namespace dnn |
30 | | { |
31 | | |
32 | | class PaddingLayerImpl CV_FINAL : public PaddingLayer |
33 | | { |
34 | | public: |
35 | | PaddingLayerImpl(const LayerParams ¶ms) |
36 | 0 | { |
37 | 0 | setParamsFrom(params); |
38 | 0 | paddingValue = params.get<float>("value", 0); |
39 | 0 | inputDims = params.get<int>("input_dims", -1); |
40 | 0 | paddingType = params.get<String>("type", "constant"); |
41 | |
|
42 | 0 | CV_Assert(params.has("paddings")); |
43 | 0 | const DictValue& paddingsParam = params.get("paddings"); |
44 | 0 | CV_Assert((paddingsParam.size() & 1) == 0); |
45 | | |
46 | 0 | paddings.resize(paddingsParam.size() / 2); |
47 | 0 | for (int i = 0; i < paddings.size(); ++i) |
48 | 0 | { |
49 | 0 | paddings[i].first = paddingsParam.get<int>(i * 2); // Pad before. |
50 | 0 | paddings[i].second = paddingsParam.get<int>(i * 2 + 1); // Pad after. |
51 | 0 | CV_Assert_N(paddings[i].first >= 0, paddings[i].second >= 0); |
52 | 0 | } |
53 | 0 | } |
54 | | |
55 | | bool getMemoryShapes(const std::vector<MatShape> &inputs, |
56 | | const int requiredOutputs, |
57 | | std::vector<MatShape> &outputs, |
58 | | std::vector<MatShape> &internals) const CV_OVERRIDE |
59 | 0 | { |
60 | 0 | CV_Assert(inputs.size() == 1); |
61 | 0 | const MatShape& inpShape = inputs[0]; |
62 | 0 | CV_Assert(inpShape.size() >= paddings.size()); |
63 | 0 | CV_Assert(inputDims == -1 || inpShape.size() == inputDims || inpShape.size() > paddings.size()); |
64 | | |
65 | 0 | outputs.resize(1, inpShape); |
66 | 0 | int offset = (inputDims == -1 ? 0 : (inpShape.size() > inputDims ? 1 : 0)); |
67 | 0 | for (int i = 0; i < paddings.size(); ++i) |
68 | 0 | { |
69 | 0 | outputs[0][offset + i] = inpShape[offset + i] + paddings[i].first + paddings[i].second; |
70 | 0 | } |
71 | 0 | return false; |
72 | 0 | } |
73 | | |
74 | | void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE |
75 | 0 | { |
76 | 0 | std::vector<Mat> inputs; |
77 | 0 | inputs_arr.getMatVector(inputs); |
78 | | |
79 | | // Compute dstRanges. |
80 | 0 | const MatSize& inpShape = inputs[0].size; |
81 | |
|
82 | 0 | if (inputDims != -1 && inputs[0].dims != inputDims) |
83 | 0 | { |
84 | 0 | paddings.insert(paddings.begin(), std::make_pair(0, 0)); |
85 | 0 | } |
86 | |
|
87 | 0 | dstRanges.resize(paddings.size()); |
88 | 0 | for (int i = 0; i < paddings.size(); ++i) |
89 | 0 | { |
90 | 0 | dstRanges[i].start = paddings[i].first; |
91 | 0 | dstRanges[i].end = paddings[i].first + inpShape[i]; |
92 | 0 | } |
93 | | |
94 | | // Add the rest of dimensions. |
95 | 0 | for (int i = dstRanges.size(); i < inputs[0].dims; ++i) |
96 | 0 | { |
97 | 0 | dstRanges.push_back(Range::all()); |
98 | 0 | paddings.push_back(std::make_pair(0, 0)); |
99 | 0 | } |
100 | 0 | inputDims = -1; // Next time paddings are filled for all the dimensions. |
101 | 0 | } |
102 | | |
103 | | virtual bool supportBackend(int backendId) CV_OVERRIDE |
104 | 0 | { |
105 | | #ifdef HAVE_INF_ENGINE |
106 | | if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) |
107 | | { |
108 | | bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL; |
109 | | if (isMyriad) |
110 | | return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0; |
111 | | |
112 | | return (dstRanges.size() <= 4 || !isArmComputePlugin()); |
113 | | } |
114 | | #endif |
115 | 0 | return backendId == DNN_BACKEND_OPENCV || |
116 | 0 | backendId == DNN_BACKEND_CUDA || |
117 | 0 | (backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4) || |
118 | 0 | backendId == DNN_BACKEND_CANN; |
119 | 0 | } |
120 | | |
121 | | void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE |
122 | 0 | { |
123 | 0 | CV_TRACE_FUNCTION(); |
124 | 0 | CV_TRACE_ARG_VALUE(name, "name", name.c_str()); |
125 | |
|
126 | 0 | std::vector<Mat> inputs, outputs; |
127 | 0 | inputs_arr.getMatVector(inputs); |
128 | 0 | outputs_arr.getMatVector(outputs); |
129 | |
|
130 | 0 | if (paddingType == "constant") |
131 | 0 | { |
132 | 0 | outputs[0].setTo(paddingValue); |
133 | 0 | inputs[0].copyTo(outputs[0](dstRanges)); |
134 | 0 | } |
135 | 0 | else if (paddingType == "reflect" || paddingType == "edge") |
136 | 0 | { |
137 | 0 | CV_Assert(inputs.size() == 1); |
138 | 0 | CV_Assert(outputs.size() == 1); |
139 | 0 | CV_Assert(inputs[0].dims == 4); |
140 | 0 | CV_Assert(outputs[0].dims == 4); |
141 | 0 | int borderType = paddingType == "reflect" ? BORDER_REFLECT_101 : BORDER_REPLICATE; |
142 | |
|
143 | 0 | if (inputs[0].size[0] != outputs[0].size[0] || inputs[0].size[1] != outputs[0].size[1]) |
144 | 0 | CV_Error(Error::StsNotImplemented, "Only spatial reflection padding is supported."); |
145 | | |
146 | 0 | const int inpHeight = inputs[0].size[2]; |
147 | 0 | const int inpWidth = inputs[0].size[3]; |
148 | 0 | const int outHeight = outputs[0].size[2]; |
149 | 0 | const int outWidth = outputs[0].size[3]; |
150 | 0 | const int padTop = dstRanges[2].start; |
151 | 0 | const int padBottom = outHeight - dstRanges[2].end; |
152 | 0 | const int padLeft = dstRanges[3].start; |
153 | 0 | const int padRight = outWidth - dstRanges[3].end; |
154 | 0 | CV_CheckLE(padTop, inpHeight, ""); CV_CheckLE(padBottom, inpHeight, ""); |
155 | 0 | CV_CheckLE(padLeft, inpWidth, ""); CV_CheckLE(padRight, inpWidth, ""); |
156 | | |
157 | 0 | for (size_t n = 0; n < inputs[0].size[0]; ++n) |
158 | 0 | { |
159 | 0 | for (size_t ch = 0; ch < inputs[0].size[1]; ++ch) |
160 | 0 | { |
161 | 0 | copyMakeBorder(getPlane(inputs[0], n, ch), |
162 | 0 | getPlane(outputs[0], n, ch), |
163 | 0 | padTop, padBottom, padLeft, padRight, |
164 | 0 | borderType); |
165 | 0 | } |
166 | 0 | } |
167 | 0 | } |
168 | 0 | else |
169 | 0 | CV_Error(Error::StsNotImplemented, "Unknown padding type: " + paddingType); |
170 | 0 | } |
171 | | |
#ifdef HAVE_CUDA
    // Builds the CUDA backend node: translates the textual padding mode into
    // the cuda4dnn enum and forwards the fill value and destination ranges.
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        // Only constant and reflect-101 padding are mapped; "edge" and any
        // other mode fall through to the error below.
        cuda4dnn::PaddingType ptype;
        if (paddingType == "constant")
            ptype = PaddingType::CONSTANT;
        else if (paddingType == "reflect")
            ptype = PaddingType::REFLECTION101;
        else
            CV_Error(Error::StsNotImplemented, "Unsupported padding mode");

        return make_cuda_node<cuda4dnn::PaddingOp>(preferableTarget, std::move(context->stream), ptype, paddingValue, dstRanges);
    }
#endif
192 | | |
    // Builds the Halide backend function for constant padding: the input is
    // extended with `paddingValue` via a constant-exterior boundary condition,
    // then shifted by the per-axis pad-before offsets (clamped at 0).
    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        int inW, inH, inC, inN;
        // dstRanges[i].start is the pad-before amount for axis i (N, C, Y, X);
        // Range::all() entries have start == INT_MIN, hence the max with 0.
        int minN = std::max(dstRanges[0].start, 0);
        int minC = std::max(dstRanges[1].start, 0);
        int minY = std::max(dstRanges[2].start, 0);
        int minX = std::max(dstRanges[3].start, 0);
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
        getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);

        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        // Out-of-bounds reads of `padded` yield paddingValue; shifting the
        // coordinates places the original data at the padded offsets.
        Halide::Func padded =
            Halide::BoundaryConditions::constant_exterior(inputBuffer, paddingValue);
        top(x, y, c, n) = padded(x - minX, y - minY, c - minC, n - minN);
        return Ptr<BackendNode>(new HalideBackendNode(top));
#endif  // HAVE_HALIDE
        // Unreachable when HAVE_HALIDE is defined; otherwise reports "no node".
        return Ptr<BackendNode>();
    }
213 | | |
#ifdef HAVE_CANN
    // Builds the CANN backend node using the PadV3 operator. The layer's
    // (before, after) pairs are flattened into a constant "paddings" input and
    // the fill value becomes the constant "constant_values" input.
    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

        // create operator
        auto op = std::make_shared<ge::op::PadV3>(name);

        // set attributes
        // PadV3 accepts the mode as a string; the layer's "constant" /
        // "reflect" / "edge" names are passed through unchanged.
        op->set_attr_mode(paddingType.c_str());

        // set inputs
        // set inputs : x
        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, x->name.c_str());
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);
        // set inputs : paddings
        // Flatten pairs to [before_0, after_0, before_1, after_1, ...].
        std::vector<int> pads;
        for (int i = 0; i < paddings.size(); i++)
        {
            pads.push_back(paddings[i].first);
            pads.push_back(paddings[i].second);
        }
        std::vector<int> pads_shape{(int)pads.size()};
        Mat paddings_mat(pads_shape, CV_32S, &pads[0]);
        auto op_const_paddings = std::make_shared<CannConstOp>(paddings_mat.data, paddings_mat.type(), pads_shape, cv::format("%s_paddings", name.c_str()));
        op->set_input_paddings(*(op_const_paddings->getOp()));
        op->update_input_desc_paddings(*(op_const_paddings->getTensorDesc()));
        // set inputs : constant_values
        // Scalar fill value, passed as a 1x1 float constant tensor.
        std::vector<int> constant_values_shape{1};
        Mat constant_values_mat(1, 1, CV_32F, Scalar(paddingValue));
        auto op_const_constant_values = std::make_shared<CannConstOp>(constant_values_mat.data, constant_values_mat.type(), constant_values_shape, cv::format("%s_constant_values", name.c_str()));
        op->set_input_constant_values(*(op_const_constant_values->getOp()));
        op->update_input_desc_constant_values(*(op_const_constant_values->getTensorDesc()));

        // set outputs
        auto output_y_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_y_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif
259 | | |
#ifdef HAVE_DNN_NGRAPH
    /** Builds the OpenVINO (nGraph) node via ov::op::v1::Pad.
     *  The (before, after) pairs are split into the pads_begin / pads_end
     *  constant inputs expected by the operator.
     *  Fixes: removed a stray double semicolon after arg_pad_value and made
     *  the loop index size_t to avoid a signed/unsigned comparison.
     */
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        std::vector<int64_t> begins(paddings.size(), 0), ends(paddings.size(), 0);
        for (size_t i = 0; i < paddings.size(); ++i)
        {
            begins[i] = static_cast<int64_t>(paddings[i].first);
            ends[i] = static_cast<int64_t>(paddings[i].second);
        }
        auto padding_below = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begins.size()}, begins.data());
        auto padding_above = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{ends.size()}, ends.data());
        // NOTE(review): every non-constant mode (incl. "edge") maps to REFLECT
        // here; the original comment hinted at SYMMETRIC — confirm intent.
        auto pad_mode = paddingType == "constant" ? ov::op::PadMode::CONSTANT : ov::op::PadMode::REFLECT;
        auto arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{}, &paddingValue);

        // The pad-value input only exists in CONSTANT mode.
        auto pad = paddingType == "constant" ?
            std::make_shared<ov::op::v1::Pad>(ieInpNode, padding_below, padding_above, arg_pad_value, pad_mode) :
            std::make_shared<ov::op::v1::Pad>(ieInpNode, padding_below, padding_above, pad_mode);
        return Ptr<BackendNode>(new InfEngineNgraphNode(pad));
    }
#endif
282 | | |
283 | | virtual bool tryQuantize(const std::vector<std::vector<float> > &scales, |
284 | | const std::vector<std::vector<int> > &zeropoints, LayerParams& params) CV_OVERRIDE |
285 | 0 | { |
286 | 0 | float outputScale = scales[1][0]; |
287 | 0 | int outputZp = zeropoints[1][0]; |
288 | 0 | float padValue = outputZp + std::round(params.get<float>("value", 0)/outputScale); |
289 | 0 | params.set("value", padValue); |
290 | 0 | return true; |
291 | 0 | } |
292 | | |
private:
    std::vector<std::pair<int, int> > paddings; // Pairs pad before, pad after, one per axis.
    std::vector<Range> dstRanges;   // Where the input data lands inside the output blob (set in finalize()).
    int inputDims;                  // Expected input rank ("input_dims" param); reset to -1 once paddings cover all axes.
    float paddingValue;             // Fill value for "constant" padding.
    std::string paddingType;        // "constant", "reflect" or "edge".
};
300 | | |
301 | | Ptr<PaddingLayer> PaddingLayer::create(const LayerParams ¶ms) |
302 | 0 | { |
303 | 0 | return Ptr<PaddingLayer>(new PaddingLayerImpl(params)); |
304 | 0 | } |
305 | | |
306 | | } |
307 | | } |