@Namespace(value="cv::dnn") @NoOffset public static class opencv_dnn.Layer extends opencv_core.Algorithm
| Constructor and Description |
|---|
Layer(org.bytedeco.javacpp.Pointer p)
Pointer cast constructor.
|
| Modifier and Type | Method and Description |
|---|---|
void |
applyHalideScheduler(opencv_dnn.BackendNode node,
opencv_dnn.MatPointerVector inputs,
opencv_core.MatVector outputs,
int targetId)
\brief Automatic Halide scheduling based on layer hyper-parameters.
|
opencv_core.MatVector |
blobs()
List of learned parameters that must be stored here to allow reading them using Net::getParam().
|
opencv_dnn.Layer |
blobs(opencv_core.MatVector blobs) |
opencv_core.MatVector |
finalize(opencv_core.MatVector inputs)
\brief \overload
|
void |
finalize(opencv_core.MatVector inputs,
opencv_core.MatVector outputs)
\brief \overload
|
void |
finalize(opencv_dnn.MatPointerVector input,
opencv_core.MatVector output)
\brief Computes and sets internal parameters according to inputs, outputs and blobs.
|
void |
forward_fallback(opencv_core.GpuMatVector inputs,
opencv_core.GpuMatVector outputs,
opencv_core.GpuMatVector internals) |
void |
forward_fallback(opencv_core.MatVector inputs,
opencv_core.MatVector outputs,
opencv_core.MatVector internals)
\brief Given the \p input blobs, computes the output \p blobs.
|
void |
forward_fallback(opencv_core.UMatVector inputs,
opencv_core.UMatVector outputs,
opencv_core.UMatVector internals) |
void |
forward(opencv_core.GpuMatVector inputs,
opencv_core.GpuMatVector outputs,
opencv_core.GpuMatVector internals) |
void |
forward(opencv_core.MatVector inputs,
opencv_core.MatVector outputs,
opencv_core.MatVector internals)
\brief Given the \p input blobs, computes the output \p blobs.
|
void |
forward(opencv_core.UMatVector inputs,
opencv_core.UMatVector outputs,
opencv_core.UMatVector internals) |
void |
forward(opencv_dnn.MatPointerVector input,
opencv_core.MatVector output,
opencv_core.MatVector internals)
\brief Given the \p input blobs, computes the output \p blobs.
|
long |
getFLOPS(opencv_dnn.MatShapeVector inputs,
opencv_dnn.MatShapeVector outputs) |
boolean |
getMemoryShapes(opencv_dnn.MatShapeVector inputs,
int requiredOutputs,
opencv_dnn.MatShapeVector outputs,
opencv_dnn.MatShapeVector internals) |
void |
getScaleShift(opencv_core.Mat scale,
opencv_core.Mat shift)
\brief Returns parameters of layers with channel-wise multiplication and addition.
|
int |
inputNameToIndex(org.bytedeco.javacpp.BytePointer inputName)
\brief Returns index of input blob into the input array.
|
int |
inputNameToIndex(String inputName) |
org.bytedeco.javacpp.BytePointer |
name()
Name of the layer instance, can be used for logging or other internal purposes.
|
opencv_dnn.Layer |
name(org.bytedeco.javacpp.BytePointer name) |
int |
outputNameToIndex(org.bytedeco.javacpp.BytePointer outputName)
\brief Returns index of output blob in output array.
|
int |
outputNameToIndex(String outputName) |
int |
preferableTarget()
prefer target for layer forwarding
|
opencv_dnn.Layer |
preferableTarget(int preferableTarget) |
void |
run(opencv_core.MatVector inputs,
opencv_core.MatVector outputs,
opencv_core.MatVector internals)
\brief Allocates layer and computes output.
|
boolean |
setActivation(opencv_dnn.ActivationLayer layer)
\brief Tries to attach to the layer the subsequent activation layer, i.e.
|
void |
setParamsFrom(opencv_dnn.LayerParams params)
Initializes only #name, #type and #blobs fields.
|
boolean |
supportBackend(int backendId)
\brief Ask layer if it supports a specific backend for doing computations.
|
opencv_dnn.BackendNode |
tryAttach(opencv_dnn.BackendNode node)
\brief Implement layers fusing.
|
boolean |
tryFuse(opencv_dnn.Layer top)
\brief Try to fuse current layer with a next one
|
org.bytedeco.javacpp.BytePointer |
type()
Type name which was used for creating layer by layer factory.
|
opencv_dnn.Layer |
type(org.bytedeco.javacpp.BytePointer type) |
void |
unsetAttached()
\brief "Detaches" all the layers attached to a particular layer.
|
Methods inherited from class opencv_core.Algorithm: clear, empty, getDefaultName, position, read, save, save, write, write
Methods inherited from class org.bytedeco.javacpp.Pointer: address, asBuffer, asByteBuffer, availablePhysicalBytes, calloc, capacity, capacity, close, deallocate, deallocate, deallocateReferences, deallocator, deallocator, equals, fill, formatBytes, free, hashCode, isNull, limit, limit, malloc, maxBytes, maxPhysicalBytes, memchr, memcmp, memcpy, memmove, memset, offsetof, parseBytes, physicalBytes, position, put, realloc, setNull, sizeof, toString, totalBytes, totalPhysicalBytes, withDeallocator, zero
public Layer(org.bytedeco.javacpp.Pointer p)
Pointer cast constructor. See Pointer.Pointer(Pointer).
@ByRef public opencv_core.MatVector blobs()
public opencv_dnn.Layer blobs(opencv_core.MatVector blobs)
public void finalize(@Const @ByRef
opencv_dnn.MatPointerVector input,
@ByRef
opencv_core.MatVector output)
[in] input - vector of already allocated input blobs
[out] output - vector of already allocated output blobs
This method is called after network has allocated all memory for input and output blobs
and before inferencing.
public void forward(@ByRef
opencv_dnn.MatPointerVector input,
@ByRef
opencv_core.MatVector output,
@ByRef
opencv_core.MatVector internals)
[in] input - the input blobs.
[out] output - allocated output blobs, which will store results of the computation.
[out] internals - allocated internal blobs
public void forward(@ByVal
opencv_core.MatVector inputs,
@ByVal
opencv_core.MatVector outputs,
@ByVal
opencv_core.MatVector internals)
[in] inputs - the input blobs.
[out] outputs - allocated output blobs, which will store results of the computation.
[out] internals - allocated internal blobs
public void forward(@ByVal
opencv_core.UMatVector inputs,
@ByVal
opencv_core.UMatVector outputs,
@ByVal
opencv_core.UMatVector internals)
public void forward(@ByVal
opencv_core.GpuMatVector inputs,
@ByVal
opencv_core.GpuMatVector outputs,
@ByVal
opencv_core.GpuMatVector internals)
public void forward_fallback(@ByVal
opencv_core.MatVector inputs,
@ByVal
opencv_core.MatVector outputs,
@ByVal
opencv_core.MatVector internals)
[in] inputs - the input blobs.
[out] outputs - allocated output blobs, which will store results of the computation.
[out] internals - allocated internal blobs
public void forward_fallback(@ByVal
opencv_core.UMatVector inputs,
@ByVal
opencv_core.UMatVector outputs,
@ByVal
opencv_core.UMatVector internals)
public void forward_fallback(@ByVal
opencv_core.GpuMatVector inputs,
@ByVal
opencv_core.GpuMatVector outputs,
@ByVal
opencv_core.GpuMatVector internals)
public void finalize(@Const @ByRef
opencv_core.MatVector inputs,
@ByRef
opencv_core.MatVector outputs)
@ByVal public opencv_core.MatVector finalize(@Const @ByRef opencv_core.MatVector inputs)
public void run(@Const @ByRef
opencv_core.MatVector inputs,
@ByRef
opencv_core.MatVector outputs,
@ByRef
opencv_core.MatVector internals)
public int inputNameToIndex(@opencv_core.Str org.bytedeco.javacpp.BytePointer inputName)
inputName - label of input blob
Each layer input and output can be labeled to easily identify them using "%&lt;layer_name&gt;[.output_name]" notation.
public int inputNameToIndex(@opencv_core.Str String inputName)
public int outputNameToIndex(@opencv_core.Str org.bytedeco.javacpp.BytePointer outputName)
See also: inputNameToIndex()
public int outputNameToIndex(@opencv_core.Str String outputName)
@Cast(value="bool") public boolean supportBackend(int backendId)
[in] backendId - computation backend identifier. See also: Backend
public void applyHalideScheduler(@opencv_core.Ptr opencv_dnn.BackendNode node, @Const @ByRef opencv_dnn.MatPointerVector inputs, @Const @ByRef opencv_core.MatVector outputs, int targetId)
[in] node - Backend node with Halide functions.
[in] inputs - Blobs that will be used in forward invocations.
[in] outputs - Blobs that will be used in forward invocations.
[in] targetId - Target identifier. See also: BackendNode, Target
Layers don't use their own Halide::Func members because layer fusing may have been applied;
in that case the fused function should be scheduled.
@opencv_core.Ptr public opencv_dnn.BackendNode tryAttach(@opencv_core.Ptr opencv_dnn.BackendNode node)
[in] node - Backend node of bottom layer.
Applicable to graph-based backends. If the layer is attached successfully,
returns a non-empty cv::Ptr to a node of the same backend.
Fuse only over the last function.
@Cast(value="bool") public boolean setActivation(@opencv_core.Ptr opencv_dnn.ActivationLayer layer)
[in] layer - The subsequent activation layer.
Returns true if the activation layer has been attached successfully.
@Cast(value="bool") public boolean tryFuse(@opencv_core.Ptr opencv_dnn.Layer top)
[in] top - Next layer to be fused.
public void getScaleShift(@ByRef
opencv_core.Mat scale,
@ByRef
opencv_core.Mat shift)
[out] scale - Channel-wise multipliers. Total number of values should
be equal to the number of channels.
[out] shift - Channel-wise offsets. Total number of values should
be equal to the number of channels.
Some layers can fuse their transformations with further layers.
For example, convolution + batch normalization. This way the base layer
uses weights from the layer after it. The fused layer is skipped.
By default, \p scale and \p shift are empty, meaning the layer has no
element-wise multiplications or additions.
public void unsetAttached()
@Cast(value="bool")
public boolean getMemoryShapes(@Const @ByRef
opencv_dnn.MatShapeVector inputs,
int requiredOutputs,
@ByRef
opencv_dnn.MatShapeVector outputs,
@ByRef
opencv_dnn.MatShapeVector internals)
@Cast(value="int64")
public long getFLOPS(@Const @ByRef
opencv_dnn.MatShapeVector inputs,
@Const @ByRef
opencv_dnn.MatShapeVector outputs)
@opencv_core.Str public org.bytedeco.javacpp.BytePointer name()
public opencv_dnn.Layer name(org.bytedeco.javacpp.BytePointer name)
@opencv_core.Str public org.bytedeco.javacpp.BytePointer type()
public opencv_dnn.Layer type(org.bytedeco.javacpp.BytePointer type)
public int preferableTarget()
public opencv_dnn.Layer preferableTarget(int preferableTarget)
public void setParamsFrom(@Const @ByRef
opencv_dnn.LayerParams params)
Copyright © 2018. All rights reserved.